From adfc003729b8d39332081eec75c442fd965dab3b Mon Sep 17 00:00:00 2001 From: Lubos Dolezel Date: Wed, 18 Jan 2017 22:41:15 +0100 Subject: [PATCH] objc4-706 --- APPLE_LICENSE | 367 + ReleaseNotes.rtf | 358 + libobjc.order | 358 + markgc.cpp | 562 ++ objc.sln | 34 + objc.suo | Bin 0 -> 24576 bytes objc.vcproj | 1088 +++ objc.xcodeproj/project.pbxproj | 823 +++ objcrt/objcrt.vcproj | 95 + prebuild.bat | 15 + runtime/Messengers.subproj/objc-msg-arm.s | 911 +++ runtime/Messengers.subproj/objc-msg-arm64.s | 561 ++ runtime/Messengers.subproj/objc-msg-i386.s | 1164 +++ .../objc-msg-simulator-i386.s | 1045 +++ .../objc-msg-simulator-x86_64.s | 1215 +++ runtime/Messengers.subproj/objc-msg-win32.m | 520 ++ runtime/Messengers.subproj/objc-msg-x86_64.s | 1329 ++++ runtime/NSObjCRuntime.h | 33 + runtime/NSObject.h | 109 + runtime/NSObject.mm | 2352 ++++++ runtime/Object.h | 167 + runtime/Object.mm | 558 ++ runtime/OldClasses.subproj/List.h | 118 + runtime/OldClasses.subproj/List.m | 294 + runtime/Protocol.h | 88 + runtime/Protocol.mm | 128 + runtime/a1a2-blocktramps-arm.s | 148 + runtime/a1a2-blocktramps-arm64.s | 134 + runtime/a1a2-blocktramps-i386.s | 566 ++ runtime/a1a2-blocktramps-x86_64.s | 564 ++ runtime/a2a3-blocktramps-arm.s | 148 + runtime/a2a3-blocktramps-i386.s | 566 ++ runtime/a2a3-blocktramps-x86_64.s | 565 ++ runtime/hashtable.h | 2 + runtime/hashtable2.h | 229 + runtime/hashtable2.mm | 646 ++ runtime/llvm-AlignOf.h | 171 + runtime/llvm-DenseMap.h | 1097 +++ runtime/llvm-DenseMapInfo.h | 200 + runtime/llvm-MathExtras.h | 480 ++ runtime/llvm-type_traits.h | 221 + runtime/maptable.h | 138 + runtime/maptable.mm | 453 ++ runtime/message.h | 333 + runtime/objc-abi.h | 346 + runtime/objc-accessors.mm | 161 + runtime/objc-api.h | 232 + runtime/objc-auto.h | 255 + runtime/objc-auto.mm | 121 + runtime/objc-block-trampolines.mm | 477 ++ runtime/objc-cache-old.h | 43 + runtime/objc-cache-old.mm | 1793 +++++ runtime/objc-cache.h | 21 + runtime/objc-cache.mm | 1111 +++ runtime/objc-class-old.mm | 2528 +++++++ runtime/objc-class.h | 2 + runtime/objc-class.mm | 1272 ++++ runtime/objc-config.h | 170 + runtime/objc-env.h | 42 + runtime/objc-errors.mm | 306 + runtime/objc-exception.h | 116 + runtime/objc-exception.mm | 1396 ++++ runtime/objc-file-old.h | 50 + runtime/objc-file-old.mm | 165 + runtime/objc-file.h | 52 + runtime/objc-file.mm | 127 + runtime/objc-gdb.h | 223 + runtime/objc-initialize.h | 41 + runtime/objc-initialize.mm | 474 ++ runtime/objc-internal.h | 735 ++ runtime/objc-layout.mm | 928 +++ runtime/objc-load.h | 54 + runtime/objc-load.mm | 167 + runtime/objc-loadmethod.h | 45 + runtime/objc-loadmethod.mm | 367 + runtime/objc-lockdebug.h | 89 + runtime/objc-lockdebug.mm | 471 ++ runtime/objc-object.h | 1217 +++ runtime/objc-opt.mm | 360 + runtime/objc-os.h | 1225 +++ runtime/objc-os.mm | 778 ++ runtime/objc-private.h | 1022 +++ runtime/objc-probes.d | 5 + runtime/objc-references.h | 41 + runtime/objc-references.mm | 334 + runtime/objc-runtime-new.h | 1399 ++++ runtime/objc-runtime-new.mm | 6573 +++++++++++++++++ runtime/objc-runtime-old.h | 399 + runtime/objc-runtime-old.mm | 3233 ++++++++ runtime/objc-runtime.h | 2 + runtime/objc-runtime.mm | 1033 +++ runtime/objc-sel-old.mm | 215 + runtime/objc-sel-set.h | 49 + runtime/objc-sel-set.mm | 176 + runtime/objc-sel-table.s | 69 + runtime/objc-sel.mm | 215 + runtime/objc-sync.h | 67 + runtime/objc-sync.mm | 327 + runtime/objc-typeencoding.mm | 365 + runtime/objc-weak.h | 143 + runtime/objc-weak.mm | 505 ++ runtime/objc.h | 234 + runtime/objcrt.c | 
98 + runtime/objcrt.h | 25 + runtime/runtime.h | 1847 +++++ unexported_symbols | 17 + version.bat | 29 + version.rc | 38 + 108 files changed, 57373 insertions(+) create mode 100644 APPLE_LICENSE create mode 100644 ReleaseNotes.rtf create mode 100644 libobjc.order create mode 100644 markgc.cpp create mode 100755 objc.sln create mode 100755 objc.suo create mode 100644 objc.vcproj create mode 100644 objc.xcodeproj/project.pbxproj create mode 100755 objcrt/objcrt.vcproj create mode 100755 prebuild.bat create mode 100644 runtime/Messengers.subproj/objc-msg-arm.s create mode 100755 runtime/Messengers.subproj/objc-msg-arm64.s create mode 100644 runtime/Messengers.subproj/objc-msg-i386.s create mode 100644 runtime/Messengers.subproj/objc-msg-simulator-i386.s create mode 100644 runtime/Messengers.subproj/objc-msg-simulator-x86_64.s create mode 100644 runtime/Messengers.subproj/objc-msg-win32.m create mode 100644 runtime/Messengers.subproj/objc-msg-x86_64.s create mode 100644 runtime/NSObjCRuntime.h create mode 100644 runtime/NSObject.h create mode 100644 runtime/NSObject.mm create mode 100644 runtime/Object.h create mode 100644 runtime/Object.mm create mode 100644 runtime/OldClasses.subproj/List.h create mode 100644 runtime/OldClasses.subproj/List.m create mode 100644 runtime/Protocol.h create mode 100644 runtime/Protocol.mm create mode 100644 runtime/a1a2-blocktramps-arm.s create mode 100644 runtime/a1a2-blocktramps-arm64.s create mode 100755 runtime/a1a2-blocktramps-i386.s create mode 100755 runtime/a1a2-blocktramps-x86_64.s create mode 100644 runtime/a2a3-blocktramps-arm.s create mode 100755 runtime/a2a3-blocktramps-i386.s create mode 100755 runtime/a2a3-blocktramps-x86_64.s create mode 100644 runtime/hashtable.h create mode 100644 runtime/hashtable2.h create mode 100644 runtime/hashtable2.mm create mode 100644 runtime/llvm-AlignOf.h create mode 100644 runtime/llvm-DenseMap.h create mode 100644 runtime/llvm-DenseMapInfo.h create mode 100644 runtime/llvm-MathExtras.h create mode 100644 runtime/llvm-type_traits.h create mode 100644 runtime/maptable.h create mode 100644 runtime/maptable.mm create mode 100644 runtime/message.h create mode 100644 runtime/objc-abi.h create mode 100644 runtime/objc-accessors.mm create mode 100644 runtime/objc-api.h create mode 100644 runtime/objc-auto.h create mode 100644 runtime/objc-auto.mm create mode 100644 runtime/objc-block-trampolines.mm create mode 100644 runtime/objc-cache-old.h create mode 100644 runtime/objc-cache-old.mm create mode 100644 runtime/objc-cache.h create mode 100644 runtime/objc-cache.mm create mode 100644 runtime/objc-class-old.mm create mode 100644 runtime/objc-class.h create mode 100644 runtime/objc-class.mm create mode 100644 runtime/objc-config.h create mode 100644 runtime/objc-env.h create mode 100644 runtime/objc-errors.mm create mode 100644 runtime/objc-exception.h create mode 100644 runtime/objc-exception.mm create mode 100644 runtime/objc-file-old.h create mode 100644 runtime/objc-file-old.mm create mode 100644 runtime/objc-file.h create mode 100644 runtime/objc-file.mm create mode 100644 runtime/objc-gdb.h create mode 100644 runtime/objc-initialize.h create mode 100644 runtime/objc-initialize.mm create mode 100644 runtime/objc-internal.h create mode 100644 runtime/objc-layout.mm create mode 100644 runtime/objc-load.h create mode 100644 runtime/objc-load.mm create mode 100644 runtime/objc-loadmethod.h create mode 100644 runtime/objc-loadmethod.mm create mode 100644 runtime/objc-lockdebug.h create mode 100644 runtime/objc-lockdebug.mm 
create mode 100644 runtime/objc-object.h create mode 100644 runtime/objc-opt.mm create mode 100644 runtime/objc-os.h create mode 100644 runtime/objc-os.mm create mode 100644 runtime/objc-private.h create mode 100644 runtime/objc-probes.d create mode 100644 runtime/objc-references.h create mode 100644 runtime/objc-references.mm create mode 100644 runtime/objc-runtime-new.h create mode 100644 runtime/objc-runtime-new.mm create mode 100644 runtime/objc-runtime-old.h create mode 100644 runtime/objc-runtime-old.mm create mode 100644 runtime/objc-runtime.h create mode 100644 runtime/objc-runtime.mm create mode 100644 runtime/objc-sel-old.mm create mode 100644 runtime/objc-sel-set.h create mode 100644 runtime/objc-sel-set.mm create mode 100644 runtime/objc-sel-table.s create mode 100644 runtime/objc-sel.mm create mode 100644 runtime/objc-sync.h create mode 100644 runtime/objc-sync.mm create mode 100644 runtime/objc-typeencoding.mm create mode 100644 runtime/objc-weak.h create mode 100644 runtime/objc-weak.mm create mode 100644 runtime/objc.h create mode 100644 runtime/objcrt.c create mode 100644 runtime/objcrt.h create mode 100644 runtime/runtime.h create mode 100644 unexported_symbols create mode 100755 version.bat create mode 100644 version.rc diff --git a/APPLE_LICENSE b/APPLE_LICENSE new file mode 100644 index 0000000..fe81a60 --- /dev/null +++ b/APPLE_LICENSE @@ -0,0 +1,367 @@ +APPLE PUBLIC SOURCE LICENSE +Version 2.0 - August 6, 2003 + +Please read this License carefully before downloading this software. +By downloading or using this software, you are agreeing to be bound by +the terms of this License. If you do not or cannot agree to the terms +of this License, please do not download or use the software. + +1. General; Definitions. This License applies to any program or other +work which Apple Computer, Inc. ("Apple") makes publicly available and +which contains a notice placed by Apple identifying such program or +work as "Original Code" and stating that it is subject to the terms of +this Apple Public Source License version 2.0 ("License"). As used in +this License: + +1.1 "Applicable Patent Rights" mean: (a) in the case where Apple is +the grantor of rights, (i) claims of patents that are now or hereafter +acquired, owned by or assigned to Apple and (ii) that cover subject +matter contained in the Original Code, but only to the extent +necessary to use, reproduce and/or distribute the Original Code +without infringement; and (b) in the case where You are the grantor of +rights, (i) claims of patents that are now or hereafter acquired, +owned by or assigned to You and (ii) that cover subject matter in Your +Modifications, taken alone or in combination with Original Code. + +1.2 "Contributor" means any person or entity that creates or +contributes to the creation of Modifications. + +1.3 "Covered Code" means the Original Code, Modifications, the +combination of Original Code and any Modifications, and/or any +respective portions thereof. + +1.4 "Externally Deploy" means: (a) to sublicense, distribute or +otherwise make Covered Code available, directly or indirectly, to +anyone other than You; and/or (b) to use Covered Code, alone or as +part of a Larger Work, in any way to provide a service, including but +not limited to delivery of content, through electronic communication +with a client other than You. + +1.5 "Larger Work" means a work which combines Covered Code or portions +thereof with code not governed by the terms of this License. 
+ +1.6 "Modifications" mean any addition to, deletion from, and/or change +to, the substance and/or structure of the Original Code, any previous +Modifications, the combination of Original Code and any previous +Modifications, and/or any respective portions thereof. When code is +released as a series of files, a Modification is: (a) any addition to +or deletion from the contents of a file containing Covered Code; +and/or (b) any new file or other representation of computer program +statements that contains any part of Covered Code. + +1.7 "Original Code" means (a) the Source Code of a program or other +work as originally made available by Apple under this License, +including the Source Code of any updates or upgrades to such programs +or works made available by Apple under this License, and that has been +expressly identified by Apple as such in the header file(s) of such +work; and (b) the object code compiled from such Source Code and +originally made available by Apple under this License. + +1.8 "Source Code" means the human readable form of a program or other +work that is suitable for making modifications to it, including all +modules it contains, plus any associated interface definition files, +scripts used to control compilation and installation of an executable +(object code). + +1.9 "You" or "Your" means an individual or a legal entity exercising +rights under this License. For legal entities, "You" or "Your" +includes any entity which controls, is controlled by, or is under +common control with, You, where "control" means (a) the power, direct +or indirect, to cause the direction or management of such entity, +whether by contract or otherwise, or (b) ownership of fifty percent +(50%) or more of the outstanding shares or beneficial ownership of +such entity. + +2. Permitted Uses; Conditions & Restrictions. Subject to the terms +and conditions of this License, Apple hereby grants You, effective on +the date You accept this License and download the Original Code, a +world-wide, royalty-free, non-exclusive license, to the extent of +Apple's Applicable Patent Rights and copyrights covering the Original +Code, to do the following: + +2.1 Unmodified Code. You may use, reproduce, display, perform, +internally distribute within Your organization, and Externally Deploy +verbatim, unmodified copies of the Original Code, for commercial or +non-commercial purposes, provided that in each instance: + +(a) You must retain and reproduce in all copies of Original Code the +copyright and other proprietary notices and disclaimers of Apple as +they appear in the Original Code, and keep intact all notices in the +Original Code that refer to this License; and + +(b) You must include a copy of this License with every copy of Source +Code of Covered Code and documentation You distribute or Externally +Deploy, and You may not offer or impose any terms on such Source Code +that alter or restrict this License or the recipients' rights +hereunder, except as permitted under Section 6. + +2.2 Modified Code. 
You may modify Covered Code and use, reproduce, +display, perform, internally distribute within Your organization, and +Externally Deploy Your Modifications and Covered Code, for commercial +or non-commercial purposes, provided that in each instance You also +meet all of these conditions: + +(a) You must satisfy all the conditions of Section 2.1 with respect to +the Source Code of the Covered Code; + +(b) You must duplicate, to the extent it does not already exist, the +notice in Exhibit A in each file of the Source Code of all Your +Modifications, and cause the modified files to carry prominent notices +stating that You changed the files and the date of any change; and + +(c) If You Externally Deploy Your Modifications, You must make +Source Code of all Your Externally Deployed Modifications either +available to those to whom You have Externally Deployed Your +Modifications, or publicly available. Source Code of Your Externally +Deployed Modifications must be released under the terms set forth in +this License, including the license grants set forth in Section 3 +below, for as long as you Externally Deploy the Covered Code or twelve +(12) months from the date of initial External Deployment, whichever is +longer. You should preferably distribute the Source Code of Your +Externally Deployed Modifications electronically (e.g. download from a +web site). + +2.3 Distribution of Executable Versions. In addition, if You +Externally Deploy Covered Code (Original Code and/or Modifications) in +object code, executable form only, You must include a prominent +notice, in the code itself as well as in related documentation, +stating that Source Code of the Covered Code is available under the +terms of this License with information on how and where to obtain such +Source Code. + +2.4 Third Party Rights. You expressly acknowledge and agree that +although Apple and each Contributor grants the licenses to their +respective portions of the Covered Code set forth herein, no +assurances are provided by Apple or any Contributor that the Covered +Code does not infringe the patent or other intellectual property +rights of any other entity. Apple and each Contributor disclaim any +liability to You for claims brought by any other entity based on +infringement of intellectual property rights or otherwise. As a +condition to exercising the rights and licenses granted hereunder, You +hereby assume sole responsibility to secure any other intellectual +property rights needed, if any. For example, if a third party patent +license is required to allow You to distribute the Covered Code, it is +Your responsibility to acquire that license before distributing the +Covered Code. + +3. Your Grants. In consideration of, and as a condition to, the +licenses granted to You under this License, You hereby grant to any +person or entity receiving or distributing Covered Code under this +License a non-exclusive, royalty-free, perpetual, irrevocable license, +under Your Applicable Patent Rights and other intellectual property +rights (other than patent) owned or controlled by You, to use, +reproduce, display, perform, modify, sublicense, distribute and +Externally Deploy Your Modifications of the same scope and extent as +Apple's licenses under Sections 2.1 and 2.2 above. + +4. Larger Works. You may create a Larger Work by combining Covered +Code with other code not governed by the terms of this License and +distribute the Larger Work as a single product. 
In each such instance, +You must make sure the requirements of this License are fulfilled for +the Covered Code or any portion thereof. + +5. Limitations on Patent License. Except as expressly stated in +Section 2, no other patent rights, express or implied, are granted by +Apple herein. Modifications and/or Larger Works may require additional +patent licenses from Apple which Apple may grant in its sole +discretion. + +6. Additional Terms. You may choose to offer, and to charge a fee for, +warranty, support, indemnity or liability obligations and/or other +rights consistent with the scope of the license granted herein +("Additional Terms") to one or more recipients of Covered Code. +However, You may do so only on Your own behalf and as Your sole +responsibility, and not on behalf of Apple or any Contributor. You +must obtain the recipient's agreement that any such Additional Terms +are offered by You alone, and You hereby agree to indemnify, defend +and hold Apple and every Contributor harmless for any liability +incurred by or claims asserted against Apple or such Contributor by +reason of any such Additional Terms. + +7. Versions of the License. Apple may publish revised and/or new +versions of this License from time to time. Each version will be given +a distinguishing version number. Once Original Code has been published +under a particular version of this License, You may continue to use it +under the terms of that version. You may also choose to use such +Original Code under the terms of any subsequent version of this +License published by Apple. No one other than Apple has the right to +modify the terms applicable to Covered Code created under this +License. + +8. NO WARRANTY OR SUPPORT. The Covered Code may contain in whole or in +part pre-release, untested, or not fully tested works. The Covered +Code may contain errors that could cause failures or loss of data, and +may be incomplete or contain inaccuracies. You expressly acknowledge +and agree that use of the Covered Code, or any portion thereof, is at +Your sole and entire risk. THE COVERED CODE IS PROVIDED "AS IS" AND +WITHOUT WARRANTY, UPGRADES OR SUPPORT OF ANY KIND AND APPLE AND +APPLE'S LICENSOR(S) (COLLECTIVELY REFERRED TO AS "APPLE" FOR THE +PURPOSES OF SECTIONS 8 AND 9) AND ALL CONTRIBUTORS EXPRESSLY DISCLAIM +ALL WARRANTIES AND/OR CONDITIONS, EXPRESS OR IMPLIED, INCLUDING, BUT +NOT LIMITED TO, THE IMPLIED WARRANTIES AND/OR CONDITIONS OF +MERCHANTABILITY, OF SATISFACTORY QUALITY, OF FITNESS FOR A PARTICULAR +PURPOSE, OF ACCURACY, OF QUIET ENJOYMENT, AND NONINFRINGEMENT OF THIRD +PARTY RIGHTS. APPLE AND EACH CONTRIBUTOR DOES NOT WARRANT AGAINST +INTERFERENCE WITH YOUR ENJOYMENT OF THE COVERED CODE, THAT THE +FUNCTIONS CONTAINED IN THE COVERED CODE WILL MEET YOUR REQUIREMENTS, +THAT THE OPERATION OF THE COVERED CODE WILL BE UNINTERRUPTED OR +ERROR-FREE, OR THAT DEFECTS IN THE COVERED CODE WILL BE CORRECTED. NO +ORAL OR WRITTEN INFORMATION OR ADVICE GIVEN BY APPLE, AN APPLE +AUTHORIZED REPRESENTATIVE OR ANY CONTRIBUTOR SHALL CREATE A WARRANTY. +You acknowledge that the Covered Code is not intended for use in the +operation of nuclear facilities, aircraft navigation, communication +systems, or air traffic control machines in which case the failure of +the Covered Code could lead to death, personal injury, or severe +physical or environmental damage. + +9. LIMITATION OF LIABILITY. 
TO THE EXTENT NOT PROHIBITED BY LAW, IN NO +EVENT SHALL APPLE OR ANY CONTRIBUTOR BE LIABLE FOR ANY INCIDENTAL, +SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES ARISING OUT OF OR RELATING +TO THIS LICENSE OR YOUR USE OR INABILITY TO USE THE COVERED CODE, OR +ANY PORTION THEREOF, WHETHER UNDER A THEORY OF CONTRACT, WARRANTY, +TORT (INCLUDING NEGLIGENCE), PRODUCTS LIABILITY OR OTHERWISE, EVEN IF +APPLE OR SUCH CONTRIBUTOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES AND NOTWITHSTANDING THE FAILURE OF ESSENTIAL PURPOSE OF ANY +REMEDY. SOME JURISDICTIONS DO NOT ALLOW THE LIMITATION OF LIABILITY OF +INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO THIS LIMITATION MAY NOT APPLY +TO YOU. In no event shall Apple's total liability to You for all +damages (other than as may be required by applicable law) under this +License exceed the amount of fifty dollars ($50.00). + +10. Trademarks. This License does not grant any rights to use the +trademarks or trade names "Apple", "Apple Computer", "Mac", "Mac OS", +"QuickTime", "QuickTime Streaming Server" or any other trademarks, +service marks, logos or trade names belonging to Apple (collectively +"Apple Marks") or to any trademark, service mark, logo or trade name +belonging to any Contributor. You agree not to use any Apple Marks in +or as part of the name of products derived from the Original Code or +to endorse or promote products derived from the Original Code other +than as expressly permitted by and in strict compliance at all times +with Apple's third party trademark usage guidelines which are posted +at http://www.apple.com/legal/guidelinesfor3rdparties.html. + +11. Ownership. Subject to the licenses granted under this License, +each Contributor retains all rights, title and interest in and to any +Modifications made by such Contributor. Apple retains all rights, +title and interest in and to the Original Code and any Modifications +made by or on behalf of Apple ("Apple Modifications"), and such Apple +Modifications will not be automatically subject to this License. Apple +may, at its sole discretion, choose to license such Apple +Modifications under this License, or on different terms from those +contained in this License or may choose not to license them at all. + +12. Termination. + +12.1 Termination. This License and the rights granted hereunder will +terminate: + +(a) automatically without notice from Apple if You fail to comply with +any term(s) of this License and fail to cure such breach within 30 +days of becoming aware of such breach; + +(b) immediately in the event of the circumstances described in Section +13.5(b); or + +(c) automatically without notice from Apple if You, at any time during +the term of this License, commence an action for patent infringement +against Apple; provided that Apple did not first commence +an action for patent infringement against You in that instance. + +12.2 Effect of Termination. Upon termination, You agree to immediately +stop any further use, reproduction, modification, sublicensing and +distribution of the Covered Code. All sublicenses to the Covered Code +which have been properly granted prior to termination shall survive +any termination of this License. Provisions which, by their nature, +should remain in effect beyond the termination of this License shall +survive, including but not limited to Sections 3, 5, 8, 9, 10, 11, +12.2 and 13. 
No party will be liable to any other for compensation, +indemnity or damages of any sort solely as a result of terminating +this License in accordance with its terms, and termination of this +License will be without prejudice to any other right or remedy of +any party. + +13. Miscellaneous. + +13.1 Government End Users. The Covered Code is a "commercial item" as +defined in FAR 2.101. Government software and technical data rights in +the Covered Code include only those rights customarily provided to the +public as defined in this License. This customary commercial license +in technical data and software is provided in accordance with FAR +12.211 (Technical Data) and 12.212 (Computer Software) and, for +Department of Defense purchases, DFAR 252.227-7015 (Technical Data -- +Commercial Items) and 227.7202-3 (Rights in Commercial Computer +Software or Computer Software Documentation). Accordingly, all U.S. +Government End Users acquire Covered Code with only those rights set +forth herein. + +13.2 Relationship of Parties. This License will not be construed as +creating an agency, partnership, joint venture or any other form of +legal association between or among You, Apple or any Contributor, and +You will not represent to the contrary, whether expressly, by +implication, appearance or otherwise. + +13.3 Independent Development. Nothing in this License will impair +Apple's right to acquire, license, develop, have others develop for +it, market and/or distribute technology or products that perform the +same or similar functions as, or otherwise compete with, +Modifications, Larger Works, technology or products that You may +develop, produce, market or distribute. + +13.4 Waiver; Construction. Failure by Apple or any Contributor to +enforce any provision of this License will not be deemed a waiver of +future enforcement of that or any other provision. Any law or +regulation which provides that the language of a contract shall be +construed against the drafter will not apply to this License. + +13.5 Severability. (a) If for any reason a court of competent +jurisdiction finds any provision of this License, or portion thereof, +to be unenforceable, that provision of the License will be enforced to +the maximum extent permissible so as to effect the economic benefits +and intent of the parties, and the remainder of this License will +continue in full force and effect. (b) Notwithstanding the foregoing, +if applicable law prohibits or restricts You from fully and/or +specifically complying with Sections 2 and/or 3 or prevents the +enforceability of either of those Sections, this License will +immediately terminate and You must immediately discontinue any use of +the Covered Code and destroy all copies of it that are in your +possession or control. + +13.6 Dispute Resolution. Any litigation or other dispute resolution +between You and Apple relating to this License shall take place in the +Northern District of California, and You and Apple hereby consent to +the personal jurisdiction of, and venue in, the state and federal +courts within that District with respect to this License. The +application of the United Nations Convention on Contracts for the +International Sale of Goods is expressly excluded. + +13.7 Entire Agreement; Governing Law. This License constitutes the +entire agreement between the parties with respect to the subject +matter hereof. 
This License shall be governed by the laws of the +United States and the State of California, except that body of +California law concerning conflicts of law. + +Where You are located in the province of Quebec, Canada, the following +clause applies: The parties hereby confirm that they have requested +that this License and all related documents be drafted in English. Les +parties ont exige que le present contrat et tous les documents +connexes soient rediges en anglais. + +EXHIBIT A. + +"Portions Copyright (c) 1999-2003 Apple Computer, Inc. All Rights +Reserved. + +This file contains Original Code and/or Modifications of Original Code +as defined in and that are subject to the Apple Public Source License +Version 2.0 (the 'License'). You may not use this file except in +compliance with the License. Please obtain a copy of the License at +http://www.opensource.apple.com/apsl/ and read it before using this +file. + +The Original Code and all software distributed under the License are +distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER +EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, +INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. +Please see the License for the specific language governing rights and +limitations under the License." diff --git a/ReleaseNotes.rtf b/ReleaseNotes.rtf new file mode 100644 index 0000000..eec82a9 --- /dev/null +++ b/ReleaseNotes.rtf @@ -0,0 +1,358 @@ +{\rtf1\mac\ansicpg10000\cocoartf824\cocoasubrtf420 +{\fonttbl\f0\fswiss\fcharset77 Helvetica-Bold;\f1\fswiss\fcharset77 Helvetica;\f2\fnil\fcharset77 Monaco; +} +{\colortbl;\red255\green255\blue255;\red70\green130\blue100;} +\vieww11200\viewh14360\viewkind0 +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs30 \cf0 Objective-C Release Notes\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0\fs24 \cf0 \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b\fs30 \cf0 Mac OS X 10.5 Leopard +\f1\b0\fs24 \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 Contents +\f1\b0 \ +\'a5 Garbage Collection\ +\'a5\'caProperties\ +\'a5\'caLoading and Unloading Bundles\ +\'a5 Method and Class Attributes\ +\'a5\'ca@package Instance Variables\ +\'a5\'caRuntime API changes\ +\'a5\'ca64-bit ABI\ +\'a5\'ca64-bit Class and Instance Variable Access Control\ +\'a5\'ca64-bit Non-Fragile Instance Variables\ +\'a5\'ca64-bit Zero-Cost C++-Compatible Exceptions\ +\ +\ + +\f0\b Garbage Collection\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ +The Objective-C runtime examines on startup the execution image to determine whether to run with garbage collection or not. Each object file has an info section and they must all agree for execution to proceed. Standard compilation results in an info section that indicates that no GC capability is present. Compiling with -fobjc-gc indicates that both GC and retain/release logic is present. Compiling with -fobjc-gc-only indicates that only GC logic is present. 
A non-GC executable that attempts to load a gc-only framework will fail, as will a GC capable executable that attemps to load a GC incapable framework (or bundle).\ +\ +The collector initially runs only on the main thread when requested via objc_collect_if_needed(1), which is called automatically from the autoreleasepool -drain method. The AppKit arranges to call objc_start_collector_thread() after launch and subsequently collections run on a dedicated thread and are responsive to pure allocation demand. The objc_set_collection_threshold and objc_set_collection_ratio calls are used to establish the "need" for a collection. Once every ratio times a full (complete) collection will occur; otherwise a generational collection will be done if allocations have exceeded the threshold.\ +\ +The garbage collector minimally pauses those threads which have been registered to it while collecting. Registration occurs during establishment of an NSThread, not simply a pthread.\ +\ +A critical assumption that the collector makes is that one thread never gains access to an object (or more generally any block of garbage collected memory) by way of a pointer to another thread's stack memory. In other words, the collector does not make provision for cross thread stack references. This enables the collector to avoid pausing all threads at the same time while it examines recursively all of their references.\ +\ +The compiler uses three "helper" functions for assignments of strong pointers to garbage collected memory into global memory ( +\f2\fs20 objc_assign_global +\f1\fs24 ), garbage collected heap memory ( +\f2\fs20 objc_assign_ivar +\f1\fs24 ), or into unknown memory ( +\f2\fs20 objc_assign_strongCast +\f1\fs24 ). For assignments of weak pointers it uses objc_assign_weak and for reads it uses objc_read_weak.\ +\ +When copying memory in bulk into a garbage collected block one must use the API +\f2\fs20 objc_memmove_collectable(void *dst, const void *src, size_t size) +\f1\fs24 .\ +\ +Garbage Collection Errors\ +\ +The collector itself is found in +\f2\fs20 /usr/lib/libauto.dylib +\f1\fs24 . Its error messages are printed using +\f2\fs20 malloc_printf +\f1\fs24 . The ObjC runtime is found in +\f2\fs20 /usr/lib/libobjc.dylib +\f1\fs24 . Its errors are printed using +\f2\fs20 _objc_inform +\f1\fs24 . Currently we note resurrection and reference count underflow errors by calling the following routines:\ +\ +\pard\tx960\pardeftab960\ql\qnatural\pardirnatural + +\f2\fs20 \cf2 \CocoaLigature0 objc_assign_global_error\ +\pard\tx960\pardeftab960\ql\qnatural\pardirnatural +\cf0 objc_assign_ivar_error\ +\pard\tx960\pardeftab960\ql\qnatural\pardirnatural +\cf2 objc_exception_during_finalize_error\ +auto_zone_resurrection_error\cf0 \ +\cf2 auto_refcount_underflow_error +\f1\fs24 \cf0 \CocoaLigature1 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural +\cf0 \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 Properties +\f1\b0 \ +\ +The syntax for Objective-C properties has been overhauled since WWDC 2006. See the property documentation for details.\ +\ +In summary, @property(attributes) type name introduces an implicit declaration of a "getter" and a "setter" method (unless a read-only property is requested) for the "variable" named. 
The setter= and getter= attributes allow one to specify the names of the methods, otherwise a "name" method and a "setName:" method are implicitly declared. They may also be explicitly named.\ +\ +By default, properties are assigned when set. For objects under non-GC this is often incorrect and a warning is issued unless the assignment semantic is explicitly named. There are three choices - assign, for non-retained object references, copy, for objects that are copied and implicitly retained, and simply retain, for objects that require being retained when set.\ +\ +Access to properties is atomic by default. This is trivial under GC for almost everything and also trivial under non-GC for everything but objects and structures. In particular atomic access to retained objects under non-GC conditions can be expensive. As such, a nonatomic property attribute is available.\ +\ +Pointers may be held strongly under GC by declaring them __strong, and they can be zeroing weak by declaring them __weak.\ +\ +The implementations for properties can be provided by the compiler and runtime through the use of the @synthesize statement in the @implementation section of the class (or class extension). The compiler expects an instance variable of the same name as the property. If one wishes a different name it can be supplied to the @synthesize statement.\ +\ +In particular the compiler and runtime will implement accessors to retained objects by using atomic compare and swap instructions. It is extremely dangerous to directly access an atomic object property through its instance variable since another thread might change its value unpredictably. As such the compiler will warn you about such unprotected accesses. The runtime, in fact, will temporarily use the least significant bit of the instance variable as a temporary lock while retaining the new value and releasing the old. Direct use of an atomic instance variable under non-GC is strongly discouraged.\ +\ +\ + +\f0\b Loading and Unloading Bundles\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 \ +Since Mac OS X Version 10.4 it has been possible to unload bundles containing Objective-C. No attempt is made to prevent this if objects are still present for classes that are unloaded. Subclasses of classes loaded in bundles are particularly vulnerable.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 Method and Class Attributes\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 Objective-C now supports some gcc attributes for Objective-C methods. Syntactically, attributes for a method follow the method's declaration, and attributes for a method parameter sit between the parameter type and the parameter name. 
Supported attributes include:\ +\ +Deprecation and availability, including AvailabilityMacros.h\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 - (void)method:(id)param __attribute__((deprecated));\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\fs24 \cf0 \ +Unused parameters\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 - (void)method:(id) __attribute__((unused)) param;\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\fs24 \cf0 \ +Sentinel parameters, including +\f2\fs20 NS_REQUIRES_NIL_TERMINATION +\f1\fs24 \ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 - (void)methodWithObjects:(id)obj, ... NS_REQUIRES_NIL_TERMINATION;\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\fs24 \cf0 \ +Objective-C also supports some gcc attributes for Objective-C classes. Syntactically, attributes for a class precede the class's +\f2\fs20 @interface +\f1\fs24 declaration. Supported attributes include:\ +\ +Deprecation and availability, including AvailabilityMacros.h\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 __attribute__((deprecated))\ + @interface MyDeprecatedClass : SomeSuperclass\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\fs24 \cf0 \ +Visibility\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 __attribute__((visibility("hidden")))\ + @interface MyPrivateClass : SomeSuperclass +\f1\fs24 \ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 @package Instance Variables +\f1\b0 \ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 @package +\f1\fs24 is a new ivar protection class, like +\f2\fs20 @public +\f1\fs24 and +\f2\fs20 @protected +\f1\fs24 . +\f2\fs20 @package +\f1\fs24 ivars behave as follows:\ +\'a5\'ca +\f2\fs20 @public +\f1\fs24 in 32-bit; \ +\'a5\'ca +\f2\fs20 @public +\f1\fs24 in 64-bit, inside the framework that defined the class; \ +\'a5\'ca +\f2\fs20 @private +\f1\fs24 in 64-bit, outside the framework that defined the class.\ +\ +In 64-bit, the ivar symbol for an +\f2\fs20 @package +\f1\fs24 ivar is not exported, so any attempt to use the ivar from outside the framework that defined the class will fail with a link error. See "64-bit Class and Instance Variable Access Control" for more about ivar symbols.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 Runtime API changes\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 The C interface to the Objective-C runtime (in +\f2\fs20 +\f1\fs24 ) has changed significantly. 
Highlights include:\ +\'a5\'caAlmost all structures are deprecated, including +\f2\fs20 struct objc_class +\f1\fs24 . Functional replacements for most of these are provided.\ +\'a5\'ca +\f2\fs20 class_poseAs +\f1\fs24 is deprecated. Use method list manipulation functions instead.\ +\'a5\'ca +\f2\fs20 class_nextMethodList +\f1\fs24 is deprecated. Use +\f2\fs20 class_copyMethodList +\f1\fs24 instead.\ +\'a5\'ca +\f2\fs20 class_addMethods +\f1\fs24 is deprecated. Use +\f2\fs20 class_addMethod +\f1\fs24 instead.\ +\'a5\'ca +\f2\fs20 objc_addClass +\f1\fs24 is deprecated. Use +\f2\fs20 objc_allocateClassPair +\f1\fs24 and +\f2\fs20 objc_registerClassPair +\f1\fs24 instead.\ +\'a5\'caIn general, all deprecated declarations are absent in 64-bit.\ +\'a5\'caThe API in objc/objc-runtime.h and objc/objc-class.h is now in objc/runtime.h and objc/message.h. The old header files simply #include the new ones.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 64-bit ABI\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 The 64-bit Objective-C ABI is generally unlike the 32-bit ABI. The new ABI provides new features, better performance, and improved future adaptability. All aspects of the 64-bit ABI are private and subject to future change. Forthcoming documentation will describe the ABI for the use of compilers and developer tools only.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 64-bit Class and Instance Variable Access Control\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 In 64-bit Objective-C, access control for classes and each class and instance variable has a symbol associated with it. All uses of a class or instance variable reference this symbol. These symbols are subject to access control by the linker.\ +\ +The upshot is that access to private classes and ivars is more strictly enforced. Illegal use of a private ivar may fail with a link error. Frameworks that provide classes and ivars must correctly export their symbols. In particular, frameworks built with +\f2\fs20 -fvisibility=hidden +\f1\fs24 or a linker export list may need to be changed.\ +\ +Class symbols have names of the form +\f2\fs20 _OBJC_CLASS_$_ClassName +\f1\fs24 and +\f2\fs20 _OBJC_METACLASS_$_ClassName +\f1\fs24 . The class symbol is used by clients who send messages to the class (i.e. +\f2\fs20 [ClassName someMessage] +\f1\fs24 ). The metaclass symbol is used by clients who subclass the class.\ +\ +By default, class symbols are exported. They are affected by gcc's symbol visibility flags, so +\f2\fs20 -fvisibility=hidden +\f1\fs24 will make the class symbols non-exported. The linker recognizes the old symbol name +\f2\fs20 .objc_class_name_ClassName +\f1\fs24 in linker export lists and translates it to these symbols. 
\ +\ +Visibility of a single class can be changed using an attribute.\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f2\fs20 \cf0 __attribute__((visibility("hidden")))\ + @interface ClassName : SomeSuperclass\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\fs24 \cf0 For classes with +\f2\fs20 "default" +\f1\fs24 visibility, the class symbols are exported, and the ivar symbols are handled as described below. For classes with +\f2\fs20 "hidden" +\f1\fs24 visibility, the class symbols and ivar symbols are all not exported.\ +\ +Ivar symbols have the form +\f2\fs20 _OBJC_IVAR_$_ClassName.IvarName +\f1\fs24 . The ivar symbol is used by clients who read or write the ivar.\ +\ +By default, ivar symbols for +\f2\fs20 @private +\f1\fs24 and +\f2\fs20 @package +\f1\fs24 ivars are not exported, and ivar symbols for +\f2\fs20 @public +\f1\fs24 and +\f2\fs20 @protected +\f1\fs24 ivars are exported. This can be changed by export lists, +\f2\fs20 -fvisibility +\f1\fs24 , or a visibility attribute on the class. Visibility attributes on individual ivars are currently not supported.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 64-bit Non-Fragile Instance Variables\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 All instance variables in 64-bit Objective-C are non-fragile. That is, existing compiled code that uses a class's ivars will not break when the class or a superclass changes its own ivar layout. In particular, framework classes may add new ivars without breaking subclasses compiled against a previous version of the framework.\ +\ +Ivars may be added or reordered freely; existing users of a reordered ivar will adapt transparently. Other ivar changes are safe except that they will break any existing users of the ivar: deleting an ivar, renaming an ivar, moving an ivar to a different class, and changing the type of an ivar. \ +\ +Do not use +\f2\fs20 @defs +\f1\fs24 . The ivar layout it presents cannot adapt to superclass changes.\ +\ +Do not use +\f2\fs20 sizeof(SomeClass) +\f1\fs24 . Use +\f2\fs20 class_getInstanceSize([SomeClass class]) +\f1\fs24 instead.\ +\ +Do not use +\f2\fs20 offsetof(SomeClass, SomeIvar) +\f1\fs24 . Use +\f2\fs20 ivar_getOffset(class_getInstanceVariable([SomeClass class], "SomeIvar")) +\f1\fs24 instead.\ +\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f0\b \cf0 64-bit Zero-Cost C++-Compatible Exceptions\ +\ +\pard\tx720\tx1440\tx2160\tx2880\tx3600\tx4320\tx5040\tx5760\tx6480\tx7200\tx7920\tx8640\ql\qnatural\pardirnatural + +\f1\b0 \cf0 In 64-bit, the implementation of Objective-C exceptions has been rewritten. The new system provides "zero-cost" try blocks and interoperability with C++. \ +\ +"Zero-cost" try blocks incur no time penalty when entering an +\f2\fs20 @try +\f1\fs24 block, unlike 32-bit which must call +\f2\fs20 setjmp() +\f1\fs24 and other additional bookkeeping. On the other hand, actually throwing an exception is much more expensive. For best performance in 64-bit, exceptions should be thrown only in exceptional cases.\ +\ +The Cocoa frameworks require that all exceptions be instances of NSException or its subclasses. 
Do not throw objects of other types.\ +\ +The Cocoa frameworks are generally not exception-safe. Their general pattern is that exceptions are reserved for programmer error only, and the program should quit soon after catching such an exception. Be careful when throwing exceptions across the Cocoa frameworks.\ +\ +In 64-bit, C++ exceptions and Objective-C exceptions are interoperable. In particular, C++ destructors and Objective-C +\f2\fs20 @finally +\f1\fs24 blocks are honored when unwinding any exception, and default catch clauses - +\f2\fs20 catch (...) +\f1\fs24 and +\f2\fs20 @catch (...) +\f1\fs24 - are able to catch and re-throw any exception.\ +\ +Objective-C +\f2\fs20 @catch (id e) +\f1\fs24 catches any Objective-C exception, but no C++ exceptions. Use +\f2\fs20 @catch (...) +\f1\fs24 to catch everything, and +\f2\fs20 @throw; +\f1\fs24 to re-throw caught exceptions. +\f2\fs20 @catch (...) +\f1\fs24 is allowed in 32-bit, and has the same effect there as +\f2\fs20 @catch (id e) +\f1\fs24 . \ +} \ No newline at end of file diff --git a/libobjc.order b/libobjc.order new file mode 100644 index 0000000..c2f2c6e --- /dev/null +++ b/libobjc.order @@ -0,0 +1,358 @@ +__objc_init +_environ_init +_tls_init +_lock_init +_recursive_mutex_init +_exception_init +_map_images +_map_images_nolock +__getObjcImageInfo +__hasObjcContents +__objc_appendHeader +_verify_gc_readiness +_gc_init +__objc_inform_on_crash +__objc_crashlog +_rtp_init +_gc_fixup_barrier_stubs +__objc_update_stubs_in_mach_header +_sel_init +_sel_lock +___sel_registerName +__objc_search_builtins +__ZNK8objc_opt13objc_selopt_t3getEPKc +__ZNK8objc_opt13objc_selopt_t4hashEPKc +_sel_unlock +_sel_registerName +_arr_init +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE4initEj +__read_images +__Z11initVtablesv +__Z17appendTrampolinesP22objc_trampoline_header +_gdb_objc_trampolines_changed +_crashlog_header_name +__getObjc2ClassList +_NXCreateMapTableFromZone +_NXCreateHashTable +_NXCreateHashTableFromZone +_NXHashGet +_NXHashInsert +__NXHashRehashToCapacity +_NXNextHashState +_freeBuckets +_NXNoEffectFree +_hashPrototype +_NXPtrHash +_isEqualPrototype +__Z13futureClassesv +_NXCountMapTable +__Z13addNamedClassP7class_tPKc +_NXMapGet +__mapStrHash +_NXMapInsert +__mapPtrHash +__Z10remapClassP7class_t +__Z15remappedClassesa +_NXMapMember +__NXMapMember +__mapStrIsEqual +__mapPtrIsEqual +__getObjc2ClassRefs +__getObjc2SuperRefs +_sel_preoptimizationValid +__getObjc2SelectorRefs +_sel_registerNameNoLock +___objc_sel_set_create +___objc_sel_set_add +___objc_sel_set_findBuckets +___objc_sel_set_get +__Z9protocolsv +__getObjc2ProtocolList +_NXMapKeyCopyingInsert +__NXMapRehash +__getObjc2ProtocolRefs +__Z13remapProtocolm +__getObjc2NonlazyClassList +__Z12realizeClassP7class_t +__Z11addSubclassP7class_tS0_ +__Z17attachMethodListsP7class_tPP13method_list_tiaPa +__Z15fixupMethodListP13method_list_ta +_memdup +__ZNSt3__113__stable_sortIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorEEEvT0_S6_T_NS_15iterator_traitsIS6_E1 +__Z9addMethodP7class_tP13objc_selectorPFP11objc_objectS3_S1_zEPKca +__Z23getMethodNoSuper_nolockP7class_tP13objc_selector +__ZN7class_t14setHasCustomRREv +__Z20unattachedCategoriesv +_NXMapRemove +__Z21attachCategoryMethodsP7class_tP13category_listPa +_objc_addRegisteredClass +_layout_bitmap_create +_set_bits +_layout_bitmap_free +__ZNSt3__116__insertion_sortIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorEEEvT0_S6_T_ +__Z17buildProtocolListP13category_listPK15protocol_list_tPS3_ 
+__Z17buildPropertyListPK15property_list_tP13category_lista +__ZNSt3__120get_temporary_bufferI8method_tEENS_4pairIPT_lEEl +__ZNSt3__118__stable_sort_moveIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorEEEvT0_S6_T_NS_15iterator_traitsI +__ZNSt3__122__merge_move_constructIRN8method_t16SortBySELAddressEN13method_list_t15method_iteratorES5_EEvT0_S6_T1_S7_PNS_15iter +__ZNSt3__119__merge_move_assignIRN8method_t16SortBySELAddressEPS1_S4_N13method_list_t15method_iteratorEEEvT0_S7_T1_S8_T2_T_ +_NXPtrIsEqual +__getObjc2CategoryList +__Z29addUnattachedCategoryForClassP10category_tP7class_tP12_header_info +__Z16remethodizeClassP7class_t +__Z11flushCachesP7class_t +_flush_cache +__class_getCache +_load_images +_load_images_nolock +_prepare_load_methods +__Z19schedule_class_loadP7class_t +_add_class_to_loadable_list +__class_getLoadMethod +__getObjc2NonlazyCategoryList +_call_load_methods ++[Protocol load] +_objc_lookUpClass +_look_up_class +_object_getClass +_protocol_copyMethodDescriptionList +_class_getClassMethod +__class_getMeta +_look_up_method +__cache_getMethod +__class_getMethod +_method_getTypeEncoding +_method_getImplementation +_method_getName +_class_addMethod +_class_getInstanceMethod +__Z12flushVtablesP7class_t +__Z12updateVtableP7class_ta +_class_replaceMethod +__Z25_method_setImplementationP7class_tP8method_tPFP11objc_objectS4_P13objc_selectorzE +_class_addProtocol +_class_conformsToProtocol +_objc_setExceptionPreprocessor +_objc_setExceptionMatcher +_objc_setUncaughtExceptionHandler +_objc_setForwardHandler +_objc_setEnumerationMutationHandler +_objc_collectingEnabled +_objc_getFutureClass +_objc_assign_strongCast_non_gc +_objc_getClass +__objc_insert_tagged_isa +_objc_msgSend_fixup +__objc_fixupMessageRef +_objc_msgSend +__class_lookupMethodAndLoadCache3 +_lookUpMethod +_prepareForMethodLookup +__class_initialize +__class_getNonMetaClass +__Z15getNonMetaClassP7class_t +__class_getSuperclass +__class_isInitialized +__class_isInitializing +__class_setInitializing +__fetchInitializingClassList +__objc_fetch_pthread_data +_lockForMethodLookup +__cache_getImp +__class_getMethodNoSuper_nolock +_log_and_fill_cache +__cache_fill +_unlockForMethodLookup +_objc_assign_global_non_gc +_class_setSuperclass +_class_setVersion +_objc_msgSend_vtable1 +__objc_rootAlloc +_class_getInstanceSize +__class_getInstanceSize +_class_createInstance +_object_getClassName +__class_getName +_object_getIndexedIvars +_objc_msgSend_vtable0 +__objc_rootAllocWithZone +__objc_rootInit +_objc_msgSend_vtable3 +_objc_assign_ivar_non_gc +__objc_rootRetain +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE16FindAndConstructERKS2_ +__ZNK4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE15LookupBucketForERKS2_RPNSt3__14pairIS2_mEE +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE16InsertIntoBucketERKS2_RKmPNSt3__14pairIS2_mEE +__objc_rootRelease +__objc_rootReleaseWasZero +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE4findERKS2_ +__finishInitializing +__class_setInitialized +_NXFreeMapTable +_NXResetMapTable +__cache_malloc +__class_setCache +__class_setGrowCache +_objc_initializeClassPair +__Z33objc_initializeClassPair_internalP10objc_classPKcS0_S0_ +_objc_registerClassPair +_add_category_to_loadable_list +__category_getLoadMethod +__category_getClass +__class_isLoadable +_objc_msgSendSuper2 +__objc_autoreleasePoolPush +_objc_autoreleasePoolPush +__ZN12_GLOBAL__N_119AutoreleasePoolPageC1EPS0_ 
+__ZN12_GLOBAL__N_119AutoreleasePoolPage9fastcheckEb +_objc_destructInstance +_objc_clear_deallocating +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE5eraseERKS2_ +_objc_msgSend_vtable9 +__class_shouldGrowCache +__cache_collect_free +__cache_collect +_class_getSuperclass +_objc_msgSend_vtable2 +_objc_msgSend_vtable13 +_objc_msgSend_vtable14 +_objc_memmove_collectable +_class_respondsToSelector +__class_resolveMethod +__class_isMetaClass +__cache_addForwardEntry +__objc_rootDealloc +_object_dispose +_objc_msgSend_fixedup +_class_getName +_objc_atomicCompareAndSwapPtrBarrier +_objc_msgSend_vtable7 +__objc_rootAutorelease +__Z22_objc_rootAutorelease2P11objc_object +_objc_msgSend_vtable12 +_objc_msgSend_vtable11 +_objc_msgSend_vtable8 +_objc_msgSend_vtable15 +__objc_autoreleasePoolPop +__ZN12_GLOBAL__N_119AutoreleasePoolPage3popEPv +_objc_msgSend_vtable4 +_objc_msgSend_vtable10 +_objc_retain +_objc_atomicCompareAndSwapInstanceVariableBarrier +_objc_msgSendSuper2_fixup +_objc_msgSendSuper2_fixedup +__collecting_in_critical +__cache_free_block +_class_getVersion +_objc_finalizeOnMainThread +_class_getImageName +__objc_rootZone +__Z35_protocol_conformsToProtocol_nolockP10protocol_tS0_ +_objc_msgSend_vtable5 +_objc_sync_enter +_id2data +_fetch_cache +_objc_sync_exit +_gc_enforcer +_cache_region_calloc +_class_getMethodImplementation +_objc_msgSend_stret +__ZN4objc8DenseMapIP11objc_objectmLb1ENS_12DenseMapInfoIS2_EENS3_ImEEE4growEj +__objc_rootHash +_objc_assign_weak_non_gc +_objc_read_weak_non_gc +_sel_getName +_method_getArgumentType +_encoding_getArgumentType +_encoding_getArgumentInfo +_SkipFirstType +_class_isMetaClass +_objc_allocateClassPair +__calloc_class +_class_getInstanceVariable +__class_getVariable +__Z7getIvarP7class_tPKc +_object_setClass +__class_instancesHaveAssociatedObjects +_method_getNumberOfArguments +_encoding_getNumberOfArguments +_method_copyReturnType +_encoding_copyReturnType +_method_copyArgumentType +_encoding_copyArgumentType +__objc_rootRetainCount +_objc_getAssociatedObject_non_gc +__object_get_associative_reference +__ZN19AssociationsManagerC2Ev +__ZN19AssociationsManager12associationsEv +__ZNK23objc_references_support15ObjcPointerHashclEPv +_objc_release +_objc_removeAssociatedObjects +_objc_setProperty_non_gc +_objc_getProperty_non_gc +_objc_autoreleaseReturnValue +_objc_setAssociatedObject_non_gc +__object_set_associative_reference +__ZN9__gnu_cxx8hash_mapIPvPN23objc_references_support20ObjectAssociationMapENS2_15ObjcPointerHashENS2_16ObjcPointerEqualENS2_13 +__ZNSt3__13mapIPvN23objc_references_support15ObjcAssociationENS2_17ObjectPointerLessENS2_13ObjcAllocatorINS_4pairIKS1_S3_EEEEEi +__ZNSt3__13mapIPvN23objc_references_support15ObjcAssociationENS2_17ObjectPointerLessENS2_13ObjcAllocatorINS_4pairIKS1_S3_EEEEE1 +__ZNSt3__127__tree_balance_after_insertIPNS_16__tree_node_baseIPvEEEEvT_S5_ +__class_setInstancesHaveAssociatedObjects +_ivar_getTypeEncoding +_object_getIvar +_ivar_getOffset +__class_usesAutomaticRetainRelease +__objc_msgForward_internal +__objc_msgForward +_class_copyProtocolList +_protocol_getMethodDescription +__protocol_getMethod +__Z26_protocol_getMethod_nolockP10protocol_tP13objc_selectoraa +_method_getDescription +_ivar_getName +_objc_addExceptionHandler +_read_address +_read_sleb +_fetch_handler_list +_objc_removeExceptionHandler +_SubtypeUntil +_objc_collecting_enabled +_objc_msgSend_vtable6 +_objc_is_finalized +_class_copyPropertyList +_property_getName +_property_getAttributes +_objc_msgSendSuper2_stret 
+_object_setInstanceVariable +_object_setIvar +_objc_assign_ivar +__ZN12_GLOBAL__N_119AutoreleasePoolPage15autoreleaseSlowEP11objc_object +_objc_atomicCompareAndSwapPtr +_objc_atomicCompareAndSwapGlobalBarrier +_sel_getUid +__ZN12_GLOBAL__N_119AutoreleasePoolPage11tls_deallocEPv +__ZN12_GLOBAL__N_119AutoreleasePoolPage4killEv +__objc_constructOrFree +_object_cxxConstruct +_object_cxxConstructFromClass +__class_hasCxxStructors +_lookupMethodInClassAndLoadCache +__class_getMethodNoSuper +_object_cxxDestruct +_object_cxxDestructFromClass +_class_copyIvarList +__objc_rootRetain_slow +__objc_rootReleaseWasZero_slow +_object_copy +__Z20_object_copyFromZoneP11objc_objectmPv +__objc_pthread_destroyspecific +__destroyInitializingClassList +__destroySyncCache +__destroyAltHandlerList +__object_remove_assocations +__ZNSt3__114__split_bufferIN23objc_references_support15ObjcAssociationERNS1_13ObjcAllocatorIS2_EEE9push_backERKS2_ +__ZNSt3__16vectorIN23objc_references_support15ObjcAssociationENS1_13ObjcAllocatorIS2_EEE26__swap_out_circular_bufferERNS_14__sp +__ZNSt3__112__hash_tableINS_4pairIPvPN23objc_references_support20ObjectAssociationMapEEEN9__gnu_cxx17__hash_map_hasherIS6_NS3_1 +__ZNSt3__114__split_bufferIN23objc_references_support15ObjcAssociationERNS1_13ObjcAllocatorIS2_EEE10push_frontERKS2_ +__ZNSt3__16__treeINS_4pairIPvN23objc_references_support15ObjcAssociationEEENS_19__map_value_compareIS2_S4_NS3_17ObjectPointerLe +__ZNSt3__113__tree_removeIPNS_16__tree_node_baseIPvEEEEvT_S5_ diff --git a/markgc.cpp b/markgc.cpp new file mode 100644 index 0000000..49fd2ba --- /dev/null +++ b/markgc.cpp @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2007-2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Some OS X SDKs don't define these. +#ifndef CPU_TYPE_ARM +#define CPU_TYPE_ARM ((cpu_type_t) 12) +#endif +#ifndef CPU_ARCH_ABI64 +#define CPU_ARCH_ABI64 0x01000000 /* 64 bit ABI */ +#endif +#ifndef CPU_TYPE_ARM64 +#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64) +#endif + +// File abstraction taken from ld64/FileAbstraction.hpp +// and ld64/MachOFileAbstraction.hpp. 
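[Editor's note, not part of the patch] The defines above compose the arm64 CPU type from the 32-bit ARM type plus the 64-bit ABI flag, i.e. CPU_TYPE_ARM64 = 12 | 0x01000000 = 0x0100000c. The ld64-style file abstraction that follows pairs an endianness policy (BigEndian/LittleEndian) with a pointer-size policy (Pointer32/Pointer64) so one set of mach-o accessors can be instantiated for all four file flavours. Below is a minimal standalone sketch of that pattern in plain C++; the names BigEndianDemo and Pointer32Demo are illustrative only and do not appear in the patch.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Endianness policy: decode a 32-bit big-endian field regardless of host order.
    struct BigEndianDemo {
        static uint32_t get32(const uint32_t &from) {
            const unsigned char *p = reinterpret_cast<const unsigned char *>(&from);
            return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
                   (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
        }
    };

    // Pointer-size policy: 32-bit file format, byte order chosen by E.
    template <typename E>
    struct Pointer32Demo {
        typedef uint32_t uint_t;
        static uint64_t getP(const uint_t &from) { return E::get32(from); }
    };

    int main() {
        // A cputype field as it would sit in a big-endian 32-bit file:
        // 0x0100000c == CPU_TYPE_ARM | CPU_ARCH_ABI64 (arm64).
        unsigned char raw[4] = { 0x01, 0x00, 0x00, 0x0c };
        uint32_t field;
        std::memcpy(&field, raw, sizeof field);

        uint64_t cputype = Pointer32Demo<BigEndianDemo>::getP(field);
        std::printf("cputype = 0x%llx\n", (unsigned long long)cputype); // prints 0x100000c
        return 0;
    }

The patch's real classes additionally provide setters and 64-bit variants, and parse_macho() dispatches on the mach-o magic to pick the matching Pointer32/Pointer64 + endianness combination in the same way.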
+
+#ifdef __OPTIMIZE__
+#define INLINE __attribute__((always_inline))
+#else
+#define INLINE
+#endif
+
+//
+// This abstraction layer is for use with file formats that have 64-bit/32-bit and Big-Endian/Little-Endian variants
+//
+// For example: to make a utility that handles 32-bit little endian files use: Pointer32<LittleEndian>
+//
+//
+// get16()      read a 16-bit number from an E endian struct
+// set16()      write a 16-bit number to an E endian struct
+// get32()      read a 32-bit number from an E endian struct
+// set32()      write a 32-bit number to an E endian struct
+// get64()      read a 64-bit number from an E endian struct
+// set64()      write a 64-bit number to an E endian struct
+//
+// getBits()    read a bit field from an E endian struct (bitCount=number of bits in field, firstBit=bit index of field)
+// setBits()    write a bit field to an E endian struct (bitCount=number of bits in field, firstBit=bit index of field)
+//
+// getBitsRaw() read a bit field from a struct with native endianness
+// setBitsRaw() write a bit field to a struct with native endianness
+//
+
+class BigEndian
+{
+public:
+    static uint16_t get16(const uint16_t& from) INLINE { return OSReadBigInt16(&from, 0); }
+    static void set16(uint16_t& into, uint16_t value) INLINE { OSWriteBigInt16(&into, 0, value); }
+
+    static uint32_t get32(const uint32_t& from) INLINE { return OSReadBigInt32(&from, 0); }
+    static void set32(uint32_t& into, uint32_t value) INLINE { OSWriteBigInt32(&into, 0, value); }
+
+    static uint64_t get64(const uint64_t& from) INLINE { return OSReadBigInt64(&from, 0); }
+    static void set64(uint64_t& into, uint64_t value) INLINE { OSWriteBigInt64(&into, 0, value); }
+
+    static uint32_t getBits(const uint32_t& from,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { return getBitsRaw(get32(from), firstBit, bitCount); }
+    static void setBits(uint32_t& into, uint32_t value,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = get32(into); setBitsRaw(temp, value, firstBit, bitCount); set32(into, temp); }
+
+    static uint32_t getBitsRaw(const uint32_t& from,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { return ((from >> (32-firstBit-bitCount)) & ((1<<bitCount)-1)); }
+    static void setBitsRaw(uint32_t& into, uint32_t value,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = into; const uint32_t mask = ((1<<bitCount)-1); temp &= ~(mask<<(32-firstBit-bitCount)); temp |= ((value&mask) << (32-firstBit-bitCount)); into = temp; }
+};
+
+class LittleEndian
+{
+public:
+    static uint16_t get16(const uint16_t& from) INLINE { return OSReadLittleInt16(&from, 0); }
+    static void set16(uint16_t& into, uint16_t value) INLINE { OSWriteLittleInt16(&into, 0, value); }
+
+    static uint32_t get32(const uint32_t& from) INLINE { return OSReadLittleInt32(&from, 0); }
+    static void set32(uint32_t& into, uint32_t value) INLINE { OSWriteLittleInt32(&into, 0, value); }
+
+    static uint64_t get64(const uint64_t& from) INLINE { return OSReadLittleInt64(&from, 0); }
+    static void set64(uint64_t& into, uint64_t value) INLINE { OSWriteLittleInt64(&into, 0, value); }
+
+    static uint32_t getBits(const uint32_t& from,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { return getBitsRaw(get32(from), firstBit, bitCount); }
+    static void setBits(uint32_t& into, uint32_t value,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = get32(into); setBitsRaw(temp, value, firstBit, bitCount); set32(into, temp); }
+
+    static uint32_t getBitsRaw(const uint32_t& from,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { return ((from >> firstBit) & ((1<<bitCount)-1)); }
+    static void setBitsRaw(uint32_t& into, uint32_t value,
+                        uint8_t firstBit, uint8_t bitCount) INLINE { uint32_t temp = into; const uint32_t mask = ((1<<bitCount)-1); temp &= ~(mask<<firstBit); temp |= ((value&mask) << firstBit); into = temp; }
+};
+
+
+template <typename _E>
+class Pointer32
+{
+public:
+    typedef uint32_t uint_t;
+    typedef int32_t sint_t;
+    typedef _E E;
+
+    static uint64_t getP(const uint_t& from) INLINE { return _E::get32(from); }
+    static void setP(uint_t& into, uint64_t value) INLINE { _E::set32(into, value); }
+};
+
+
+template <typename _E>
+class Pointer64
+{
+public:
+    typedef uint64_t uint_t;
+    typedef int64_t sint_t;
+    typedef _E E;
+
+    static uint64_t getP(const uint_t& from) INLINE { return _E::get64(from); }
+    static void setP(uint_t& into, uint64_t value) INLINE { _E::set64(into, value); }
+};
+
+
+//
+// mach-o file header
+//
+template <typename P> struct macho_header_content {};
+template <> struct macho_header_content<Pointer32<BigEndian> >    { mach_header    fields; };
+template <> struct macho_header_content<Pointer64<BigEndian> >    { mach_header_64 fields; };
+template <> struct macho_header_content<Pointer32<LittleEndian> > { mach_header    fields; };
+template <> struct macho_header_content<Pointer64<LittleEndian> > { mach_header_64 fields; };
+
+template <typename P>
+class macho_header {
+public:
+    uint32_t magic() const INLINE { return E::get32(header.fields.magic); }
+    void set_magic(uint32_t value) INLINE { E::set32(header.fields.magic, value); }
+
+    uint32_t cputype() const INLINE { return E::get32(header.fields.cputype); }
+    void set_cputype(uint32_t value) INLINE { E::set32((uint32_t&)header.fields.cputype, value); }
+
+    uint32_t cpusubtype() const INLINE { return E::get32(header.fields.cpusubtype); }
+    void
set_cpusubtype(uint32_t value) INLINE { E::set32((uint32_t&)header.fields.cpusubtype, value); } + + uint32_t filetype() const INLINE { return E::get32(header.fields.filetype); } + void set_filetype(uint32_t value) INLINE { E::set32(header.fields.filetype, value); } + + uint32_t ncmds() const INLINE { return E::get32(header.fields.ncmds); } + void set_ncmds(uint32_t value) INLINE { E::set32(header.fields.ncmds, value); } + + uint32_t sizeofcmds() const INLINE { return E::get32(header.fields.sizeofcmds); } + void set_sizeofcmds(uint32_t value) INLINE { E::set32(header.fields.sizeofcmds, value); } + + uint32_t flags() const INLINE { return E::get32(header.fields.flags); } + void set_flags(uint32_t value) INLINE { E::set32(header.fields.flags, value); } + + uint32_t reserved() const INLINE { return E::get32(header.fields.reserved); } + void set_reserved(uint32_t value) INLINE { E::set32(header.fields.reserved, value); } + + typedef typename P::E E; +private: + macho_header_content
<P>
header; +}; + + +// +// mach-o load command +// +template +class macho_load_command { +public: + uint32_t cmd() const INLINE { return E::get32(command.cmd); } + void set_cmd(uint32_t value) INLINE { E::set32(command.cmd, value); } + + uint32_t cmdsize() const INLINE { return E::get32(command.cmdsize); } + void set_cmdsize(uint32_t value) INLINE { E::set32(command.cmdsize, value); } + + typedef typename P::E E; +private: + load_command command; +}; + + + + +// +// mach-o segment load command +// +template struct macho_segment_content {}; +template <> struct macho_segment_content > { segment_command fields; enum { CMD = LC_SEGMENT }; }; +template <> struct macho_segment_content > { segment_command_64 fields; enum { CMD = LC_SEGMENT_64 }; }; +template <> struct macho_segment_content > { segment_command fields; enum { CMD = LC_SEGMENT }; }; +template <> struct macho_segment_content > { segment_command_64 fields; enum { CMD = LC_SEGMENT_64 }; }; + +template +class macho_segment_command { +public: + uint32_t cmd() const INLINE { return E::get32(segment.fields.cmd); } + void set_cmd(uint32_t value) INLINE { E::set32(segment.fields.cmd, value); } + + uint32_t cmdsize() const INLINE { return E::get32(segment.fields.cmdsize); } + void set_cmdsize(uint32_t value) INLINE { E::set32(segment.fields.cmdsize, value); } + + const char* segname() const INLINE { return segment.fields.segname; } + void set_segname(const char* value) INLINE { strncpy(segment.fields.segname, value, 16); } + + uint64_t vmaddr() const INLINE { return P::getP(segment.fields.vmaddr); } + void set_vmaddr(uint64_t value) INLINE { P::setP(segment.fields.vmaddr, value); } + + uint64_t vmsize() const INLINE { return P::getP(segment.fields.vmsize); } + void set_vmsize(uint64_t value) INLINE { P::setP(segment.fields.vmsize, value); } + + uint64_t fileoff() const INLINE { return P::getP(segment.fields.fileoff); } + void set_fileoff(uint64_t value) INLINE { P::setP(segment.fields.fileoff, value); } + + uint64_t filesize() const INLINE { return P::getP(segment.fields.filesize); } + void set_filesize(uint64_t value) INLINE { P::setP(segment.fields.filesize, value); } + + uint32_t maxprot() const INLINE { return E::get32(segment.fields.maxprot); } + void set_maxprot(uint32_t value) INLINE { E::set32((uint32_t&)segment.fields.maxprot, value); } + + uint32_t initprot() const INLINE { return E::get32(segment.fields.initprot); } + void set_initprot(uint32_t value) INLINE { E::set32((uint32_t&)segment.fields.initprot, value); } + + uint32_t nsects() const INLINE { return E::get32(segment.fields.nsects); } + void set_nsects(uint32_t value) INLINE { E::set32(segment.fields.nsects, value); } + + uint32_t flags() const INLINE { return E::get32(segment.fields.flags); } + void set_flags(uint32_t value) INLINE { E::set32(segment.fields.flags, value); } + + enum { + CMD = macho_segment_content
<P>
::CMD + }; + + typedef typename P::E E; +private: + macho_segment_content
<P>
segment; +}; + + +// +// mach-o section +// +template struct macho_section_content {}; +template <> struct macho_section_content > { section fields; }; +template <> struct macho_section_content > { section_64 fields; }; +template <> struct macho_section_content > { section fields; }; +template <> struct macho_section_content > { section_64 fields; }; + +template +class macho_section { +public: + const char* sectname() const INLINE { return section.fields.sectname; } + void set_sectname(const char* value) INLINE { strncpy(section.fields.sectname, value, 16); } + + const char* segname() const INLINE { return section.fields.segname; } + void set_segname(const char* value) INLINE { strncpy(section.fields.segname, value, 16); } + + uint64_t addr() const INLINE { return P::getP(section.fields.addr); } + void set_addr(uint64_t value) INLINE { P::setP(section.fields.addr, value); } + + uint64_t size() const INLINE { return P::getP(section.fields.size); } + void set_size(uint64_t value) INLINE { P::setP(section.fields.size, value); } + + uint32_t offset() const INLINE { return E::get32(section.fields.offset); } + void set_offset(uint32_t value) INLINE { E::set32(section.fields.offset, value); } + + uint32_t align() const INLINE { return E::get32(section.fields.align); } + void set_align(uint32_t value) INLINE { E::set32(section.fields.align, value); } + + uint32_t reloff() const INLINE { return E::get32(section.fields.reloff); } + void set_reloff(uint32_t value) INLINE { E::set32(section.fields.reloff, value); } + + uint32_t nreloc() const INLINE { return E::get32(section.fields.nreloc); } + void set_nreloc(uint32_t value) INLINE { E::set32(section.fields.nreloc, value); } + + uint32_t flags() const INLINE { return E::get32(section.fields.flags); } + void set_flags(uint32_t value) INLINE { E::set32(section.fields.flags, value); } + + uint32_t reserved1() const INLINE { return E::get32(section.fields.reserved1); } + void set_reserved1(uint32_t value) INLINE { E::set32(section.fields.reserved1, value); } + + uint32_t reserved2() const INLINE { return E::get32(section.fields.reserved2); } + void set_reserved2(uint32_t value) INLINE { E::set32(section.fields.reserved2, value); } + + typedef typename P::E E; +private: + macho_section_content
<P>
section;
+};
+
+
+
+
+static bool debug = true;
+
+bool processFile(const char *filename);
+
+int main(int argc, const char *argv[]) {
+    for (int i = 1; i < argc; ++i) {
+        if (!processFile(argv[i])) return 1;
+    }
+    return 0;
+}
+
+struct imageinfo {
+    uint32_t version;
+    uint32_t flags;
+};
+
+
+// Segment and section names are 16 bytes and may be un-terminated.
+bool segnameEquals(const char *lhs, const char *rhs)
+{
+    return 0 == strncmp(lhs, rhs, 16);
+}
+
+bool segnameStartsWith(const char *segname, const char *prefix)
+{
+    return 0 == strncmp(segname, prefix, strlen(prefix));
+}
+
+bool sectnameEquals(const char *lhs, const char *rhs)
+{
+    return segnameEquals(lhs, rhs);
+}
+
+
+template <typename P>
+void dosect(uint8_t *start, macho_section
<P>
*sect)
+{
+    if (debug) printf("section %.16s from segment %.16s\n",
+                      sect->sectname(), sect->segname());
+
+    // Strip S_MOD_INIT/TERM_FUNC_POINTERS. We don't want dyld to call
+    // our init funcs because it is too late, and we don't want anyone to
+    // call our term funcs ever.
+    if (segnameStartsWith(sect->segname(), "__DATA") &&
+        sectnameEquals(sect->sectname(), "__mod_init_func"))
+    {
+        // section type 0 is S_REGULAR
+        sect->set_flags(sect->flags() & ~SECTION_TYPE);
+        sect->set_sectname("__objc_init_func");
+        if (debug) printf("disabled __mod_init_func section\n");
+    }
+    if (segnameStartsWith(sect->segname(), "__DATA") &&
+        sectnameEquals(sect->sectname(), "__mod_term_func"))
+    {
+        // section type 0 is S_REGULAR
+        sect->set_flags(sect->flags() & ~SECTION_TYPE);
+        sect->set_sectname("__objc_term_func");
+        if (debug) printf("disabled __mod_term_func section\n");
+    }
+}
+
+template <typename P>
+void doseg(uint8_t *start, macho_segment_command
<P>
*seg) +{ + if (debug) printf("segment name: %.16s, nsects %u\n", + seg->segname(), seg->nsects()); + macho_section
<P>
*sect = (macho_section
<P>
*)(seg + 1);
+    for (uint32_t i = 0; i < seg->nsects(); ++i) {
+        dosect(start, &sect[i]);
+    }
+}
+
+
+template <typename P>
+bool parse_macho(uint8_t *buffer)
+{
+    macho_header
<P>
* mh = (macho_header
<P>
*)buffer; + uint8_t *cmds = (uint8_t *)(mh + 1); + for (uint32_t c = 0; c < mh->ncmds(); c++) { + macho_load_command
<P>
* cmd = (macho_load_command
<P>
*)cmds; + cmds += cmd->cmdsize(); + if (cmd->cmd() == LC_SEGMENT || cmd->cmd() == LC_SEGMENT_64) { + doseg(buffer, (macho_segment_command
<P>
*)cmd); + } + } + + return true; +} + + +bool parse_macho(uint8_t *buffer) +{ + uint32_t magic = *(uint32_t *)buffer; + + switch (magic) { + case MH_MAGIC_64: + return parse_macho>(buffer); + case MH_MAGIC: + return parse_macho>(buffer); + case MH_CIGAM_64: + return parse_macho>(buffer); + case MH_CIGAM: + return parse_macho>(buffer); + default: + printf("file is not mach-o (magic %x)\n", magic); + return false; + } +} + + +bool parse_fat(uint8_t *buffer, size_t size) +{ + uint32_t magic; + + if (size < sizeof(magic)) { + printf("file is too small\n"); + return false; + } + + magic = *(uint32_t *)buffer; + if (magic != FAT_MAGIC && magic != FAT_CIGAM) { + /* Not a fat file */ + return parse_macho(buffer); + } else { + struct fat_header *fh; + uint32_t fat_magic, fat_nfat_arch; + struct fat_arch *archs; + + if (size < sizeof(struct fat_header)) { + printf("file is too small\n"); + return false; + } + + fh = (struct fat_header *)buffer; + fat_magic = OSSwapBigToHostInt32(fh->magic); + fat_nfat_arch = OSSwapBigToHostInt32(fh->nfat_arch); + + if (size < (sizeof(struct fat_header) + fat_nfat_arch * sizeof(struct fat_arch))) { + printf("file is too small\n"); + return false; + } + + archs = (struct fat_arch *)(buffer + sizeof(struct fat_header)); + + /* Special case hidden CPU_TYPE_ARM64 */ + if (size >= (sizeof(struct fat_header) + (fat_nfat_arch + 1) * sizeof(struct fat_arch))) { + if (fat_nfat_arch > 0 + && OSSwapBigToHostInt32(archs[fat_nfat_arch].cputype) == CPU_TYPE_ARM64) { + fat_nfat_arch++; + } + } + /* End special case hidden CPU_TYPE_ARM64 */ + + if (debug) printf("%d fat architectures\n", + fat_nfat_arch); + + for (uint32_t i = 0; i < fat_nfat_arch; i++) { + uint32_t arch_cputype = OSSwapBigToHostInt32(archs[i].cputype); + uint32_t arch_cpusubtype = OSSwapBigToHostInt32(archs[i].cpusubtype); + uint32_t arch_offset = OSSwapBigToHostInt32(archs[i].offset); + uint32_t arch_size = OSSwapBigToHostInt32(archs[i].size); + + if (debug) printf("cputype %d cpusubtype %d\n", + arch_cputype, arch_cpusubtype); + + /* Check that slice data is after all fat headers and archs */ + if (arch_offset < (sizeof(struct fat_header) + fat_nfat_arch * sizeof(struct fat_arch))) { + printf("file is badly formed\n"); + return false; + } + + /* Check that the slice ends before the file does */ + if (arch_offset > size) { + printf("file is badly formed\n"); + return false; + } + + if (arch_size > size) { + printf("file is badly formed\n"); + return false; + } + + if (arch_offset > (size - arch_size)) { + printf("file is badly formed\n"); + return false; + } + + bool ok = parse_macho(buffer + arch_offset); + if (!ok) return false; + } + return true; + } +} + +bool processFile(const char *filename) +{ + if (debug) printf("file %s\n", filename); + int fd = open(filename, O_RDWR); + if (fd < 0) { + printf("open %s: %s\n", filename, strerror(errno)); + return false; + } + + struct stat st; + if (fstat(fd, &st) < 0) { + printf("fstat %s: %s\n", filename, strerror(errno)); + return false; + } + + void *buffer = mmap(NULL, (size_t)st.st_size, PROT_READ|PROT_WRITE, + MAP_FILE|MAP_SHARED, fd, 0); + if (buffer == MAP_FAILED) { + printf("mmap %s: %s\n", filename, strerror(errno)); + return false; + } + + bool result = parse_fat((uint8_t *)buffer, (size_t)st.st_size); + munmap(buffer, (size_t)st.st_size); + close(fd); + return result; +} diff --git a/objc.sln b/objc.sln new file mode 100755 index 0000000..ddfd6e0 --- /dev/null +++ b/objc.sln @@ -0,0 +1,34 @@ + +Microsoft Visual Studio Solution File, Format Version 9.00 +# 
Visual C++ Express 2005 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "objc", "objc.vcproj", "{B3408263-0CF1-47BE-83CC-56070EFC9BC1}" +EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "objcrt", "objcrt\objcrt.vcproj", "{E38C1996-8B3D-4050-A4B2-DC85957B047D}" + ProjectSection(ProjectDependencies) = postProject + {B3408263-0CF1-47BE-83CC-56070EFC9BC1} = {B3408263-0CF1-47BE-83CC-56070EFC9BC1} + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Win32 = Debug|Win32 + DebugDLL|Win32 = DebugDLL|Win32 + Release|Win32 = Release|Win32 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.Debug|Win32.ActiveCfg = Debug|Win32 + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.Debug|Win32.Build.0 = Debug|Win32 + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.DebugDLL|Win32.ActiveCfg = DebugDLL|Win32 + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.DebugDLL|Win32.Build.0 = DebugDLL|Win32 + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.Release|Win32.ActiveCfg = Release|Win32 + {B3408263-0CF1-47BE-83CC-56070EFC9BC1}.Release|Win32.Build.0 = Release|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.Debug|Win32.ActiveCfg = Debug|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.Debug|Win32.Build.0 = Debug|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.DebugDLL|Win32.ActiveCfg = Debug|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.DebugDLL|Win32.Build.0 = Debug|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.Release|Win32.ActiveCfg = Release|Win32 + {E38C1996-8B3D-4050-A4B2-DC85957B047D}.Release|Win32.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/objc.suo b/objc.suo new file mode 100755 index 0000000000000000000000000000000000000000..7f8180c631918658823f234ec88fd2017eac6078 GIT binary patch literal 24576 zcmeI433wLOy~YPn5cdUaL5o2)5fa%E5FrpiFf0-QEm4FJAZ$SrKm`PC)hn(QDpso2 ziel}>>aAKITd#_(f*aKKwraJlZLM}wYrU;qB=`NzyhBbx^5qLq+WR~ne82Pme&7Gh zIdkT0b7ta>?MMCm&IjAR8Py7>N6n(0RV}0D0dFE3#o@>(YAOTmtg5O?U=+#xB>X?B zfsa+z19b7)N(V|@?}MZk(jihy>0s$l=`iVV=_u(4NlhR+S~^xbMrtLsmOdqYM*6hW zMmkQSjU2DIqiw#|74r0ZUA#7Hm!)6rbf0{$SpyTJa{a4}a-tE@LdA=ti&eAPhhx=h z(j#*HGle5J514q@DKVw=`1fEA^A+N&}?<66psiK1~`doh}WLhD!O;Flo3{AdQglr{+H|jrR959gkH! 
zP8u&wkj|1ON=4Gy(j;lJG(|c`I#>FvG*v2=&XdlUrb!n_)1?_wiA26%QanqVEzOb2 zq{}3Jo3D6*v`{LS7D*RMi>1q@N@>a?#fih#j5{u)px0^*{bX5 zs^@ue-z*lNp%zx6)|mFAy(~t&j-tH{SF0^n%VJcVtSu|gHSLnB~vPW+;`%W2|k(z zc4}tJQvYmX)g^~z8e$4!BD}w8`=(klS5-A>8Z{|i>xzz|2d1uVvQ^&`(1cdNdRtB+ z^arg=(RbQvr&|0jeScUwN*b${GCyvw^nAuC`u`%2BFuz?TiD+t39wHVb-ZXsQtz5jP?+duav+2+rE4RbYt{$8m(7pvDXdRC9L>B_WJeWEfRK^comYmG<)l|RQL zZ$f;~20m(@f4PpoMCY6wIXveHs)q{IL8g(Yhw^7#mgJsY{?yfv-5Cm$M}7IHYvF=; zPMBWjS>-8zT5@CZKV1HOd^E#xk{mW%ahSx=X_S zM_TvUo`2Z>dTF)XTkDt}%6FuOv0Po5{u&=f$U0TO4A;M0t*ZOUqrWslUc)cTGZt!1 zo%Gn=x(mrue(CCIn6B$g)lp@<3g_%-_0^-@?x_bwL*=nQOFL_~@`l$ef1j6TqS`mB zSlWAOBWjNEs{XWt;(NJ$TW|H}mZ!0)_Ek=5O_R0e*CG47)~ee~_HB(BsqS=x;yd7d zTU&MKRDVsWaJyC_MN!8?KF;rU-BV{g_}YEjkGXrw5rxs=+uF|mN6QZ5Z>+lNqwQNZ zj=;Y#db?Gpm%q`W!>C8zdUIu$84EmtoR!0UcCuP+{M}onW}8ag2c`FOdAcJi(dd=z zNhQ18|GcAfUTvc@;vO+mpNpgLPLE&NE1@lNr&(R$|ca24|7==qU@-Qk@ z>icA^4y(^ES8TD4v{3g~2~Afytb-P*ce3)LB*|#aT$nke^G|EJWjp_LkngQ@7llbG z3%O2LnJIT#o@8FAZ`GHi<#SKw-*EmrL-jN>&U>NGdA>?BQl(++rWUfwzX|JhM)z#b zKg@r8y!M%|QY?%$JE-{r5%zX61=atqz4&bN@1g_t@WutZ7{$tkHo;0O+3!gEL$0n1 zYqN6AZR|#I=eSxaXl+R^>8R0=9prgRne32Ms1;AvN}-3=2YK;lrt+V&UAIbm5ti;n z+vnT~tY5p%8CYCCV{S=q<>Dm^=f!5Rtje+lr3v?!JT~B>=BF)OGJih%h*8v~p_C=l zOXigBo-Q{|SG~u>nm66!Ss=Ui9#4hZ8jC>9E_}udOGk5R3{Vo|;Tr_9;;?kK@$|~nf9=nHw z$G|-~Ojx23x<@-Tz84#yyRv@qT^qYQ1@WC+58b)-kd?2yx?$@7x#Gj+-&ekUswM=DB3gm7}n)g@ti#c@TnqDOqcS&-G8WD~6#^L5nax(nUA zvkr63wAQb^(hu+Fvb!tUUnR}vu4G@9r9eF|d2X;T?@IPZ3G1sV+UuZr?8w(B!di&Y zWw^$y+4_Y$_=c`{&3pgd?+WWWyPCZx9Ir~^If3UeNuOedUAWt7C)3)k=CQ+$7e&`^ z>$HY5&NTnAsohXWpa1Y&mpP~*e|~3IZKfSF<^*;Z&sHDt9D4&!T*|d(Dko;<5f!J#n`UGu{wouSBaQR z*+Yf##D|fHdoehST>zd!!e|XJeCU-pAb#jHeL;CCb8X7>qIulPJ${DfJs^Hp+~Z8@-eGOf$}HoJ2g{ zjPsr-5f_<}u2|wp^m%5K?|g|moMy&(&yXnJEHlbiCK1myBP}C8e)G&I!$Px*%y`>` zw=6ifRc54JEpaWblsI4N?`kPWS|h>myGA1IwGxantd-hJ>m=AQ(l;cI@qh%QEZ>p1 zK95T&d(!MFGwSFuiSV?9ANBElse|;g1ZyR|Byqk!kYJSM=Tdv=mlBLJ|4O3FZ%Z() z`OhTU#ye8#_Z!(KNWV9uEI*aV;}2#W?_H^r^hY!D{a=Z8^=C8u{vvUGKQimC_(O@~ z{WV}!vgr>LgtFk*R0iw-Gx|Vt+3eo5FvG8cb(KvXxq)8~+4PrQX4FA%+4PsbW~3Qxd$?or9c6p0W6u37 z+mjrVc8cw(0Xxt3bjOr=j%?b^Trqi^g|eF}t_b{=*uKayZF__5 zFFR%&I8oBqC}p&vmJ;E1{gCafm!zoux%-ML`@K`{ARFhM{c0^)^KqX4+!->{`eQ)t z%Bti0o~(aZ`(5f|Wy-%_9r@Kptbgc9w3KXHLWO(}ZI&Db62{EHh{WvIQGziNc9Iw!nL%LPq+SxEZf^<3>`0r=k!V|KTCOwc z>2I(Dq|+q=YxR_kiAk~=f)OT3tWu^(DO)DHKw2I!Rv}#L{|MO4vX@Bf%_!4XCBj`2 z$19iamI#|9*iz|U=^p885{wnfMk!>}%Y72%VD*CEV(Gsm!b1}5V(DRt@GS|pLV8pp zuzG=gPWrY)_-_feH_yHr_w1w3sB7W@kzB)&P0OIwbe2d<%gB>HC7mL{Xc@GY94T!r z)LtKnmeE(jkCrh|B5;LMHZms3+8IVTOQK~INhzz4O}b?PyIeLc(hiC6eF;Xx`5%e!BMCNCdPO3lI%uF;?&$U1R$?#kbpL4*P;cYoVpAlfEcX+V#>c(rpsy zHz@wH?ev&m@%OJfmfI&`Ki5F|281p_9MU(=vz>oy?!JWkLN$L$*0Z$dnon%$K~E%E z$Gs2_dg&H3dLfKn`aLsxA&g$S)r?*UqnAE!mi9>Uc*!j7k@V6ZN>sOh+M}p(Z(Hm> zs(yb&3DP$_BG$H;dl0g_SG9kJlU@9CA6OEZ@DT#eUHkLRVK4v z_4|f{{8o2TMr|iB~y%_lIFncxdd)@4< z!0#Qi_X58U%>Ewu?KC@3LqD?&hirHy_-w?CHz;C?S z)WGk2v)O^)T(b)Uze=-}f!}Jgs{+3@W;X|ZUozVm_-!(~Kk$3d>|24~V`f_ezvs(3@`&*}NW=7gJvdOot8EKC* zJKl_Zo69EO&zO<6pX|Ph`qQ4y1;L} z*R=m-Sw43a=*^E3MFnidH@;xh?J(jIzly95ab7maxQQ4I5c{9?!Dm!2C zYi6YViP`IBok~X5?|8J}6&v#c5k>VRn!i$NNBO$fKp1 zqUbaFpnUDjDBlTYIcAjaP}!tyZ>A{fqYv)h`kIlppILu1MbQ|23{`xl8GeOkW6h2i z8z`GF-i$JLG@EF~*BSa4rMSe5#lPWlwa0L>W@nO7;RZ@l7kolAXnX_3@dFU1_2dhKgA z#0in?y}% z%QZsBJ-NsJx2ogXH+6CUn;Eo|*={H0K|5J&wleTrZMG)xyUuKV;CHLpU1qia8ll&m z3)$}9)nC*4Ze!L}@yQZ>B+sma;=U4NRzI`$idoa}8#{>f5Bl0r$N7q3$4ZQ`gC)jp z7<=jCB#t*;O8w4}-Ay{zjP}5;BYkwL8TJ&3aje)(QT!%)GsUH5_|22rOY_YX#s3Ev z{3-*sOg4;N3d+J5vr1|!t(IWa$LDNc=~z+ppAu>Bm+;f@7(abewCfoP_Cr!L9WDH8 zp8fH60H0j{f2Zr8JY}Vw5T1}|J6k0f^Xp3z{c5|E`u##Ses7s+nu^~AQxv^thW);T 
z-=CzkEq*AQ@HYvjC1b>fP9F&Xqn|aAK_6{qMjn5c2+hrol+C+aghS1+50g#4tpnD< zjQ6qhUmUudp8U2xWve+@57O>H^ z7-@Oesz7o2E*15%NGw-z`u-H-*=!$YnPZ{oV%e1CvVdJ-wkBZLncWbu^=7vOY?Il& z0sE%eLuQ=YW3m}DzimdDzhm~eSr@Sse7(Oeg8n&K9Uh2wCTY|4B@z*?Cd z8?ZKJ#|7+kv;2UKFdG%HF=mAUJKJn>z*d^AGGpEtCYx|W;CG{J=F|-V+a#OwxX+9> z`yJUiiXRWyoA!IhOg~4zH~T}t{${q*jCN2Zn|wPeqqP3I$R^*eX4J^Lg7> z1%6lC?^?4~^1EI(@eO7?W4J{&YlsbIIkMN=zRhuau{&))60mRCe!($q{6*QPDBf;H zUF|S?*-R+uA=+jQft0ZS&W~_)(U=vS~{L0yfa> zw15pZ8yc{Dvk_(-r;lvTXQUZzqnGVbj_Io>%I>c?Wiq2NVw`WG{a_Q!ip&^urpoTD zxY&&R&Xe6;@%d(4!*gw4;P}&GlVo#Wmok}Ai5U5o+7DJ{HqVSQFPB}YctyZg$sR4X zI$&4JW?s71OlEY8Z1UJ(c8u)XW%p2gN8tBW`)xGi8a*VN@R%9r`)#wQ%=rDgvdQBa zGs^s|?dJl&7wq@48G9fE7ltYu%d{YK#TGu!VwW?k?{+uV@RuDIq0*lrcD*0wu2 z=G?l-X1>WaQxxTy^)VBQ&XUc!6`65vlVx{Qd~V=ZY`^o(m~$?$T^jh!wqKbU{bQc( z#R03ZeTieryj(VO=%r?gqE%*>n^CSSY+nm2xXHR~Dp^)VY1_?>PxGVmL1 zHX-mUGMgIsoo`ke_{}j}6!=w`T@v`MG`lMBTVu91@VnXU%Yok=W*Y;)O=g<|zX!}7 z4*VW9dm`}LV)ks{_ng^|!0!iUuLOQSG5bZ}_m5|7J`DW+V)l=~ugdH|?>*2C z4w5Y#9cD&5IMS?5;CH-P$H4EOu>Tg9Wq6x7e@Y Q{U`o^AN>8~o^Hqf9~E!sJ^%m! literal 0 HcmV?d00001 diff --git a/objc.vcproj b/objc.vcproj new file mode 100644 index 0000000..2c84662 --- /dev/null +++ b/objc.vcproj @@ -0,0 +1,1088 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/objc.xcodeproj/project.pbxproj b/objc.xcodeproj/project.pbxproj new file mode 100644 index 0000000..2c6187d --- /dev/null +++ b/objc.xcodeproj/project.pbxproj @@ -0,0 +1,823 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXAggregateTarget section */ + 837F67A81A771F63004D34FA /* objc-simulator */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 837F67A91A771F63004D34FA /* Build configuration list for PBXAggregateTarget "objc-simulator" */; + buildPhases = ( + ); + dependencies = ( + 837F67AD1A771F6E004D34FA /* PBXTargetDependency */, + ); + name = "objc-simulator"; + productName = objc_simulator; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 393CEAC00DC69E3E000B69DE /* objc-references.mm in Sources */ = {isa = PBXBuildFile; fileRef = 393CEABF0DC69E3E000B69DE /* objc-references.mm */; }; + 393CEAC60DC69E67000B69DE /* objc-references.h in Headers */ = {isa = PBXBuildFile; fileRef = 393CEAC50DC69E67000B69DE /* objc-references.h */; }; + 39ABD72312F0B61800D1054C /* objc-weak.h in Headers */ = {isa = PBXBuildFile; fileRef = 39ABD71F12F0B61800D1054C /* objc-weak.h */; }; + 39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */ = {isa = PBXBuildFile; fileRef = 39ABD72012F0B61800D1054C /* objc-weak.mm */; }; + 830F2A740D737FB800392440 /* objc-msg-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A690D737FB800392440 /* objc-msg-arm.s */; }; + 830F2A750D737FB900392440 /* objc-msg-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A6A0D737FB800392440 /* objc-msg-i386.s */; }; + 830F2A7D0D737FBB00392440 /* objc-msg-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A720D737FB800392440 /* objc-msg-x86_64.s */; }; + 830F2A950D73876100392440 /* objc-accessors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 830F2A930D73876100392440 /* objc-accessors.mm */; }; + 830F2A980D738DC200392440 /* hashtable.h in Headers */ = {isa = PBXBuildFile; fileRef = 830F2A970D738DC200392440 /* hashtable.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 83112ED40F00599600A5FBAF /* objc-internal.h in Headers */ = {isa = PBXBuildFile; fileRef = 83112ED30F00599600A5FBAF /* objc-internal.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 831C85D50E10CF850066E64C /* objc-os.h in Headers */ = {isa = PBXBuildFile; fileRef = 831C85D30E10CF850066E64C /* objc-os.h */; }; + 831C85D60E10CF850066E64C /* objc-os.mm in Sources */ = {isa = PBXBuildFile; fileRef = 831C85D40E10CF850066E64C /* objc-os.mm */; }; + 834266D80E665A8B002E4DA2 /* objc-gdb.h in Headers */ = {isa = PBXBuildFile; fileRef = 834266D70E665A8B002E4DA2 /* objc-gdb.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */; }; + 834EC0A411614167009B2563 /* objc-abi.h in Headers */ = {isa = PBXBuildFile; fileRef = 834EC0A311614167009B2563 /* objc-abi.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83725F4914CA5BFA0014370E /* objc-opt.mm */; }; + 8379996E13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */; }; + 8383A3A3122600E9009290B8 /* a1a2-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */; }; + 8383A3A4122600E9009290B8 /* a2a3-blocktramps-arm.s in Sources */ = {isa = PBXBuildFile; fileRef = 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */; }; + 838485BF0D6D687300CEA253 /* hashtable2.h in Headers */ = {isa = PBXBuildFile; fileRef = 
838485B70D6D687300CEA253 /* hashtable2.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485C00D6D687300CEA253 /* hashtable2.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485B80D6D687300CEA253 /* hashtable2.mm */; }; + 838485C30D6D687300CEA253 /* maptable.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BB0D6D687300CEA253 /* maptable.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 838485C40D6D687300CEA253 /* maptable.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485BC0D6D687300CEA253 /* maptable.mm */; }; + 838485EF0D6D68A200CEA253 /* objc-api.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C80D6D68A200CEA253 /* objc-api.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485F00D6D68A200CEA253 /* objc-auto.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485C90D6D68A200CEA253 /* objc-auto.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CA0D6D68A200CEA253 /* objc-auto.mm */; settings = {COMPILER_FLAGS = "-fexceptions"; }; }; + 838485F20D6D68A200CEA253 /* objc-cache.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CB0D6D68A200CEA253 /* objc-cache.mm */; }; + 838485F30D6D68A200CEA253 /* objc-class-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CC0D6D68A200CEA253 /* objc-class-old.mm */; }; + 838485F40D6D68A200CEA253 /* objc-class.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CD0D6D68A200CEA253 /* objc-class.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485F50D6D68A200CEA253 /* objc-class.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485CE0D6D68A200CEA253 /* objc-class.mm */; }; + 838485F60D6D68A200CEA253 /* objc-config.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485CF0D6D68A200CEA253 /* objc-config.h */; }; + 838485F70D6D68A200CEA253 /* objc-errors.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D00D6D68A200CEA253 /* objc-errors.mm */; }; + 838485F80D6D68A200CEA253 /* objc-exception.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D10D6D68A200CEA253 /* objc-exception.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485F90D6D68A200CEA253 /* objc-exception.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D20D6D68A200CEA253 /* objc-exception.mm */; settings = {COMPILER_FLAGS = "-fexceptions"; }; }; + 838485FA0D6D68A200CEA253 /* objc-file.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D30D6D68A200CEA253 /* objc-file.mm */; }; + 838485FB0D6D68A200CEA253 /* objc-initialize.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D40D6D68A200CEA253 /* objc-initialize.h */; }; + 838485FC0D6D68A200CEA253 /* objc-initialize.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D50D6D68A200CEA253 /* objc-initialize.mm */; }; + 838485FD0D6D68A200CEA253 /* objc-layout.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D60D6D68A200CEA253 /* objc-layout.mm */; }; + 838485FE0D6D68A200CEA253 /* objc-load.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D70D6D68A200CEA253 /* objc-load.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838485FF0D6D68A200CEA253 /* objc-load.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485D80D6D68A200CEA253 /* objc-load.mm */; }; + 838486000D6D68A200CEA253 /* objc-loadmethod.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485D90D6D68A200CEA253 /* objc-loadmethod.h */; }; + 838486010D6D68A200CEA253 /* objc-loadmethod.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485DA0D6D68A200CEA253 /* objc-loadmethod.mm */; }; + 
838486020D6D68A200CEA253 /* objc-lockdebug.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485DB0D6D68A200CEA253 /* objc-lockdebug.mm */; }; + 838486030D6D68A200CEA253 /* objc-private.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485DC0D6D68A200CEA253 /* objc-private.h */; }; + 838486070D6D68A200CEA253 /* objc-runtime-new.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E00D6D68A200CEA253 /* objc-runtime-new.h */; }; + 838486080D6D68A200CEA253 /* objc-runtime-new.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */; }; + 838486090D6D68A200CEA253 /* objc-runtime-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */; }; + 8384860A0D6D68A200CEA253 /* objc-runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E30D6D68A200CEA253 /* objc-runtime.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 8384860B0D6D68A200CEA253 /* objc-runtime.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E40D6D68A200CEA253 /* objc-runtime.mm */; }; + 8384860C0D6D68A200CEA253 /* objc-sel-set.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E50D6D68A200CEA253 /* objc-sel-set.h */; }; + 8384860D0D6D68A200CEA253 /* objc-sel-set.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E60D6D68A200CEA253 /* objc-sel-set.mm */; }; + 8384860F0D6D68A200CEA253 /* objc-sel.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485E80D6D68A200CEA253 /* objc-sel.mm */; }; + 838486100D6D68A200CEA253 /* objc-sync.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485E90D6D68A200CEA253 /* objc-sync.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838486110D6D68A200CEA253 /* objc-sync.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EA0D6D68A200CEA253 /* objc-sync.mm */; }; + 838486120D6D68A200CEA253 /* objc-typeencoding.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */; }; + 838486130D6D68A200CEA253 /* objc.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485EC0D6D68A200CEA253 /* objc.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838486140D6D68A200CEA253 /* Object.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485ED0D6D68A200CEA253 /* Object.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838486150D6D68A200CEA253 /* Object.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838485EE0D6D68A200CEA253 /* Object.mm */; }; + 8384861E0D6D68A800CEA253 /* Protocol.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486180D6D68A800CEA253 /* Protocol.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 8384861F0D6D68A800CEA253 /* Protocol.mm in Sources */ = {isa = PBXBuildFile; fileRef = 838486190D6D68A800CEA253 /* Protocol.mm */; }; + 838486200D6D68A800CEA253 /* runtime.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384861A0D6D68A800CEA253 /* runtime.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838486250D6D68F000CEA253 /* List.m in Sources */ = {isa = PBXBuildFile; fileRef = 838486230D6D68F000CEA253 /* List.m */; }; + 838486260D6D68F000CEA253 /* List.h in Headers */ = {isa = PBXBuildFile; fileRef = 838486240D6D68F000CEA253 /* List.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 838486280D6D6A2400CEA253 /* message.h in Headers */ = {isa = PBXBuildFile; fileRef = 838485BD0D6D687300CEA253 /* message.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */; }; + 
83BE02E40FCCB23400661494 /* objc-file-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83BE02E30FCCB23400661494 /* objc-file-old.mm */; }; + 83BE02E80FCCB24D00661494 /* objc-file-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E50FCCB24D00661494 /* objc-file-old.h */; }; + 83BE02E90FCCB24D00661494 /* objc-file.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E60FCCB24D00661494 /* objc-file.h */; }; + 83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */ = {isa = PBXBuildFile; fileRef = 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */; }; + 83C9C3391668B50E00F4E544 /* objc-msg-simulator-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */; }; + 83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */ = {isa = PBXBuildFile; fileRef = 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */; }; + 83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */ = {isa = PBXBuildFile; fileRef = 83EB007A121C9EC200B92C16 /* objc-sel-table.s */; }; + 83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52615E843B100E0926F /* NSObjCRuntime.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 83F4B52915E843B100E0926F /* NSObject.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F4B52715E843B100E0926F /* NSObject.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F550DF155E030800E95D3B /* objc-cache-old.mm */; }; + 87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */ = {isa = PBXBuildFile; fileRef = 87BB4E900EC39633005D08E1 /* objc-probes.d */; }; + 9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */ = {isa = PBXBuildFile; fileRef = 9672F7ED14D5F488007CEC96 /* NSObject.mm */; }; + E8923DA1116AB2820071B552 /* a1a2-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */; }; + E8923DA2116AB2820071B552 /* a1a2-blocktramps-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */; }; + E8923DA3116AB2820071B552 /* a2a3-blocktramps-i386.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */; }; + E8923DA4116AB2820071B552 /* a2a3-blocktramps-x86_64.s in Sources */ = {isa = PBXBuildFile; fileRef = E8923D9F116AB2820071B552 /* a2a3-blocktramps-x86_64.s */; }; + E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */ = {isa = PBXBuildFile; fileRef = E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 08FB7793FE84155DC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = D2AAC0620554660B00DB518D; + remoteInfo = objc; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 393CEABF0DC69E3E000B69DE /* objc-references.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-references.mm"; path = "runtime/objc-references.mm"; sourceTree = ""; }; + 393CEAC50DC69E67000B69DE /* objc-references.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-references.h"; path = "runtime/objc-references.h"; sourceTree = ""; }; + 39ABD71F12F0B61800D1054C /* objc-weak.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-weak.h"; path = "runtime/objc-weak.h"; sourceTree = ""; }; + 39ABD72012F0B61800D1054C /* objc-weak.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-weak.mm"; path = "runtime/objc-weak.mm"; sourceTree = ""; }; + 830F2A690D737FB800392440 /* objc-msg-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-arm.s"; path = "runtime/Messengers.subproj/objc-msg-arm.s"; sourceTree = ""; }; + 830F2A6A0D737FB800392440 /* objc-msg-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-i386.s"; path = "runtime/Messengers.subproj/objc-msg-i386.s"; sourceTree = ""; }; + 830F2A720D737FB800392440 /* objc-msg-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-x86_64.s"; path = "runtime/Messengers.subproj/objc-msg-x86_64.s"; sourceTree = ""; tabWidth = 8; usesTabs = 1; }; + 830F2A930D73876100392440 /* objc-accessors.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-accessors.mm"; path = "runtime/objc-accessors.mm"; sourceTree = ""; }; + 830F2A970D738DC200392440 /* hashtable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hashtable.h; path = runtime/hashtable.h; sourceTree = ""; }; + 830F2AA50D7394C200392440 /* markgc.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = markgc.cpp; sourceTree = ""; }; + 83112ED30F00599600A5FBAF /* objc-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-internal.h"; path = "runtime/objc-internal.h"; sourceTree = ""; }; + 831C85D30E10CF850066E64C /* objc-os.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-os.h"; path = "runtime/objc-os.h"; sourceTree = ""; }; + 831C85D40E10CF850066E64C /* objc-os.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-os.mm"; path = "runtime/objc-os.mm"; sourceTree = ""; }; + 834266D70E665A8B002E4DA2 /* objc-gdb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-gdb.h"; path = "runtime/objc-gdb.h"; sourceTree = ""; }; + 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sel-old.mm"; path = "runtime/objc-sel-old.mm"; sourceTree = ""; }; + 834EC0A311614167009B2563 /* objc-abi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-abi.h"; path = "runtime/objc-abi.h"; sourceTree = ""; }; + 83725F4914CA5BFA0014370E /* objc-opt.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-opt.mm"; path = "runtime/objc-opt.mm"; sourceTree = ""; }; + 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-arm64.s"; path = "runtime/a1a2-blocktramps-arm64.s"; sourceTree = ""; }; + 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-arm.s"; path = "runtime/a1a2-blocktramps-arm.s"; sourceTree = ""; }; + 8383A3A2122600E9009290B8 /* 
a2a3-blocktramps-arm.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a2a3-blocktramps-arm.s"; path = "runtime/a2a3-blocktramps-arm.s"; sourceTree = ""; }; + 838485B30D6D682B00CEA253 /* libobjc.order */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = libobjc.order; sourceTree = ""; }; + 838485B40D6D683300CEA253 /* APPLE_LICENSE */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = APPLE_LICENSE; sourceTree = ""; }; + 838485B50D6D683300CEA253 /* ReleaseNotes.rtf */ = {isa = PBXFileReference; lastKnownFileType = text.rtf; path = ReleaseNotes.rtf; sourceTree = ""; }; + 838485B70D6D687300CEA253 /* hashtable2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = hashtable2.h; path = runtime/hashtable2.h; sourceTree = ""; }; + 838485B80D6D687300CEA253 /* hashtable2.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = hashtable2.mm; path = runtime/hashtable2.mm; sourceTree = ""; }; + 838485BB0D6D687300CEA253 /* maptable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = maptable.h; path = runtime/maptable.h; sourceTree = ""; }; + 838485BC0D6D687300CEA253 /* maptable.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = maptable.mm; path = runtime/maptable.mm; sourceTree = ""; }; + 838485BD0D6D687300CEA253 /* message.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = message.h; path = runtime/message.h; sourceTree = ""; }; + 838485C80D6D68A200CEA253 /* objc-api.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-api.h"; path = "runtime/objc-api.h"; sourceTree = ""; }; + 838485C90D6D68A200CEA253 /* objc-auto.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-auto.h"; path = "runtime/objc-auto.h"; sourceTree = ""; }; + 838485CA0D6D68A200CEA253 /* objc-auto.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-auto.mm"; path = "runtime/objc-auto.mm"; sourceTree = ""; }; + 838485CB0D6D68A200CEA253 /* objc-cache.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-cache.mm"; path = "runtime/objc-cache.mm"; sourceTree = ""; }; + 838485CC0D6D68A200CEA253 /* objc-class-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-class-old.mm"; path = "runtime/objc-class-old.mm"; sourceTree = ""; }; + 838485CD0D6D68A200CEA253 /* objc-class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-class.h"; path = "runtime/objc-class.h"; sourceTree = ""; }; + 838485CE0D6D68A200CEA253 /* objc-class.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-class.mm"; path = "runtime/objc-class.mm"; sourceTree = ""; }; + 838485CF0D6D68A200CEA253 /* objc-config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-config.h"; path = "runtime/objc-config.h"; sourceTree = ""; }; + 838485D00D6D68A200CEA253 /* objc-errors.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-errors.mm"; path = "runtime/objc-errors.mm"; sourceTree = ""; }; + 838485D10D6D68A200CEA253 /* 
objc-exception.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-exception.h"; path = "runtime/objc-exception.h"; sourceTree = ""; }; + 838485D20D6D68A200CEA253 /* objc-exception.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-exception.mm"; path = "runtime/objc-exception.mm"; sourceTree = ""; }; + 838485D30D6D68A200CEA253 /* objc-file.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-file.mm"; path = "runtime/objc-file.mm"; sourceTree = ""; }; + 838485D40D6D68A200CEA253 /* objc-initialize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-initialize.h"; path = "runtime/objc-initialize.h"; sourceTree = ""; }; + 838485D50D6D68A200CEA253 /* objc-initialize.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-initialize.mm"; path = "runtime/objc-initialize.mm"; sourceTree = ""; }; + 838485D60D6D68A200CEA253 /* objc-layout.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-layout.mm"; path = "runtime/objc-layout.mm"; sourceTree = ""; }; + 838485D70D6D68A200CEA253 /* objc-load.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-load.h"; path = "runtime/objc-load.h"; sourceTree = ""; }; + 838485D80D6D68A200CEA253 /* objc-load.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-load.mm"; path = "runtime/objc-load.mm"; sourceTree = ""; }; + 838485D90D6D68A200CEA253 /* objc-loadmethod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-loadmethod.h"; path = "runtime/objc-loadmethod.h"; sourceTree = ""; }; + 838485DA0D6D68A200CEA253 /* objc-loadmethod.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-loadmethod.mm"; path = "runtime/objc-loadmethod.mm"; sourceTree = ""; }; + 838485DB0D6D68A200CEA253 /* objc-lockdebug.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-lockdebug.mm"; path = "runtime/objc-lockdebug.mm"; sourceTree = ""; }; + 838485DC0D6D68A200CEA253 /* objc-private.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-private.h"; path = "runtime/objc-private.h"; sourceTree = ""; }; + 838485E00D6D68A200CEA253 /* objc-runtime-new.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-new.h"; path = "runtime/objc-runtime-new.h"; sourceTree = ""; }; + 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-runtime-new.mm"; path = "runtime/objc-runtime-new.mm"; sourceTree = ""; }; + 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-runtime-old.mm"; path = "runtime/objc-runtime-old.mm"; sourceTree = ""; }; + 838485E30D6D68A200CEA253 /* objc-runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime.h"; path = "runtime/objc-runtime.h"; sourceTree = ""; }; + 838485E40D6D68A200CEA253 /* objc-runtime.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.cpp.objcpp; name = "objc-runtime.mm"; path = "runtime/objc-runtime.mm"; sourceTree = ""; }; + 838485E50D6D68A200CEA253 /* objc-sel-set.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-sel-set.h"; path = "runtime/objc-sel-set.h"; sourceTree = ""; }; + 838485E60D6D68A200CEA253 /* objc-sel-set.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sel-set.mm"; path = "runtime/objc-sel-set.mm"; sourceTree = ""; }; + 838485E80D6D68A200CEA253 /* objc-sel.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sel.mm"; path = "runtime/objc-sel.mm"; sourceTree = ""; }; + 838485E90D6D68A200CEA253 /* objc-sync.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-sync.h"; path = "runtime/objc-sync.h"; sourceTree = ""; }; + 838485EA0D6D68A200CEA253 /* objc-sync.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-sync.mm"; path = "runtime/objc-sync.mm"; sourceTree = ""; }; + 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-typeencoding.mm"; path = "runtime/objc-typeencoding.mm"; sourceTree = ""; }; + 838485EC0D6D68A200CEA253 /* objc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = objc.h; path = runtime/objc.h; sourceTree = ""; }; + 838485ED0D6D68A200CEA253 /* Object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Object.h; path = runtime/Object.h; sourceTree = ""; }; + 838485EE0D6D68A200CEA253 /* Object.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Object.mm; path = runtime/Object.mm; sourceTree = ""; }; + 838486180D6D68A800CEA253 /* Protocol.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Protocol.h; path = runtime/Protocol.h; sourceTree = ""; }; + 838486190D6D68A800CEA253 /* Protocol.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = Protocol.mm; path = runtime/Protocol.mm; sourceTree = ""; }; + 8384861A0D6D68A800CEA253 /* runtime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = runtime.h; path = runtime/runtime.h; sourceTree = ""; }; + 838486230D6D68F000CEA253 /* List.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = List.m; path = runtime/OldClasses.subproj/List.m; sourceTree = ""; }; + 838486240D6D68F000CEA253 /* List.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = List.h; path = runtime/OldClasses.subproj/List.h; sourceTree = ""; }; + 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-simulator-i386.s"; path = "runtime/Messengers.subproj/objc-msg-simulator-i386.s"; sourceTree = ""; }; + 83BE02E30FCCB23400661494 /* objc-file-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-file-old.mm"; path = "runtime/objc-file-old.mm"; sourceTree = ""; }; + 83BE02E50FCCB24D00661494 /* objc-file-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file-old.h"; path = 
"runtime/objc-file-old.h"; sourceTree = ""; }; + 83BE02E60FCCB24D00661494 /* objc-file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-file.h"; path = "runtime/objc-file.h"; sourceTree = ""; }; + 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = "objc-runtime-old.h"; path = "runtime/objc-runtime-old.h"; sourceTree = ""; }; + 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-simulator-x86_64.s"; path = "runtime/Messengers.subproj/objc-msg-simulator-x86_64.s"; sourceTree = ""; }; + 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-msg-arm64.s"; path = "runtime/Messengers.subproj/objc-msg-arm64.s"; sourceTree = ""; }; + 83EB007A121C9EC200B92C16 /* objc-sel-table.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "objc-sel-table.s"; path = "runtime/objc-sel-table.s"; sourceTree = ""; }; + 83F4B52615E843B100E0926F /* NSObjCRuntime.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObjCRuntime.h; path = runtime/NSObjCRuntime.h; sourceTree = ""; }; + 83F4B52715E843B100E0926F /* NSObject.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NSObject.h; path = runtime/NSObject.h; sourceTree = ""; }; + 83F550DF155E030800E95D3B /* objc-cache-old.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-cache-old.mm"; path = "runtime/objc-cache-old.mm"; sourceTree = ""; }; + 87BB4E900EC39633005D08E1 /* objc-probes.d */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.dtrace; name = "objc-probes.d"; path = "runtime/objc-probes.d"; sourceTree = ""; }; + 9672F7ED14D5F488007CEC96 /* NSObject.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = NSObject.mm; path = runtime/NSObject.mm; sourceTree = ""; }; + BC8B5D1212D3D48100C78A5B /* libauto.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; name = libauto.dylib; path = /usr/lib/libauto.dylib; sourceTree = ""; }; + D2AAC0630554660B00DB518D /* libobjc.A.dylib */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.dylib"; includeInIndex = 0; path = libobjc.A.dylib; sourceTree = BUILT_PRODUCTS_DIR; }; + E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-i386.s"; path = "runtime/a1a2-blocktramps-i386.s"; sourceTree = ""; }; + E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a1a2-blocktramps-x86_64.s"; path = "runtime/a1a2-blocktramps-x86_64.s"; sourceTree = ""; }; + E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a2a3-blocktramps-i386.s"; path = "runtime/a2a3-blocktramps-i386.s"; sourceTree = ""; }; + E8923D9F116AB2820071B552 /* a2a3-blocktramps-x86_64.s */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.asm; name = "a2a3-blocktramps-x86_64.s"; path = "runtime/a2a3-blocktramps-x86_64.s"; sourceTree = ""; }; + 
E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = "objc-block-trampolines.mm"; path = "runtime/objc-block-trampolines.mm"; sourceTree = ""; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + D289988505E68E00004EDB86 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 08FB7794FE84155DC02AAC07 /* objc */ = { + isa = PBXGroup; + children = ( + BC8B5D1212D3D48100C78A5B /* libauto.dylib */, + 838485C60D6D687700CEA253 /* Public Headers */, + 838485C70D6D688200CEA253 /* Private Headers */, + 8384862A0D6D6ABC00CEA253 /* Project Headers */, + 838486220D6D68E300CEA253 /* Obsolete Headers */, + 838486270D6D690F00CEA253 /* Obsolete Source */, + 08FB7795FE84155DC02AAC07 /* Source */, + 838485B20D6D67F900CEA253 /* Other */, + 1AB674ADFE9D54B511CA2CBB /* Products */, + ); + name = objc; + sourceTree = ""; + }; + 08FB7795FE84155DC02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 8383A3A1122600E9009290B8 /* a1a2-blocktramps-arm.s */, + 8383A3A2122600E9009290B8 /* a2a3-blocktramps-arm.s */, + 838485B80D6D687300CEA253 /* hashtable2.mm */, + 838485BC0D6D687300CEA253 /* maptable.mm */, + 9672F7ED14D5F488007CEC96 /* NSObject.mm */, + 838486190D6D68A800CEA253 /* Protocol.mm */, + 830F2A930D73876100392440 /* objc-accessors.mm */, + 838485CA0D6D68A200CEA253 /* objc-auto.mm */, + 39ABD72012F0B61800D1054C /* objc-weak.mm */, + E8923DA0116AB2820071B552 /* objc-block-trampolines.mm */, + 838485CB0D6D68A200CEA253 /* objc-cache.mm */, + 83F550DF155E030800E95D3B /* objc-cache-old.mm */, + 838485CC0D6D68A200CEA253 /* objc-class-old.mm */, + 838485CE0D6D68A200CEA253 /* objc-class.mm */, + 838485D00D6D68A200CEA253 /* objc-errors.mm */, + 838485D20D6D68A200CEA253 /* objc-exception.mm */, + 838485D30D6D68A200CEA253 /* objc-file.mm */, + 83BE02E30FCCB23400661494 /* objc-file-old.mm */, + 838485D50D6D68A200CEA253 /* objc-initialize.mm */, + 838485D60D6D68A200CEA253 /* objc-layout.mm */, + 838485D80D6D68A200CEA253 /* objc-load.mm */, + 838485DA0D6D68A200CEA253 /* objc-loadmethod.mm */, + 838485DB0D6D68A200CEA253 /* objc-lockdebug.mm */, + 83725F4914CA5BFA0014370E /* objc-opt.mm */, + 831C85D40E10CF850066E64C /* objc-os.mm */, + 393CEABF0DC69E3E000B69DE /* objc-references.mm */, + 838485E10D6D68A200CEA253 /* objc-runtime-new.mm */, + 838485E20D6D68A200CEA253 /* objc-runtime-old.mm */, + 838485E40D6D68A200CEA253 /* objc-runtime.mm */, + 838485E60D6D68A200CEA253 /* objc-sel-set.mm */, + 83EB007A121C9EC200B92C16 /* objc-sel-table.s */, + 838485E80D6D68A200CEA253 /* objc-sel.mm */, + 834DF8B615993EE1002F2BC9 /* objc-sel-old.mm */, + 838485EA0D6D68A200CEA253 /* objc-sync.mm */, + 838485EB0D6D68A200CEA253 /* objc-typeencoding.mm */, + 8379996D13CBAF6F007C2B5F /* a1a2-blocktramps-arm64.s */, + E8923D9C116AB2820071B552 /* a1a2-blocktramps-i386.s */, + E8923D9D116AB2820071B552 /* a1a2-blocktramps-x86_64.s */, + E8923D9E116AB2820071B552 /* a2a3-blocktramps-i386.s */, + E8923D9F116AB2820071B552 /* a2a3-blocktramps-x86_64.s */, + 830F2A690D737FB800392440 /* objc-msg-arm.s */, + 83D49E4E13C7C84F0057F1DD /* objc-msg-arm64.s */, + 830F2A6A0D737FB800392440 /* objc-msg-i386.s */, + 83B1A8BC0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s */, + 83C9C3381668B50E00F4E544 /* objc-msg-simulator-x86_64.s */, + 830F2A720D737FB800392440 /* 
objc-msg-x86_64.s */, + 87BB4E900EC39633005D08E1 /* objc-probes.d */, + ); + name = Source; + sourceTree = ""; + }; + 1AB674ADFE9D54B511CA2CBB /* Products */ = { + isa = PBXGroup; + children = ( + D2AAC0630554660B00DB518D /* libobjc.A.dylib */, + ); + name = Products; + sourceTree = ""; + }; + 838485B20D6D67F900CEA253 /* Other */ = { + isa = PBXGroup; + children = ( + 830F2AA50D7394C200392440 /* markgc.cpp */, + 838485B40D6D683300CEA253 /* APPLE_LICENSE */, + 838485B50D6D683300CEA253 /* ReleaseNotes.rtf */, + 838485B30D6D682B00CEA253 /* libobjc.order */, + ); + name = Other; + sourceTree = ""; + }; + 838485C60D6D687700CEA253 /* Public Headers */ = { + isa = PBXGroup; + children = ( + 83F4B52615E843B100E0926F /* NSObjCRuntime.h */, + 83F4B52715E843B100E0926F /* NSObject.h */, + 838485BD0D6D687300CEA253 /* message.h */, + 838485C80D6D68A200CEA253 /* objc-api.h */, + 838485C90D6D68A200CEA253 /* objc-auto.h */, + 838485D10D6D68A200CEA253 /* objc-exception.h */, + 838485E90D6D68A200CEA253 /* objc-sync.h */, + 838485EC0D6D68A200CEA253 /* objc.h */, + 8384861A0D6D68A800CEA253 /* runtime.h */, + ); + name = "Public Headers"; + sourceTree = ""; + }; + 838485C70D6D688200CEA253 /* Private Headers */ = { + isa = PBXGroup; + children = ( + 83112ED30F00599600A5FBAF /* objc-internal.h */, + 834EC0A311614167009B2563 /* objc-abi.h */, + 838485BB0D6D687300CEA253 /* maptable.h */, + 834266D70E665A8B002E4DA2 /* objc-gdb.h */, + ); + name = "Private Headers"; + sourceTree = ""; + }; + 838486220D6D68E300CEA253 /* Obsolete Headers */ = { + isa = PBXGroup; + children = ( + 830F2A970D738DC200392440 /* hashtable.h */, + 838485B70D6D687300CEA253 /* hashtable2.h */, + 838485CD0D6D68A200CEA253 /* objc-class.h */, + 838485D70D6D68A200CEA253 /* objc-load.h */, + 838485E30D6D68A200CEA253 /* objc-runtime.h */, + 838486240D6D68F000CEA253 /* List.h */, + 838485ED0D6D68A200CEA253 /* Object.h */, + 838486180D6D68A800CEA253 /* Protocol.h */, + ); + name = "Obsolete Headers"; + sourceTree = ""; + }; + 838486270D6D690F00CEA253 /* Obsolete Source */ = { + isa = PBXGroup; + children = ( + 838486230D6D68F000CEA253 /* List.m */, + 838485EE0D6D68A200CEA253 /* Object.mm */, + ); + name = "Obsolete Source"; + sourceTree = ""; + }; + 8384862A0D6D6ABC00CEA253 /* Project Headers */ = { + isa = PBXGroup; + children = ( + 838485CF0D6D68A200CEA253 /* objc-config.h */, + 83BE02E60FCCB24D00661494 /* objc-file.h */, + 83BE02E50FCCB24D00661494 /* objc-file-old.h */, + 838485D40D6D68A200CEA253 /* objc-initialize.h */, + 838485D90D6D68A200CEA253 /* objc-loadmethod.h */, + 831C85D30E10CF850066E64C /* objc-os.h */, + 838485DC0D6D68A200CEA253 /* objc-private.h */, + 393CEAC50DC69E67000B69DE /* objc-references.h */, + 838485E00D6D68A200CEA253 /* objc-runtime-new.h */, + 83BE02E70FCCB24D00661494 /* objc-runtime-old.h */, + 838485E50D6D68A200CEA253 /* objc-sel-set.h */, + 39ABD71F12F0B61800D1054C /* objc-weak.h */, + ); + name = "Project Headers"; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + D2AAC0600554660B00DB518D /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 830F2A980D738DC200392440 /* hashtable.h in Headers */, + 838485BF0D6D687300CEA253 /* hashtable2.h in Headers */, + 838486260D6D68F000CEA253 /* List.h in Headers */, + 838485C30D6D687300CEA253 /* maptable.h in Headers */, + 838486280D6D6A2400CEA253 /* message.h in Headers */, + 834EC0A411614167009B2563 /* objc-abi.h in Headers */, + 838485EF0D6D68A200CEA253 /* objc-api.h in Headers */, + 
838485F00D6D68A200CEA253 /* objc-auto.h in Headers */, + 838485F40D6D68A200CEA253 /* objc-class.h in Headers */, + 838485F60D6D68A200CEA253 /* objc-config.h in Headers */, + 838485F80D6D68A200CEA253 /* objc-exception.h in Headers */, + 83BE02E80FCCB24D00661494 /* objc-file-old.h in Headers */, + 83BE02E90FCCB24D00661494 /* objc-file.h in Headers */, + 834266D80E665A8B002E4DA2 /* objc-gdb.h in Headers */, + 838485FB0D6D68A200CEA253 /* objc-initialize.h in Headers */, + 83112ED40F00599600A5FBAF /* objc-internal.h in Headers */, + 838485FE0D6D68A200CEA253 /* objc-load.h in Headers */, + 838486000D6D68A200CEA253 /* objc-loadmethod.h in Headers */, + 831C85D50E10CF850066E64C /* objc-os.h in Headers */, + 838486030D6D68A200CEA253 /* objc-private.h in Headers */, + 393CEAC60DC69E67000B69DE /* objc-references.h in Headers */, + 838486070D6D68A200CEA253 /* objc-runtime-new.h in Headers */, + 83BE02EA0FCCB24D00661494 /* objc-runtime-old.h in Headers */, + 8384860A0D6D68A200CEA253 /* objc-runtime.h in Headers */, + 8384860C0D6D68A200CEA253 /* objc-sel-set.h in Headers */, + 838486100D6D68A200CEA253 /* objc-sync.h in Headers */, + 838486130D6D68A200CEA253 /* objc.h in Headers */, + 838486140D6D68A200CEA253 /* Object.h in Headers */, + 8384861E0D6D68A800CEA253 /* Protocol.h in Headers */, + 838486200D6D68A800CEA253 /* runtime.h in Headers */, + 39ABD72312F0B61800D1054C /* objc-weak.h in Headers */, + 83F4B52815E843B100E0926F /* NSObjCRuntime.h in Headers */, + 83F4B52915E843B100E0926F /* NSObject.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + D2AAC0620554660B00DB518D /* objc */ = { + isa = PBXNativeTarget; + buildConfigurationList = 1DEB914A08733D8E0010E9CD /* Build configuration list for PBXNativeTarget "objc" */; + buildPhases = ( + D2AAC0600554660B00DB518D /* Headers */, + D2AAC0610554660B00DB518D /* Sources */, + D289988505E68E00004EDB86 /* Frameworks */, + 830F2AB60D739AB600392440 /* Run Script (markgc) */, + 830F2AFA0D73BC5800392440 /* Run Script (symlink) */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = objc; + productName = objc; + productReference = D2AAC0630554660B00DB518D /* libobjc.A.dylib */; + productType = "com.apple.product-type.library.dynamic"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 08FB7793FE84155DC02AAC07 /* Project object */ = { + isa = PBXProject; + attributes = { + BuildIndependentTargetsInParallel = NO; + LastUpgradeCheck = 0440; + TargetAttributes = { + 837F67A81A771F63004D34FA = { + CreatedOnToolsVersion = 6.3; + }; + }; + }; + buildConfigurationList = 1DEB914E08733D8E0010E9CD /* Build configuration list for PBXProject "objc" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 1; + knownRegions = ( + English, + Japanese, + French, + German, + ); + mainGroup = 08FB7794FE84155DC02AAC07 /* objc */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + D2AAC0620554660B00DB518D /* objc */, + 837F67A81A771F63004D34FA /* objc-simulator */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXShellScriptBuildPhase section */ + 830F2AB60D739AB600392440 /* Run Script (markgc) */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + comments = "Modify the built dylib (mod_init_funcs and mod_term_funcs)."; + files = ( + ); + inputPaths = ( + ); + name = "Run Script (markgc)"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + 
shellScript = "set -x\n/usr/bin/xcrun -sdk macosx clang++ -Wall -mmacosx-version-min=10.9 -arch x86_64 -std=c++11 \"${SRCROOT}/markgc.cpp\" -o \"${BUILT_PRODUCTS_DIR}/markgc\"\n\"${BUILT_PRODUCTS_DIR}/markgc\" \"${BUILT_PRODUCTS_DIR}/libobjc.A.dylib\""; + }; + 830F2AFA0D73BC5800392440 /* Run Script (symlink) */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 8; + files = ( + ); + inputPaths = ( + ); + name = "Run Script (symlink)"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 1; + shellPath = /bin/sh; + shellScript = "cd \"${INSTALL_DIR}\"\n/bin/ln -s libobjc.A.dylib libobjc.dylib\n"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + D2AAC0610554660B00DB518D /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 838485C00D6D687300CEA253 /* hashtable2.mm in Sources */, + 838485C40D6D687300CEA253 /* maptable.mm in Sources */, + 838485F10D6D68A200CEA253 /* objc-auto.mm in Sources */, + 838485F20D6D68A200CEA253 /* objc-cache.mm in Sources */, + 838485F30D6D68A200CEA253 /* objc-class-old.mm in Sources */, + 838485F50D6D68A200CEA253 /* objc-class.mm in Sources */, + 838485F70D6D68A200CEA253 /* objc-errors.mm in Sources */, + 838485F90D6D68A200CEA253 /* objc-exception.mm in Sources */, + 838485FA0D6D68A200CEA253 /* objc-file.mm in Sources */, + 838485FC0D6D68A200CEA253 /* objc-initialize.mm in Sources */, + 838485FD0D6D68A200CEA253 /* objc-layout.mm in Sources */, + 838485FF0D6D68A200CEA253 /* objc-load.mm in Sources */, + 838486010D6D68A200CEA253 /* objc-loadmethod.mm in Sources */, + 838486020D6D68A200CEA253 /* objc-lockdebug.mm in Sources */, + 838486080D6D68A200CEA253 /* objc-runtime-new.mm in Sources */, + 838486090D6D68A200CEA253 /* objc-runtime-old.mm in Sources */, + 8384860B0D6D68A200CEA253 /* objc-runtime.mm in Sources */, + 8384860D0D6D68A200CEA253 /* objc-sel-set.mm in Sources */, + 8384860F0D6D68A200CEA253 /* objc-sel.mm in Sources */, + 838486110D6D68A200CEA253 /* objc-sync.mm in Sources */, + 838486120D6D68A200CEA253 /* objc-typeencoding.mm in Sources */, + 838486150D6D68A200CEA253 /* Object.mm in Sources */, + 8384861F0D6D68A800CEA253 /* Protocol.mm in Sources */, + 838486250D6D68F000CEA253 /* List.m in Sources */, + 830F2A740D737FB800392440 /* objc-msg-arm.s in Sources */, + 830F2A750D737FB900392440 /* objc-msg-i386.s in Sources */, + 830F2A7D0D737FBB00392440 /* objc-msg-x86_64.s in Sources */, + 830F2A950D73876100392440 /* objc-accessors.mm in Sources */, + 393CEAC00DC69E3E000B69DE /* objc-references.mm in Sources */, + 831C85D60E10CF850066E64C /* objc-os.mm in Sources */, + 87BB4EA70EC39854005D08E1 /* objc-probes.d in Sources */, + 83BE02E40FCCB23400661494 /* objc-file-old.mm in Sources */, + E8923DA1116AB2820071B552 /* a1a2-blocktramps-i386.s in Sources */, + E8923DA2116AB2820071B552 /* a1a2-blocktramps-x86_64.s in Sources */, + E8923DA3116AB2820071B552 /* a2a3-blocktramps-i386.s in Sources */, + E8923DA4116AB2820071B552 /* a2a3-blocktramps-x86_64.s in Sources */, + E8923DA5116AB2820071B552 /* objc-block-trampolines.mm in Sources */, + 83B1A8BE0FF1AC0D0019EA5B /* objc-msg-simulator-i386.s in Sources */, + 83EB007B121C9EC200B92C16 /* objc-sel-table.s in Sources */, + 8383A3A3122600E9009290B8 /* a1a2-blocktramps-arm.s in Sources */, + 8383A3A4122600E9009290B8 /* a2a3-blocktramps-arm.s in Sources */, + 39ABD72412F0B61800D1054C /* objc-weak.mm in Sources */, + 83D49E4F13C7C84F0057F1DD /* objc-msg-arm64.s in Sources */, + 8379996E13CBAF6F007C2B5F /* 
a1a2-blocktramps-arm64.s in Sources */, + 9672F7EE14D5F488007CEC96 /* NSObject.mm in Sources */, + 83725F4A14CA5BFA0014370E /* objc-opt.mm in Sources */, + 83F550E0155E030800E95D3B /* objc-cache-old.mm in Sources */, + 834DF8B715993EE1002F2BC9 /* objc-sel-old.mm in Sources */, + 83C9C3391668B50E00F4E544 /* objc-msg-simulator-x86_64.s in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 837F67AD1A771F6E004D34FA /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = D2AAC0620554660B00DB518D /* objc */; + targetProxy = 837F67AC1A771F6E004D34FA /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 1DEB914B08733D8E0010E9CD /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ARCHS = "$(ARCHS_STANDARD_32_64_BIT)"; + COPY_PHASE_STRIP = NO; + DYLIB_CURRENT_VERSION = 228; + EXECUTABLE_PREFIX = lib; + GCC_CW_ASM_SYNTAX = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO; + HEADER_SEARCH_PATHS = ( + "$(DSTROOT)/usr/include/**", + "$(DSTROOT)/usr/local/include/**", + "$(CONFIGURATION_BUILD_DIR)/usr/include/**", + "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", + /System/Library/Frameworks/System.framework/PrivateHeaders, + ); + INSTALL_PATH = /usr/lib; + ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order"; + "ORDER_FILE[sdk=iphonesimulator*]" = ""; + OTHER_CFLAGS = ( + "-fdollars-in-identifiers", + "$(OTHER_CFLAGS)", + ); + "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = ( + "-lc++abi", + "-Wl,-segalign,0x4000", + "-Xlinker", + "-sectalign", + "-Xlinker", + __DATA, + "-Xlinker", + __objc_data, + "-Xlinker", + 0x1000, + ); + "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi"; + "OTHER_LDFLAGS[sdk=macosx*]" = ( + "-lCrashReporterClient", + "-lauto", + "-lc++abi", + "-Xlinker", + "-sectalign", + "-Xlinker", + __DATA, + "-Xlinker", + __objc_data, + "-Xlinker", + 0x1000, + ); + PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; + PRODUCT_NAME = objc.A; + PUBLIC_HEADERS_FOLDER_PATH = /usr/include/objc; + UNEXPORTED_SYMBOLS_FILE = unexported_symbols; + }; + name = Debug; + }; + 1DEB914C08733D8E0010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + DYLIB_CURRENT_VERSION = 228; + EXECUTABLE_PREFIX = lib; + GCC_CW_ASM_SYNTAX = NO; + GCC_WARN_ABOUT_DEPRECATED_FUNCTIONS = NO; + HEADER_SEARCH_PATHS = ( + "$(DSTROOT)/usr/include/**", + "$(DSTROOT)/usr/local/include/**", + "$(CONFIGURATION_BUILD_DIR)/usr/include/**", + "$(CONFIGURATION_BUILD_DIR)/usr/local/include/**", + /System/Library/Frameworks/System.framework/PrivateHeaders, + ); + INSTALL_PATH = /usr/lib; + ORDER_FILE = "$(SDKROOT)/AppleInternal/OrderFiles/libobjc.order"; + "ORDER_FILE[sdk=iphonesimulator*]" = ""; + OTHER_CFLAGS = ( + "-fdollars-in-identifiers", + "$(OTHER_CFLAGS)", + ); + "OTHER_LDFLAGS[sdk=iphoneos*][arch=*]" = ( + "-lc++abi", + "-Wl,-segalign,0x4000", + "-Xlinker", + "-sectalign", + "-Xlinker", + __DATA, + "-Xlinker", + __objc_data, + "-Xlinker", + 0x1000, + ); + "OTHER_LDFLAGS[sdk=iphonesimulator*][arch=*]" = "-lc++abi"; + "OTHER_LDFLAGS[sdk=macosx*]" = ( + "-lCrashReporterClient", + "-lauto", + "-lc++abi", + "-Xlinker", + "-sectalign", + "-Xlinker", + __DATA, + "-Xlinker", + __objc_data, + "-Xlinker", + 0x1000, + ); + PRIVATE_HEADERS_FOLDER_PATH = /usr/local/include/objc; + PRODUCT_NAME = objc.A; + PUBLIC_HEADERS_FOLDER_PATH = /usr/include/objc; + UNEXPORTED_SYMBOLS_FILE = 
unexported_symbols; + }; + name = Release; + }; + 1DEB914F08733D8E0010E9CD /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_LINK_OBJC_RUNTIME = NO; + CLANG_OBJC_RUNTIME = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + EXCLUDED_INSTALLSRC_SUBDIRECTORY_PATTERNS = "$(inherited) test"; + GCC_ENABLE_CPP_EXCEPTIONS = NO; + GCC_ENABLE_CPP_RTTI = NO; + GCC_INLINES_ARE_PRIVATE_EXTERN = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = "OS_OBJECT_USE_OBJC=0"; + GCC_STRICT_ALIASING = YES; + GCC_SYMBOLS_PRIVATE_EXTERN = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + OTHER_CFLAGS = ""; + "OTHER_CFLAGS[arch=x86_64]" = "-fobjc-legacy-dispatch"; + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-D_LIBCPP_VISIBLE=\"\"", + ); + WARNING_CFLAGS = ( + "-Wall", + "-Wextra", + "-Wstrict-aliasing=2", + "-Wstrict-overflow=4", + "-Wno-unused-parameter", + "-Wno-deprecated-objc-isa-usage", + "-Wno-cast-of-sel-type", + ); + }; + name = Debug; + }; + 1DEB915008733D8E0010E9CD /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + CLANG_CXX_LANGUAGE_STANDARD = "gnu++0x"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_LINK_OBJC_RUNTIME = NO; + CLANG_OBJC_RUNTIME = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + EXCLUDED_INSTALLSRC_SUBDIRECTORY_PATTERNS = "$(inherited) test"; + GCC_ENABLE_CPP_EXCEPTIONS = NO; + GCC_ENABLE_CPP_RTTI = NO; + GCC_INLINES_ARE_PRIVATE_EXTERN = YES; + GCC_PREPROCESSOR_DEFINITIONS = ( + "OS_OBJECT_USE_OBJC=0", + "NDEBUG=1", + ); + GCC_STRICT_ALIASING = YES; + GCC_SYMBOLS_PRIVATE_EXTERN = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO = NO; + GCC_WARN_ABOUT_MISSING_NEWLINE = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES; + GCC_WARN_SHADOW = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + "OTHER_CFLAGS[arch=i386]" = "-momit-leaf-frame-pointer"; + "OTHER_CFLAGS[arch=x86_64]" = ( + "-momit-leaf-frame-pointer", + "-fobjc-legacy-dispatch", + ); + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-D_LIBCPP_VISIBLE=\"\"", + ); + WARNING_CFLAGS = ( + "-Wall", + "-Wextra", + "-Wstrict-aliasing=2", + "-Wstrict-overflow=4", + "-Wno-unused-parameter", + "-Wno-deprecated-objc-isa-usage", + "-Wno-cast-of-sel-type", + ); + }; + name = Release; + }; + 837F67AA1A771F63004D34FA /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Debug; + }; + 837F67AB1A771F63004D34FA /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = "$(TARGET_NAME)"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 1DEB914A08733D8E0010E9CD /* Build configuration list for PBXNativeTarget "objc" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB914B08733D8E0010E9CD /* Debug */, + 1DEB914C08733D8E0010E9CD /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 1DEB914E08733D8E0010E9CD /* Build configuration list for PBXProject "objc" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 1DEB914F08733D8E0010E9CD /* Debug */, + 1DEB915008733D8E0010E9CD /* Release */, + ); + 
defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 837F67A91A771F63004D34FA /* Build configuration list for PBXAggregateTarget "objc-simulator" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 837F67AA1A771F63004D34FA /* Debug */, + 837F67AB1A771F63004D34FA /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 08FB7793FE84155DC02AAC07 /* Project object */; +} diff --git a/objcrt/objcrt.vcproj b/objcrt/objcrt.vcproj new file mode 100755 index 0000000..22b5919 --- /dev/null +++ b/objcrt/objcrt.vcproj @@ -0,0 +1,95 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/prebuild.bat b/prebuild.bat new file mode 100755 index 0000000..70c55b0 --- /dev/null +++ b/prebuild.bat @@ -0,0 +1,15 @@ +@echo off + +echo prebuild: installing headers +xcopy /Y "%ProjectDir%runtime\objc.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\objc-api.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\objc-auto.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\objc-exception.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\message.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\runtime.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\hashtable.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\hashtable2.h" "%DSTROOT%\AppleInternal\include\objc\" +xcopy /Y "%ProjectDir%runtime\maptable.h" "%DSTROOT%\AppleInternal\include\objc\" + +echo prebuild: setting version +version diff --git a/runtime/Messengers.subproj/objc-msg-arm.s b/runtime/Messengers.subproj/objc-msg-arm.s new file mode 100644 index 0000000..bd88c6a --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-arm.s @@ -0,0 +1,911 @@ +/* + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 1999-2007 Apple Computer, Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ +/******************************************************************** + * + * objc-msg-arm.s - ARM code to support objc messaging + * + ********************************************************************/ + +#ifdef __arm__ + +#include + +#ifndef _ARM_ARCH_7 +# error requires armv7 +#endif + +// Set FP=1 on architectures that pass parameters in floating-point registers +#if __ARM_ARCH_7K__ +# define FP 1 +#else +# define FP 0 +#endif + +#if FP + +# if !__ARM_NEON__ +# error sorry +# endif + +# define FP_RETURN_ZERO \ + vmov.i32 q0, #0 ; \ + vmov.i32 q1, #0 ; \ + vmov.i32 q2, #0 ; \ + vmov.i32 q3, #0 + +# define FP_SAVE \ + vpush {q0-q3} + +# define FP_RESTORE \ + vpop {q0-q3} + +#else + +# define FP_RETURN_ZERO +# define FP_SAVE +# define FP_RESTORE + +#endif + +// Define SUPPORT_INDEXED_ISA for targets which store the class in the ISA as +// an index in to a class table. +// Note, keep this in sync with objc-config.h. +// FIXME: Remove this duplication. We should get this from objc-config.h. +#if __ARM_ARCH_7K__ >= 2 +# define SUPPORT_INDEXED_ISA 1 +#else +# define SUPPORT_INDEXED_ISA 0 +#endif + +// Note, keep these in sync with objc-private.h +#define ISA_INDEX_IS_NPI 1 +#define ISA_INDEX_MASK 0x0001FFFC +#define ISA_INDEX_SHIFT 2 +#define ISA_INDEX_BITS 15 +#define ISA_INDEX_COUNT (1 << ISA_INDEX_BITS) +#define ISA_INDEX_MAGIC_MASK 0x001E0001 +#define ISA_INDEX_MAGIC_VALUE 0x001C0001 + +.syntax unified + +#define MI_EXTERN(var) \ + .non_lazy_symbol_pointer ;\ +L##var##$$non_lazy_ptr: ;\ + .indirect_symbol var ;\ + .long 0 + +#define MI_GET_EXTERN(reg,var) \ + movw reg, :lower16:(L##var##$$non_lazy_ptr-7f-4) ;\ + movt reg, :upper16:(L##var##$$non_lazy_ptr-7f-4) ;\ +7: add reg, pc ;\ + ldr reg, [reg] + +#define MI_GET_ADDRESS(reg,var) \ + movw reg, :lower16:(var-7f-4) ;\ + movt reg, :upper16:(var-7f-4) ;\ +7: add reg, pc ;\ + + +.data + +#if SUPPORT_INDEXED_ISA + + .align 2 + .globl _objc_indexed_classes +_objc_indexed_classes: + .fill ISA_INDEX_COUNT, 4, 0 + +#endif + + + +// _objc_entryPoints and _objc_exitPoints are used by method dispatch +// caching code to figure out whether any threads are actively +// in the cache for dispatching. The labels surround the asm code +// that do cache lookups. The tables are zero-terminated. + +.align 2 +.private_extern _objc_entryPoints +_objc_entryPoints: + .long _cache_getImp + .long _objc_msgSend + .long _objc_msgSend_stret + .long _objc_msgSendSuper + .long _objc_msgSendSuper_stret + .long _objc_msgSendSuper2 + .long _objc_msgSendSuper2_stret + .long _objc_msgLookup + .long _objc_msgLookup_stret + .long _objc_msgLookupSuper2 + .long _objc_msgLookupSuper2_stret + .long 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .long LExit_cache_getImp + .long LExit_objc_msgSend + .long LExit_objc_msgSend_stret + .long LExit_objc_msgSendSuper + .long LExit_objc_msgSendSuper_stret + .long LExit_objc_msgSendSuper2 + .long LExit_objc_msgSendSuper2_stret + .long LExit_objc_msgLookup + .long LExit_objc_msgLookup_stret + .long LExit_objc_msgLookupSuper2 + .long LExit_objc_msgLookupSuper2_stret + .long 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. 
+* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +7: + .section __DATA,__objc_msg_break + .long 7b + .long ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +7: + .section __DATA,__objc_msg_break + .long 7b + .long FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +7: + .section __DATA,__objc_msg_break + .long 7b + .long SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +7: + .section __DATA,__objc_msg_break + .long 7b + .long NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * Names for relative labels + * DO NOT USE THESE LABELS ELSEWHERE + * Reserved labels: 6: 7: 8: 9: + ********************************************************************/ +// 6: used by CacheLookup +// 7: used by MI_GET_ADDRESS etc and MESSENGER_START etc +// 8: used by CacheLookup +#define LNilReceiver 9 +#define LNilReceiver_f 9f +#define LNilReceiver_b 9b + + +/******************************************************************** + * Macro parameters + ********************************************************************/ + +#define NORMAL 0 +#define STRET 1 + + +/******************************************************************** + * + * Structure definitions. + * + ********************************************************************/ + +/* objc_super parameter to sendSuper */ +#define RECEIVER 0 +#define CLASS 4 + +/* Selected field offsets in class structure */ +#define ISA 0 +#define SUPERCLASS 4 +#define CACHE 8 +#define CACHE_MASK 12 + +/* Selected field offsets in method structure */ +#define METHOD_NAME 0 +#define METHOD_TYPES 4 +#define METHOD_IMP 8 + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY /* name */ + .text + .thumb + .align 5 + .globl $0 + .thumb_func +$0: +.endmacro + +.macro STATIC_ENTRY /*name*/ + .text + .thumb + .align 5 + .private_extern $0 + .thumb_func +$0: +.endmacro + + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY /* name */ +LExit$0: +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// CacheLookup NORMAL|STRET +// CacheLookup2 NORMAL|STRET +// +// Locate the implementation for a selector in a class's method cache. 
+// +// Takes: +// $0 = NORMAL, STRET +// r0 or r1 (STRET) = receiver +// r1 or r2 (STRET) = selector +// r9 = class to search in +// +// On exit: r9 clobbered +// (found) continues after CacheLookup, IMP in r12, eq set +// (not found) continues after CacheLookup2 +// +///////////////////////////////////////////////////////////////////// + +.macro CacheLookup + + ldrh r12, [r9, #CACHE_MASK] // r12 = mask + ldr r9, [r9, #CACHE] // r9 = buckets +.if $0 == STRET + and r12, r12, r2 // r12 = index = SEL & mask +.else + and r12, r12, r1 // r12 = index = SEL & mask +.endif + add r9, r9, r12, LSL #3 // r9 = bucket = buckets+index*8 + ldr r12, [r9] // r12 = bucket->sel +6: +.if $0 == STRET + teq r12, r2 +.else + teq r12, r1 +.endif + bne 8f + ldr r12, [r9, #4] // r12 = bucket->imp + +.if $0 == STRET + tst r12, r12 // set ne for stret forwarding +.else + // eq already set for nonstret forwarding by `teq` above +.endif + +.endmacro + +.macro CacheLookup2 + +8: + cmp r12, #1 + blo 8f // if (bucket->sel == 0) cache miss + it eq // if (bucket->sel == 1) cache wrap + ldreq r9, [r9, #4] // bucket->imp is before first bucket + ldr r12, [r9, #8]! // r12 = (++bucket)->sel + b 6b +8: + +.endmacro + +///////////////////////////////////////////////////////////////////// +// +// GetClassFromIsa return-type +// +// Given an Isa, return the class for the Isa. +// +// Takes: +// r9 = class +// +// On exit: r12 clobbered +// r9 contains the class for this Isa. +// +///////////////////////////////////////////////////////////////////// +.macro GetClassFromIsa + +#if SUPPORT_INDEXED_ISA + // Note: We are doing a little wasted work here to load values we might not + // need. Branching turns out to be even worse when performance was measured. + MI_GET_ADDRESS(r12, _objc_indexed_classes) + tst.w r9, #ISA_INDEX_IS_NPI + itt ne + ubfxne r9, r9, #ISA_INDEX_SHIFT, #ISA_INDEX_BITS + ldrne.w r9, [r12, r9, lsl #2] +#endif + +.endmacro + + +/******************************************************************** + * IMP cache_getImp(Class cls, SEL sel) + * + * On entry: r0 = class whose cache is to be searched + * r1 = selector to search for + * + * If found, returns method implementation. + * If not found, returns NULL. 
+ ********************************************************************/ + + STATIC_ENTRY _cache_getImp + + mov r9, r0 + CacheLookup NORMAL + // cache hit, IMP in r12 + mov r0, r12 + bx lr // return imp + + CacheLookup2 GETIMP + // cache miss, return nil + mov r0, #0 + bx lr + + END_ENTRY _cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd, ...); + * IMP objc_msgLookup(id self, SEL _cmd, ...); + * + * objc_msgLookup ABI: + * IMP returned in r12 + * Forwarding returned in Z flag + * r9 reserved for our use but not used + * + ********************************************************************/ + + ENTRY _objc_msgSend + MESSENGER_START + + cbz r0, LNilReceiver_f + + ldr r9, [r0] // r9 = self->isa + GetClassFromIsa // r9 = class + CacheLookup NORMAL + // cache hit, IMP in r12, eq already set for nonstret forwarding + MESSENGER_END_FAST + bx r12 // call imp + + CacheLookup2 NORMAL + // cache miss + ldr r9, [r0] // r9 = self->isa + GetClassFromIsa // r9 = class + MESSENGER_END_SLOW + b __objc_msgSend_uncached + +LNilReceiver: + // r0 is already zero + mov r1, #0 + mov r2, #0 + mov r3, #0 + FP_RETURN_ZERO + MESSENGER_END_NIL + bx lr + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgLookup + + cbz r0, LNilReceiver_f + + ldr r9, [r0] // r9 = self->isa + GetClassFromIsa // r9 = class + CacheLookup NORMAL + // cache hit, IMP in r12, eq already set for nonstret forwarding + bx lr + + CacheLookup2 NORMAL + // cache miss + ldr r9, [r0] // r9 = self->isa + GetClassFromIsa // r9 = class + b __objc_msgLookup_uncached + +LNilReceiver: + MI_GET_ADDRESS(r12, __objc_msgNil) + bx lr + + END_ENTRY _objc_msgLookup + + + STATIC_ENTRY __objc_msgNil + + // r0 is already zero + mov r1, #0 + mov r2, #0 + mov r3, #0 + FP_RETURN_ZERO + bx lr + + END_ENTRY __objc_msgNil + + +/******************************************************************** + * void objc_msgSend_stret(void *st_addr, id self, SEL op, ...); + * IMP objc_msgLookup_stret(void *st_addr, id self, SEL op, ...); + * + * objc_msgSend_stret is the struct-return form of msgSend. + * The ABI calls for r0 to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. 
+ * + * On entry: r0 is the address where the structure is returned, + * r1 is the message receiver, + * r2 is the selector + ********************************************************************/ + + ENTRY _objc_msgSend_stret + MESSENGER_START + + cbz r1, LNilReceiver_f + + ldr r9, [r1] // r9 = self->isa + GetClassFromIsa // r9 = class + CacheLookup STRET + // cache hit, IMP in r12, ne already set for stret forwarding + MESSENGER_END_FAST + bx r12 + + CacheLookup2 STRET + // cache miss + ldr r9, [r1] // r9 = self->isa + GetClassFromIsa // r9 = class + MESSENGER_END_SLOW + b __objc_msgSend_stret_uncached + +LNilReceiver: + MESSENGER_END_NIL + bx lr + + END_ENTRY _objc_msgSend_stret + + + ENTRY _objc_msgLookup_stret + + cbz r1, LNilReceiver_f + + ldr r9, [r1] // r9 = self->isa + GetClassFromIsa // r9 = class + CacheLookup STRET + // cache hit, IMP in r12, ne already set for stret forwarding + bx lr + + CacheLookup2 STRET + // cache miss + ldr r9, [r1] // r9 = self->isa + GetClassFromIsa // r9 = class + b __objc_msgLookup_stret_uncached + +LNilReceiver: + MI_GET_ADDRESS(r12, __objc_msgNil_stret) + bx lr + + END_ENTRY _objc_msgLookup_stret + + + STATIC_ENTRY __objc_msgNil_stret + + bx lr + + END_ENTRY __objc_msgNil_stret + + +/******************************************************************** + * id objc_msgSendSuper(struct objc_super *super, SEL op, ...) + * + * struct objc_super { + * id receiver; + * Class cls; // the class to search + * } + ********************************************************************/ + + ENTRY _objc_msgSendSuper + MESSENGER_START + + ldr r9, [r0, #CLASS] // r9 = struct super->class + CacheLookup NORMAL + // cache hit, IMP in r12, eq already set for nonstret forwarding + ldr r0, [r0, #RECEIVER] // load real receiver + MESSENGER_END_FAST + bx r12 // call imp + + CacheLookup2 NORMAL + // cache miss + ldr r9, [r0, #CLASS] // r9 = struct super->class + ldr r0, [r0, #RECEIVER] // load real receiver + MESSENGER_END_SLOW + b __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + +/******************************************************************** + * id objc_msgSendSuper2(struct objc_super *super, SEL op, ...) 
+ * + * struct objc_super { + * id receiver; + * Class cls; // SUBCLASS of the class to search + * } + ********************************************************************/ + + ENTRY _objc_msgSendSuper2 + MESSENGER_START + + ldr r9, [r0, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + CacheLookup NORMAL + // cache hit, IMP in r12, eq already set for nonstret forwarding + ldr r0, [r0, #RECEIVER] // load real receiver + MESSENGER_END_FAST + bx r12 // call imp + + CacheLookup2 NORMAL + // cache miss + ldr r9, [r0, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + ldr r0, [r0, #RECEIVER] // load real receiver + MESSENGER_END_SLOW + b __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgLookupSuper2 + + ldr r9, [r0, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + CacheLookup NORMAL + // cache hit, IMP in r12, eq already set for nonstret forwarding + ldr r0, [r0, #RECEIVER] // load real receiver + bx lr + + CacheLookup2 NORMAL + // cache miss + ldr r9, [r0, #CLASS] + ldr r9, [r9, #SUPERCLASS] // r9 = class to search + ldr r0, [r0, #RECEIVER] // load real receiver + b __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookupSuper2 + + +/******************************************************************** + * void objc_msgSendSuper_stret(void *st_addr, objc_super *self, SEL op, ...); + * + * objc_msgSendSuper_stret is the struct-return form of msgSendSuper. + * The ABI calls for r0 to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. + * + * On entry: r0 is the address where the structure is returned, + * r1 is the address of the objc_super structure, + * r2 is the selector + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + MESSENGER_START + + ldr r9, [r1, #CLASS] // r9 = struct super->class + CacheLookup STRET + // cache hit, IMP in r12, ne already set for stret forwarding + ldr r1, [r1, #RECEIVER] // load real receiver + MESSENGER_END_FAST + bx r12 // call imp + + CacheLookup2 STRET + // cache miss + ldr r9, [r1, #CLASS] // r9 = struct super->class + ldr r1, [r1, #RECEIVER] // load real receiver + MESSENGER_END_SLOW + b __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper_stret + + +/******************************************************************** + * id objc_msgSendSuper2_stret + ********************************************************************/ + + ENTRY _objc_msgSendSuper2_stret + MESSENGER_START + + ldr r9, [r1, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + CacheLookup STRET + // cache hit, IMP in r12, ne already set for stret forwarding + ldr r1, [r1, #RECEIVER] // load real receiver + MESSENGER_END_FAST + bx r12 // call imp + + CacheLookup2 STRET + // cache miss + ldr r9, [r1, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + ldr r1, [r1, #RECEIVER] // load real receiver + MESSENGER_END_SLOW + b __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper2_stret + + + ENTRY _objc_msgLookupSuper2_stret + + ldr r9, [r1, #CLASS] // class = struct super->class + ldr r9, [r9, #SUPERCLASS] // class = class->superclass + CacheLookup STRET + // cache hit, IMP in r12, ne already set for stret forwarding + ldr r1, [r1, #RECEIVER] // load real receiver + bx lr + + CacheLookup2 STRET + // cache miss + 
ldr r9, [r1, #CLASS] + ldr r9, [r9, #SUPERCLASS] // r9 = class to search + ldr r1, [r1, #RECEIVER] // load real receiver + b __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookupSuper2_stret + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup NORMAL|STRET +// +// Locate the implementation for a selector in a class's method lists. +// +// Takes: +// $0 = NORMAL, STRET +// r0 or r1 (STRET) = receiver +// r1 or r2 (STRET) = selector +// r9 = class to search in +// +// On exit: IMP in r12, eq/ne set for forwarding +// +///////////////////////////////////////////////////////////////////// + +.macro MethodTableLookup + + stmfd sp!, {r0-r3,r7,lr} + add r7, sp, #16 + sub sp, #8 // align stack + FP_SAVE + +.if $0 == NORMAL + // receiver already in r0 + // selector already in r1 +.else + mov r0, r1 // receiver + mov r1, r2 // selector +.endif + mov r2, r9 // class to search + + blx __class_lookupMethodAndLoadCache3 + mov r12, r0 // r12 = IMP + +.if $0 == NORMAL + cmp r12, r12 // set eq for nonstret forwarding +.else + tst r12, r12 // set ne for stret forwarding +.endif + + FP_RESTORE + add sp, #8 // align stack + ldmfd sp!, {r0-r3,r7,lr} + +.endmacro + + +/******************************************************************** + * + * _objc_msgSend_uncached + * _objc_msgSend_stret_uncached + * _objc_msgLookup_uncached + * _objc_msgLookup_stret_uncached + * The uncached method lookup. + * + ********************************************************************/ + + STATIC_ENTRY __objc_msgSend_uncached + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r9 is the class to search + + MethodTableLookup NORMAL // returns IMP in r12 + bx r12 + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgSend_stret_uncached + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r9 is the class to search + + MethodTableLookup STRET // returns IMP in r12 + bx r12 + + END_ENTRY __objc_msgSend_stret_uncached + + + STATIC_ENTRY __objc_msgLookup_uncached + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r9 is the class to search + + MethodTableLookup NORMAL // returns IMP in r12 + bx lr + + END_ENTRY __objc_msgLookup_uncached + + + STATIC_ENTRY __objc_msgLookup_stret_uncached + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r9 is the class to search + + MethodTableLookup STRET // returns IMP in r12 + bx lr + + END_ENTRY __objc_msgLookup_stret_uncached + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. 
+* +********************************************************************/ + + MI_EXTERN(__objc_forward_handler) + MI_EXTERN(__objc_forward_stret_handler) + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band Z is 0 (EQ) for normal, 1 (NE) for stret + + MESSENGER_START + nop + MESSENGER_END_SLOW + + beq __objc_msgForward + b __objc_msgForward_stret + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-stret version + + MI_GET_EXTERN(r12, __objc_forward_handler) + ldr r12, [r12] + bx r12 + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct-return version + + MI_GET_EXTERN(r12, __objc_forward_stret_handler) + ldr r12, [r12] + bx r12 + + END_ENTRY __objc_msgForward_stret + + + ENTRY _objc_msgSend_noarg + b _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + ENTRY _objc_msgSend_debug + b _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + b _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + ENTRY _objc_msgSend_stret_debug + b _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_debug + + ENTRY _objc_msgSendSuper2_stret_debug + b _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_debug + + + ENTRY _method_invoke + // r1 is method triplet instead of SEL + ldr r12, [r1, #METHOD_IMP] + ldr r1, [r1, #METHOD_NAME] + bx r12 + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + // r2 is method triplet instead of SEL + ldr r12, [r2, #METHOD_IMP] + ldr r2, [r2, #METHOD_NAME] + bx r12 + END_ENTRY _method_invoke_stret + + +.section __DATA,__objc_msg_break +.long 0 +.long 0 + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-arm64.s b/runtime/Messengers.subproj/objc-msg-arm64.s new file mode 100755 index 0000000..c24de97 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-arm64.s @@ -0,0 +1,561 @@ +/* + * @APPLE_LICENSE_HEADER_START@ + * + * Copyright (c) 2011 Apple Inc. All Rights Reserved. + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/******************************************************************** + * + * objc-msg-arm64.s - ARM64 code to support objc messaging + * + ********************************************************************/ + +#ifdef __arm64__ + +#include + + +.data + +// _objc_entryPoints and _objc_exitPoints are used by method dispatch +// caching code to figure out whether any threads are actively +// in the cache for dispatching. The labels surround the asm code +// that do cache lookups. The tables are zero-terminated. 
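+//
+// Illustrative sketch (not part of the original source): the cache code in
+// objc-cache.mm walks these zero-terminated tables before freeing old cache
+// buckets, treating each entry/exit pair as a PC range that some thread may
+// currently be executing in. Assuming C-side declarations that mirror the
+// labels below (the helper name and declarations here are hypothetical),
+// the check is roughly:
+//
+//   #include <stdint.h>
+//
+//   extern uintptr_t objc_entryPoints[];   // assumption: C view of _objc_entryPoints
+//   extern uintptr_t objc_exitPoints[];    // assumption: C view of _objc_exitPoints
+//
+//   static int pc_is_in_messenger(uintptr_t pc) {
+//       // Tables are parallel and zero-terminated: entry[i]..exit[i]
+//       // brackets the cache-lookup code of one messenger.
+//       for (int i = 0; objc_entryPoints[i] != 0; i++) {
+//           if (pc >= objc_entryPoints[i] && pc <= objc_exitPoints[i]) return 1;
+//       }
+//       return 0;
+//   }
+//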
+ +.align 4 +.private_extern _objc_entryPoints +_objc_entryPoints: + .quad _cache_getImp + .quad _objc_msgSend + .quad _objc_msgSendSuper + .quad _objc_msgSendSuper2 + .quad _objc_msgLookup + .quad _objc_msgLookupSuper2 + .quad 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .quad LExit_cache_getImp + .quad LExit_objc_msgSend + .quad LExit_objc_msgSendSuper + .quad LExit_objc_msgSendSuper2 + .quad LExit_objc_msgLookup + .quad LExit_objc_msgLookupSuper2 + .quad 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad NIL_EXIT + .text +.endmacro + + +/* objc_super parameter to sendSuper */ +#define RECEIVER 0 +#define CLASS 8 + +/* Selected field offsets in class structure */ +#define SUPERCLASS 8 +#define CACHE 16 + +/* Selected field offsets in isa field */ +#define ISA_MASK 0x0000000ffffffff8 + +/* Selected field offsets in method structure */ +#define METHOD_NAME 0 +#define METHOD_TYPES 8 +#define METHOD_IMP 16 + + +/******************************************************************** + * ENTRY functionName + * STATIC_ENTRY functionName + * END_ENTRY functionName + ********************************************************************/ + +.macro ENTRY /* name */ + .text + .align 5 + .globl $0 +$0: +.endmacro + +.macro STATIC_ENTRY /*name*/ + .text + .align 5 + .private_extern $0 +$0: +.endmacro + +.macro END_ENTRY /* name */ +LExit$0: +.endmacro + + +/******************************************************************** + * UNWIND name, flags + * Unwind info generation + ********************************************************************/ +.macro UNWIND + .section __LD,__compact_unwind,regular,debug + .quad $0 + .set LUnwind$0, LExit$0 - $0 + .long LUnwind$0 + .long $1 + .quad 0 /* no personality */ + .quad 0 /* no LSDA */ + .text +.endmacro + +#define NoFrame 0x02000000 // no frame, no SP adjustment +#define FrameWithNoSaves 0x04000000 // frame, no non-volatile saves + + +/******************************************************************** + * + * CacheLookup NORMAL|GETIMP|LOOKUP + * + * Locate the implementation for a selector in a class method cache. 
+ * + * Takes: + * x1 = selector + * x16 = class to be searched + * + * Kills: + * x9,x10,x11,x12, x17 + * + * On exit: (found) calls or returns IMP + * with x16 = class, x17 = IMP + * (not found) jumps to LCacheMiss + * + ********************************************************************/ + +#define NORMAL 0 +#define GETIMP 1 +#define LOOKUP 2 + +.macro CacheHit +.if $0 == NORMAL + MESSENGER_END_FAST + br x17 // call imp +.elseif $0 == GETIMP + mov x0, x17 // return imp + ret +.elseif $0 == LOOKUP + ret // return imp via x17 +.else +.abort oops +.endif +.endmacro + +.macro CheckMiss + // miss if bucket->sel == 0 +.if $0 == GETIMP + cbz x9, LGetImpMiss +.elseif $0 == NORMAL + cbz x9, __objc_msgSend_uncached +.elseif $0 == LOOKUP + cbz x9, __objc_msgLookup_uncached +.else +.abort oops +.endif +.endmacro + +.macro JumpMiss +.if $0 == GETIMP + b LGetImpMiss +.elseif $0 == NORMAL + b __objc_msgSend_uncached +.elseif $0 == LOOKUP + b __objc_msgLookup_uncached +.else +.abort oops +.endif +.endmacro + +.macro CacheLookup + // x1 = SEL, x16 = isa + ldp x10, x11, [x16, #CACHE] // x10 = buckets, x11 = occupied|mask + and w12, w1, w11 // x12 = _cmd & mask + add x12, x10, x12, LSL #4 // x12 = buckets + ((_cmd & mask)<<4) + + ldp x9, x17, [x12] // {x9, x17} = *bucket +1: cmp x9, x1 // if (bucket->sel != _cmd) + b.ne 2f // scan more + CacheHit $0 // call or return imp + +2: // not hit: x12 = not-hit bucket + CheckMiss $0 // miss if bucket->sel == 0 + cmp x12, x10 // wrap if bucket == buckets + b.eq 3f + ldp x9, x17, [x12, #-16]! // {x9, x17} = *--bucket + b 1b // loop + +3: // wrap: x12 = first bucket, w11 = mask + add x12, x12, w11, UXTW #4 // x12 = buckets+(mask<<4) + + // Clone scanning loop to miss instead of hang when cache is corrupt. + // The slow path may detect any corruption and halt later. + + ldp x9, x17, [x12] // {x9, x17} = *bucket +1: cmp x9, x1 // if (bucket->sel != _cmd) + b.ne 2f // scan more + CacheHit $0 // call or return imp + +2: // not hit: x12 = not-hit bucket + CheckMiss $0 // miss if bucket->sel == 0 + cmp x12, x10 // wrap if bucket == buckets + b.eq 3f + ldp x9, x17, [x12, #-16]! 
// {x9, x17} = *--bucket + b 1b // loop + +3: // double wrap + JumpMiss $0 + +.endmacro + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd, ...); + * IMP objc_msgLookup(id self, SEL _cmd, ...); + * + * objc_msgLookup ABI: + * IMP returned in x17 + * x16 reserved for our use but not used + * + ********************************************************************/ + + .data + .align 3 + .globl _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + .globl _objc_debug_taggedpointer_ext_classes +_objc_debug_taggedpointer_ext_classes: + .fill 256, 8, 0 + + ENTRY _objc_msgSend + UNWIND _objc_msgSend, NoFrame + MESSENGER_START + + cmp x0, #0 // nil check and tagged pointer check + b.le LNilOrTagged // (MSB tagged pointer looks negative) + ldr x13, [x0] // x13 = isa + and x16, x13, #ISA_MASK // x16 = class +LGetIsaDone: + CacheLookup NORMAL // calls imp or objc_msgSend_uncached + +LNilOrTagged: + b.eq LReturnZero // nil check + + // tagged + mov x10, #0xf000000000000000 + cmp x0, x10 + b.hs LExtTag + adrp x10, _objc_debug_taggedpointer_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF + ubfx x11, x0, #60, #4 + ldr x16, [x10, x11, LSL #3] + b LGetIsaDone + +LExtTag: + // ext tagged + adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF + ubfx x11, x0, #52, #8 + ldr x16, [x10, x11, LSL #3] + b LGetIsaDone + +LReturnZero: + // x0 is already zero + mov x1, #0 + movi d0, #0 + movi d1, #0 + movi d2, #0 + movi d3, #0 + MESSENGER_END_NIL + ret + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgLookup + UNWIND _objc_msgLookup, NoFrame + + cmp x0, #0 // nil check and tagged pointer check + b.le LLookup_NilOrTagged // (MSB tagged pointer looks negative) + ldr x13, [x0] // x13 = isa + and x16, x13, #ISA_MASK // x16 = class +LLookup_GetIsaDone: + CacheLookup LOOKUP // returns imp + +LLookup_NilOrTagged: + b.eq LLookup_Nil // nil check + + // tagged + mov x10, #0xf000000000000000 + cmp x0, x10 + b.hs LLookup_ExtTag + adrp x10, _objc_debug_taggedpointer_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_classes@PAGEOFF + ubfx x11, x0, #60, #4 + ldr x16, [x10, x11, LSL #3] + b LLookup_GetIsaDone + +LLookup_ExtTag: + adrp x10, _objc_debug_taggedpointer_ext_classes@PAGE + add x10, x10, _objc_debug_taggedpointer_ext_classes@PAGEOFF + ubfx x11, x0, #52, #8 + ldr x16, [x10, x11, LSL #3] + b LLookup_GetIsaDone + +LLookup_Nil: + adrp x17, __objc_msgNil@PAGE + add x17, x17, __objc_msgNil@PAGEOFF + ret + + END_ENTRY _objc_msgLookup + + + STATIC_ENTRY __objc_msgNil + + // x0 is already zero + mov x1, #0 + movi d0, #0 + movi d1, #0 + movi d2, #0 + movi d3, #0 + ret + + END_ENTRY __objc_msgNil + + + ENTRY _objc_msgSendSuper + UNWIND _objc_msgSendSuper, NoFrame + MESSENGER_START + + ldp x0, x16, [x0] // x0 = real receiver, x16 = class + CacheLookup NORMAL // calls imp or objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + // no _objc_msgLookupSuper + + ENTRY _objc_msgSendSuper2 + UNWIND _objc_msgSendSuper2, NoFrame + MESSENGER_START + + ldp x0, x16, [x0] // x0 = real receiver, x16 = class + ldr x16, [x16, #SUPERCLASS] // x16 = class->superclass + CacheLookup NORMAL + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgLookupSuper2 + UNWIND _objc_msgLookupSuper2, NoFrame + + ldp x0, x16, [x0] // x0 = real receiver, x16 = class + ldr x16, [x16, #SUPERCLASS] // x16 = class->superclass + CacheLookup LOOKUP + + END_ENTRY _objc_msgLookupSuper2 
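+
+// Illustrative note (not part of the original source): the MethodTableLookup
+// macro below spills the parameter registers and calls the C-side slow-path
+// lookup. From the register usage (x0 = receiver, x1 = selector, x2 = class,
+// IMP returned in x0 and moved to x17) its prototype is effectively:
+//
+//   IMP _class_lookupMethodAndLoadCache3(id receiver, SEL sel, Class cls);
+//
+// The messengers reach it only on a cache miss; it performs the full method
+// list search, fills the class's method cache, and returns the IMP that the
+// uncached entry points then branch to via x17.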
+ + +.macro MethodTableLookup + + // push frame + stp fp, lr, [sp, #-16]! + mov fp, sp + + // save parameter registers: x0..x8, q0..q7 + sub sp, sp, #(10*8 + 8*16) + stp q0, q1, [sp, #(0*16)] + stp q2, q3, [sp, #(2*16)] + stp q4, q5, [sp, #(4*16)] + stp q6, q7, [sp, #(6*16)] + stp x0, x1, [sp, #(8*16+0*8)] + stp x2, x3, [sp, #(8*16+2*8)] + stp x4, x5, [sp, #(8*16+4*8)] + stp x6, x7, [sp, #(8*16+6*8)] + str x8, [sp, #(8*16+8*8)] + + // receiver and selector already in x0 and x1 + mov x2, x16 + bl __class_lookupMethodAndLoadCache3 + + // imp in x0 + mov x17, x0 + + // restore registers and return + ldp q0, q1, [sp, #(0*16)] + ldp q2, q3, [sp, #(2*16)] + ldp q4, q5, [sp, #(4*16)] + ldp q6, q7, [sp, #(6*16)] + ldp x0, x1, [sp, #(8*16+0*8)] + ldp x2, x3, [sp, #(8*16+2*8)] + ldp x4, x5, [sp, #(8*16+4*8)] + ldp x6, x7, [sp, #(8*16+6*8)] + ldr x8, [sp, #(8*16+8*8)] + + mov sp, fp + ldp fp, lr, [sp], #16 + +.endmacro + + STATIC_ENTRY __objc_msgSend_uncached + UNWIND __objc_msgSend_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band x16 is the class to search + + MethodTableLookup + br x17 + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgLookup_uncached + UNWIND __objc_msgLookup_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band x16 is the class to search + + MethodTableLookup + ret + + END_ENTRY __objc_msgLookup_uncached + + + STATIC_ENTRY _cache_getImp + + and x16, x0, #ISA_MASK + CacheLookup GETIMP + +LGetImpMiss: + mov x0, #0 + ret + + END_ENTRY _cache_getImp + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward is the externally-callable +* function returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + + STATIC_ENTRY __objc_msgForward_impcache + + MESSENGER_START + nop + MESSENGER_END_SLOW + + // No stret specialization. + b __objc_msgForward + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + + adrp x17, __objc_forward_handler@PAGE + ldr x17, [x17, __objc_forward_handler@PAGEOFF] + br x17 + + END_ENTRY __objc_msgForward + + + ENTRY _objc_msgSend_noarg + b _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + ENTRY _objc_msgSend_debug + b _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + b _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + + ENTRY _method_invoke + // x1 is method triplet instead of SEL + ldr x17, [x1, #METHOD_IMP] + ldr x1, [x1, #METHOD_NAME] + br x17 + END_ENTRY _method_invoke + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-i386.s b/runtime/Messengers.subproj/objc-msg-i386.s new file mode 100644 index 0000000..8a64ba0 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-i386.s @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#if defined(__i386__) && !TARGET_OS_SIMULATOR + +/******************************************************************** + ******************************************************************** + ** + ** objc-msg-i386.s - i386 code to support objc messaging. + ** + ******************************************************************** + ********************************************************************/ + + +/******************************************************************** +* Data used by the ObjC runtime. +* +********************************************************************/ + +.data + +// _objc_entryPoints and _objc_exitPoints are used by objc +// to get the critical regions for which method caches +// cannot be garbage collected. + +.align 2 +.private_extern _objc_entryPoints +_objc_entryPoints: + .long __cache_getImp + .long __cache_getMethod + .long _objc_msgSend + .long _objc_msgSend_fpret + .long _objc_msgSend_stret + .long _objc_msgSendSuper + .long _objc_msgSendSuper_stret + .long 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .long LGetImpExit + .long LGetMethodExit + .long LMsgSendExit + .long LMsgSendFpretExit + .long LMsgSendStretExit + .long LMsgSendSuperExit + .long LMsgSendSuperStretExit + .long 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .long 4b + .long ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .long 4b + .long FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .long 4b + .long SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .long 4b + .long NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * + * Common offsets. + * + ********************************************************************/ + + self = 4 + super = 4 + selector = 8 + marg_size = 12 + marg_list = 16 + first_arg = 12 + + struct_addr = 4 + + self_stret = 8 + super_stret = 8 + selector_stret = 12 + marg_size_stret = 16 + marg_list_stret = 20 + + +/******************************************************************** + * + * Structure definitions. 
+ * + ********************************************************************/ + +// objc_super parameter to sendSuper + receiver = 0 + class = 4 + +// Selected field offsets in class structure + isa = 0 + cache = 32 + +// Method descriptor + method_name = 0 + method_imp = 8 + +// Cache header + mask = 0 + occupied = 4 + buckets = 8 // variable length array + +#if defined(OBJC_INSTRUMENTED) +// Cache instrumentation data, follows buckets + hitCount = 0 + hitProbes = hitCount + 4 + maxHitProbes = hitProbes + 4 + missCount = maxHitProbes + 4 + missProbes = missCount + 4 + maxMissProbes = missProbes + 4 + flushCount = maxMissProbes + 4 + flushedEntries = flushCount + 4 + +// Buckets in CacheHitHistogram and CacheMissHistogram + CACHE_HISTOGRAM_SIZE = 512 +#endif + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY + .text + .globl $0 + .align 4, 0x90 +$0: +.endmacro + +.macro STATIC_ENTRY + .text + .private_extern $0 + .align 4, 0x90 +$0: +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// CALL_MCOUNTER +// +// Calls mcount() profiling routine. Must be called immediately on +// function entry, before any prologue executes. +// +////////////////////////////////////////////////////////////////////// + +.macro CALL_MCOUNTER +#ifdef PROFILE + // Current stack contents: ret + pushl %ebp + movl %esp,%ebp + subl $$8,%esp + // Current stack contents: ret, ebp, pad, pad + call mcount + movl %ebp,%esp + popl %ebp +#endif +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// +// CacheLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER | CACHE_GET, cacheMissLabel +// +// Locate the implementation for a selector in a class method cache. +// +// Takes: WORD_RETURN (first parameter is at sp+4) +// STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8) +// MSG_SEND (first parameter is receiver) +// MSG_SENDSUPER (first parameter is address of objc_super structure) +// CACHE_GET (first parameter is class; return method triplet) +// selector in %ecx +// class to search in %edx +// +// cacheMissLabel = label to branch to iff method is not cached +// +// On exit: (found) MSG_SEND and MSG_SENDSUPER: return imp in eax +// (found) CACHE_GET: return method triplet in eax +// (not found) jumps to cacheMissLabel +// +///////////////////////////////////////////////////////////////////// + + +// Values to specify to method lookup macros whether the return type of +// the method is word or structure. +WORD_RETURN = 0 +STRUCT_RETURN = 1 + +// Values to specify to method lookup macros whether the first argument +// is an object/class reference or a 'objc_super' structure. 
+MSG_SEND = 0 // first argument is receiver, search the isa +MSG_SENDSUPER = 1 // first argument is objc_super, search the class +CACHE_GET = 2 // first argument is class, search that class + +.macro CacheLookup + +// load variables and save caller registers. + + pushl %edi // save scratch register + movl cache(%edx), %edi // cache = class->cache + pushl %esi // save scratch register + +#if defined(OBJC_INSTRUMENTED) + pushl %ebx // save non-volatile register + pushl %eax // save cache pointer + xorl %ebx, %ebx // probeCount = 0 +#endif + movl mask(%edi), %esi // mask = cache->mask + movl %ecx, %edx // index = selector + shrl $$2, %edx // index = selector >> 2 + +// search the receiver's cache +// ecx = selector +// edi = cache +// esi = mask +// edx = index +// eax = method (soon) +LMsgSendProbeCache_$0_$1_$2: +#if defined(OBJC_INSTRUMENTED) + addl $$1, %ebx // probeCount += 1 +#endif + andl %esi, %edx // index &= mask + movl buckets(%edi, %edx, 4), %eax // meth = cache->buckets[index] + + testl %eax, %eax // check for end of bucket + je LMsgSendCacheMiss_$0_$1_$2 // go to cache miss code + cmpl method_name(%eax), %ecx // check for method name match + je LMsgSendCacheHit_$0_$1_$2 // go handle cache hit + addl $$1, %edx // bump index ... + jmp LMsgSendProbeCache_$0_$1_$2 // ... and loop + +// not found in cache: restore state and go to callers handler +LMsgSendCacheMiss_$0_$1_$2: +#if defined(OBJC_INSTRUMENTED) + popl %edx // retrieve cache pointer + movl mask(%edx), %esi // mask = cache->mask + testl %esi, %esi // a mask of zero is only for the... + je LMsgSendMissInstrumentDone_$0_$1_$2 // ... emptyCache, do not record anything + + // locate and update the CacheInstrumentation structure + addl $$1, %esi // entryCount = mask + 1 + shll $$2, %esi // tableSize = entryCount * sizeof(entry) + addl $buckets, %esi // offset = buckets + tableSize + addl %edx, %esi // cacheData = &cache->buckets[mask+1] + + movl missCount(%esi), %edi // + addl $$1, %edi // + movl %edi, missCount(%esi) // cacheData->missCount += 1 + movl missProbes(%esi), %edi // + addl %ebx, %edi // + movl %edi, missProbes(%esi) // cacheData->missProbes += probeCount + movl maxMissProbes(%esi), %edi// if (cacheData->maxMissProbes < probeCount) + cmpl %ebx, %edi // + jge LMsgSendMaxMissProbeOK_$0_$1_$2 // + movl %ebx, maxMissProbes(%esi)// cacheData->maxMissProbes = probeCount +LMsgSendMaxMissProbeOK_$0_$1_$2: + + // update cache miss probe histogram + cmpl $CACHE_HISTOGRAM_SIZE, %ebx // pin probeCount to max index + jl LMsgSendMissHistoIndexSet_$0_$1_$2 + movl $(CACHE_HISTOGRAM_SIZE-1), %ebx +LMsgSendMissHistoIndexSet_$0_$1_$2: + LEA_STATIC_DATA %esi, _CacheMissHistogram, EXTERNAL_SYMBOL + shll $$2, %ebx // convert probeCount to histogram index + addl %ebx, %esi // calculate &CacheMissHistogram[probeCount<<2] + movl 0(%esi), %edi // get current tally + addl $$1, %edi // + movl %edi, 0(%esi) // tally += 1 +LMsgSendMissInstrumentDone_$0_$1_$2: + popl %ebx // restore non-volatile register +#endif + +.if $0 == WORD_RETURN // Regular word return +.if $1 == MSG_SEND // MSG_SEND + popl %esi // restore callers register + popl %edi // restore callers register + movl self(%esp), %edx // get messaged object + movl isa(%edx), %eax // get objects class +.elseif $1 == MSG_SENDSUPER // MSG_SENDSUPER + // replace "super" arg with "receiver" + movl super+8(%esp), %edi // get super structure + movl receiver(%edi), %edx // get messaged object + movl %edx, super+8(%esp) // make it the first argument + movl class(%edi), %eax // get messaged class + 
popl %esi // restore callers register + popl %edi // restore callers register +.else // CACHE_GET + popl %esi // restore callers register + popl %edi // restore callers register +.endif +.else // Struct return +.if $1 == MSG_SEND // MSG_SEND (stret) + popl %esi // restore callers register + popl %edi // restore callers register + movl self_stret(%esp), %edx // get messaged object + movl isa(%edx), %eax // get objects class +.elseif $1 == MSG_SENDSUPER // MSG_SENDSUPER (stret) + // replace "super" arg with "receiver" + movl super_stret+8(%esp), %edi// get super structure + movl receiver(%edi), %edx // get messaged object + movl %edx, super_stret+8(%esp)// make it the first argument + movl class(%edi), %eax // get messaged class + popl %esi // restore callers register + popl %edi // restore callers register +.else // CACHE_GET + !! This should not happen. +.endif +.endif + + // edx = receiver + // ecx = selector + // eax = class + jmp $2 // go to callers handler + +// eax points to matching cache entry + .align 4, 0x90 +LMsgSendCacheHit_$0_$1_$2: +#if defined(OBJC_INSTRUMENTED) + popl %edx // retrieve cache pointer + movl mask(%edx), %esi // mask = cache->mask + testl %esi, %esi // a mask of zero is only for the... + je LMsgSendHitInstrumentDone_$0_$1_$2 // ... emptyCache, do not record anything + + // locate and update the CacheInstrumentation structure + addl $$1, %esi // entryCount = mask + 1 + shll $$2, %esi // tableSize = entryCount * sizeof(entry) + addl $buckets, %esi // offset = buckets + tableSize + addl %edx, %esi // cacheData = &cache->buckets[mask+1] + + movl hitCount(%esi), %edi + addl $$1, %edi + movl %edi, hitCount(%esi) // cacheData->hitCount += 1 + movl hitProbes(%esi), %edi + addl %ebx, %edi + movl %edi, hitProbes(%esi) // cacheData->hitProbes += probeCount + movl maxHitProbes(%esi), %edi// if (cacheData->maxHitProbes < probeCount) + cmpl %ebx, %edi + jge LMsgSendMaxHitProbeOK_$0_$1_$2 + movl %ebx, maxHitProbes(%esi)// cacheData->maxHitProbes = probeCount +LMsgSendMaxHitProbeOK_$0_$1_$2: + + // update cache hit probe histogram + cmpl $CACHE_HISTOGRAM_SIZE, %ebx // pin probeCount to max index + jl LMsgSendHitHistoIndexSet_$0_$1_$2 + movl $(CACHE_HISTOGRAM_SIZE-1), %ebx +LMsgSendHitHistoIndexSet_$0_$1_$2: + LEA_STATIC_DATA %esi, _CacheHitHistogram, EXTERNAL_SYMBOL + shll $$2, %ebx // convert probeCount to histogram index + addl %ebx, %esi // calculate &CacheHitHistogram[probeCount<<2] + movl 0(%esi), %edi // get current tally + addl $$1, %edi // + movl %edi, 0(%esi) // tally += 1 +LMsgSendHitInstrumentDone_$0_$1_$2: + popl %ebx // restore non-volatile register +#endif + +// load implementation address, restore state, and we're done +.if $1 == CACHE_GET + // method triplet is already in eax +.else + movl method_imp(%eax), %eax // imp = method->method_imp +.endif + +.if $0 == WORD_RETURN // Regular word return +.if $1 == MSG_SENDSUPER // MSG_SENDSUPER + // replace "super" arg with "self" + movl super+8(%esp), %edi + movl receiver(%edi), %esi + movl %esi, super+8(%esp) +.endif +.else // Struct return +.if $1 == MSG_SENDSUPER // MSG_SENDSUPER (stret) + // replace "super" arg with "self" + movl super_stret+8(%esp), %edi + movl receiver(%edi), %esi + movl %esi, super_stret+8(%esp) +.endif +.endif + + // restore caller registers + popl %esi + popl %edi +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup WORD_RETURN | STRUCT_RETURN, MSG_SEND | MSG_SENDSUPER +// +// Takes: WORD_RETURN (first parameter is at sp+4) +// 
STRUCT_RETURN (struct address is at sp+4, first parameter at sp+8) +// MSG_SEND (first parameter is receiver) +// MSG_SENDSUPER (first parameter is address of objc_super structure) +// +// edx = receiver +// ecx = selector +// eax = class +// (all set by CacheLookup's miss case) +// +// Stack must be at 0xXXXXXXXc on entrance. +// +// On exit: esp unchanged +// imp in eax +// +///////////////////////////////////////////////////////////////////// + +.macro MethodTableLookup + MESSENGER_END_SLOW + + // stack has return address and nothing else + subl $$(12+5*16), %esp + + movdqa %xmm3, 4*16(%esp) + movdqa %xmm2, 3*16(%esp) + movdqa %xmm1, 2*16(%esp) + movdqa %xmm0, 1*16(%esp) + + movl %eax, 8(%esp) // class + movl %ecx, 4(%esp) // selector + movl %edx, 0(%esp) // receiver + call __class_lookupMethodAndLoadCache3 + + movdqa 4*16(%esp), %xmm3 + movdqa 3*16(%esp), %xmm2 + movdqa 2*16(%esp), %xmm1 + movdqa 1*16(%esp), %xmm0 + + addl $$(12+5*16), %esp // pop parameters +.endmacro + + +/******************************************************************** + * Method _cache_getMethod(Class cls, SEL sel, IMP msgForward_internal_imp) + * + * If found, returns method triplet pointer. + * If not found, returns NULL. + * + * NOTE: _cache_getMethod never returns any cache entry whose implementation + * is _objc_msgForward_impcache. It returns 1 instead. This prevents thread- + * safety and memory management bugs in _class_lookupMethodAndLoadCache. + * See _class_lookupMethodAndLoadCache for details. + * + * _objc_msgForward_impcache is passed as a parameter because it's more + * efficient to do the (PIC) lookup once in the caller than repeatedly here. + ********************************************************************/ + + STATIC_ENTRY __cache_getMethod + +// load the class and selector + movl selector(%esp), %ecx + movl self(%esp), %edx + +// do lookup + CacheLookup WORD_RETURN, CACHE_GET, LGetMethodMiss + +// cache hit, method triplet in %eax + movl first_arg(%esp), %ecx // check for _objc_msgForward_impcache + cmpl method_imp(%eax), %ecx // if (imp==_objc_msgForward_impcache) + je 1f // return (Method)1 + ret // else return method triplet address +1: movl $1, %eax + ret + +LGetMethodMiss: +// cache miss, return nil + xorl %eax, %eax // zero %eax + ret + +LGetMethodExit: + END_ENTRY __cache_getMethod + + +/******************************************************************** + * IMP _cache_getImp(Class cls, SEL sel) + * + * If found, returns method implementation. + * If not found, returns NULL. 
+ ********************************************************************/ + + STATIC_ENTRY __cache_getImp + +// load the class and selector + movl selector(%esp), %ecx + movl self(%esp), %edx + +// do lookup + CacheLookup WORD_RETURN, CACHE_GET, LGetImpMiss + +// cache hit, method triplet in %eax + movl method_imp(%eax), %eax // return method imp + ret + +LGetImpMiss: +// cache miss, return nil + xorl %eax, %eax // zero %eax + ret + +LGetImpExit: + END_ENTRY __cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd,...); + * + ********************************************************************/ + + ENTRY _objc_msgSend + MESSENGER_START + CALL_MCOUNTER + +// load receiver and selector + movl selector(%esp), %ecx + movl self(%esp), %eax + +// check whether receiver is nil + testl %eax, %eax + je LMsgSendNilSelf + +// receiver (in %eax) is non-nil: search the cache +LMsgSendReceiverOk: + movl isa(%eax), %edx // class = self->isa + CacheLookup WORD_RETURN, MSG_SEND, LMsgSendCacheMiss + xor %edx, %edx // set nonstret for msgForward_internal + MESSENGER_END_FAST + jmp *%eax + +// cache miss: go search the method lists +LMsgSendCacheMiss: + MethodTableLookup WORD_RETURN, MSG_SEND + xor %edx, %edx // set nonstret for msgForward_internal + jmp *%eax // goto *imp + +// message sent to nil: redirect to nil receiver, if any +LMsgSendNilSelf: + // %eax is already zero + movl $0,%edx + xorps %xmm0, %xmm0 +LMsgSendDone: + MESSENGER_END_NIL + ret + +// guaranteed non-nil entry point (disabled for now) +// .globl _objc_msgSendNonNil +// _objc_msgSendNonNil: +// movl self(%esp), %eax +// jmp LMsgSendReceiverOk + +LMsgSendExit: + END_ENTRY _objc_msgSend + +/******************************************************************** + * + * id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + ********************************************************************/ + + ENTRY _objc_msgSendSuper + MESSENGER_START + CALL_MCOUNTER + +// load selector and class to search + movl super(%esp), %eax // struct objc_super + movl selector(%esp), %ecx + movl class(%eax), %edx // struct objc_super->class + +// search the cache (class in %edx) + CacheLookup WORD_RETURN, MSG_SENDSUPER, LMsgSendSuperCacheMiss + xor %edx, %edx // set nonstret for msgForward_internal + MESSENGER_END_FAST + jmp *%eax // goto *imp + +// cache miss: go search the method lists +LMsgSendSuperCacheMiss: + MethodTableLookup WORD_RETURN, MSG_SENDSUPER + xor %edx, %edx // set nonstret for msgForward_internal + jmp *%eax // goto *imp + +// ignored selector: return self +LMsgSendSuperIgnored: + movl super(%esp), %eax + movl receiver(%eax), %eax + MESSENGER_END_NIL + ret + +LMsgSendSuperExit: + END_ENTRY _objc_msgSendSuper + +/******************************************************************** + * id objc_msgSendv(id self, SEL _cmd, unsigned size, marg_list frame); + * + * On entry: + * (sp+4) is the message receiver, + * (sp+8) is the selector, + * (sp+12) is the size of the marg_list, in bytes, + * (sp+16) is the address of the marg_list + * + ********************************************************************/ + + ENTRY _objc_msgSendv + +#if defined(KERNEL) + trap // _objc_msgSendv is not for the kernel +#else + pushl %ebp + movl %esp, %ebp + // stack is currently aligned assuming no extra arguments + movl (marg_list+4)(%ebp), %edx + addl $8, %edx // skip self & selector + movl (marg_size+4)(%ebp), %ecx + subl 
$8, %ecx // skip self & selector + shrl $2, %ecx + je LMsgSendvArgsOK + + // %esp = %esp - (16 - ((numVariableArguments & 3) << 2)) + movl %ecx, %eax // 16-byte align stack + andl $3, %eax + shll $2, %eax + subl $16, %esp + addl %eax, %esp + +LMsgSendvArgLoop: + decl %ecx + movl 0(%edx, %ecx, 4), %eax + pushl %eax + jg LMsgSendvArgLoop + +LMsgSendvArgsOK: + movl (selector+4)(%ebp), %ecx + pushl %ecx + movl (self+4)(%ebp),%ecx + pushl %ecx + call _objc_msgSend + movl %ebp,%esp + popl %ebp + + ret +#endif + END_ENTRY _objc_msgSendv + +/******************************************************************** + * + * double objc_msgSend_fpret(id self, SEL _cmd,...); + * + ********************************************************************/ + + ENTRY _objc_msgSend_fpret + MESSENGER_START + CALL_MCOUNTER + +// load receiver and selector + movl selector(%esp), %ecx + movl self(%esp), %eax + +// check whether receiver is nil + testl %eax, %eax + je LMsgSendFpretNilSelf + +// receiver (in %eax) is non-nil: search the cache +LMsgSendFpretReceiverOk: + movl isa(%eax), %edx // class = self->isa + CacheLookup WORD_RETURN, MSG_SEND, LMsgSendFpretCacheMiss + xor %edx, %edx // set nonstret for msgForward_internal + MESSENGER_END_FAST + jmp *%eax // goto *imp + +// cache miss: go search the method lists +LMsgSendFpretCacheMiss: + MethodTableLookup WORD_RETURN, MSG_SEND + xor %edx, %edx // set nonstret for msgForward_internal + jmp *%eax // goto *imp + +// message sent to nil: redirect to nil receiver, if any +LMsgSendFpretNilSelf: + // %eax is already zero + fldz +LMsgSendFpretDone: + MESSENGER_END_NIL + ret + +LMsgSendFpretExit: + END_ENTRY _objc_msgSend_fpret + +/******************************************************************** + * double objc_msgSendv_fpret(id self, SEL _cmd, unsigned size, marg_list frame); + * + * On entry: + * (sp+4) is the message receiver, + * (sp+8) is the selector, + * (sp+12) is the size of the marg_list, in bytes, + * (sp+16) is the address of the marg_list + * + ********************************************************************/ + + ENTRY _objc_msgSendv_fpret + +#if defined(KERNEL) + trap // _objc_msgSendv is not for the kernel +#else + pushl %ebp + movl %esp, %ebp + // stack is currently aligned assuming no extra arguments + movl (marg_list+4)(%ebp), %edx + addl $8, %edx // skip self & selector + movl (marg_size+4)(%ebp), %ecx + subl $8, %ecx // skip self & selector + shrl $2, %ecx + je LMsgSendvFpretArgsOK + + // %esp = %esp - (16 - ((numVariableArguments & 3) << 2)) + movl %ecx, %eax // 16-byte align stack + andl $3, %eax + shll $2, %eax + subl $16, %esp + addl %eax, %esp + +LMsgSendvFpretArgLoop: + decl %ecx + movl 0(%edx, %ecx, 4), %eax + pushl %eax + jg LMsgSendvFpretArgLoop + +LMsgSendvFpretArgsOK: + movl (selector+4)(%ebp), %ecx + pushl %ecx + movl (self+4)(%ebp),%ecx + pushl %ecx + call _objc_msgSend_fpret + movl %ebp,%esp + popl %ebp + + ret +#endif + END_ENTRY _objc_msgSendv_fpret + +/******************************************************************** + * + * void objc_msgSend_stret(void *st_addr , id self, SEL _cmd, ...); + * + * + * objc_msgSend_stret is the struct-return form of msgSend. + * The ABI calls for (sp+4) to be used as the address of the structure + * being returned, with the parameters in the succeeding locations. 
+ * + * On entry: (sp+4)is the address where the structure is returned, + * (sp+8) is the message receiver, + * (sp+12) is the selector + ********************************************************************/ + + ENTRY _objc_msgSend_stret + MESSENGER_START + CALL_MCOUNTER + +// load receiver and selector + movl self_stret(%esp), %eax + movl (selector_stret)(%esp), %ecx + +// check whether receiver is nil + testl %eax, %eax + je LMsgSendStretNilSelf + +// receiver (in %eax) is non-nil: search the cache +LMsgSendStretReceiverOk: + movl isa(%eax), %edx // class = self->isa + CacheLookup STRUCT_RETURN, MSG_SEND, LMsgSendStretCacheMiss + movl $1, %edx // set stret for objc_msgForward + MESSENGER_END_FAST + jmp *%eax // goto *imp + +// cache miss: go search the method lists +LMsgSendStretCacheMiss: + MethodTableLookup STRUCT_RETURN, MSG_SEND + movl $1, %edx // set stret for objc_msgForward + jmp *%eax // goto *imp + +// message sent to nil: redirect to nil receiver, if any +LMsgSendStretNilSelf: + MESSENGER_END_NIL + ret $4 // pop struct return address (#2995932) + +// guaranteed non-nil entry point (disabled for now) +// .globl _objc_msgSendNonNil_stret +// _objc_msgSendNonNil_stret: +// CALL_MCOUNTER +// movl self_stret(%esp), %eax +// jmp LMsgSendStretReceiverOk + +LMsgSendStretExit: + END_ENTRY _objc_msgSend_stret + +/******************************************************************** + * + * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + * + * objc_msgSendSuper_stret is the struct-return form of msgSendSuper. + * The ABI calls for (sp+4) to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. + * + * On entry: (sp+4)is the address where the structure is returned, + * (sp+8) is the address of the objc_super structure, + * (sp+12) is the selector + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + MESSENGER_START + CALL_MCOUNTER + +// load selector and class to search + movl super_stret(%esp), %eax // struct objc_super + movl (selector_stret)(%esp), %ecx // get selector + movl class(%eax), %edx // struct objc_super->class + +// search the cache (class in %edx) + CacheLookup STRUCT_RETURN, MSG_SENDSUPER, LMsgSendSuperStretCacheMiss + movl $1, %edx // set stret for objc_msgForward + MESSENGER_END_FAST + jmp *%eax // goto *imp + +// cache miss: go search the method lists +LMsgSendSuperStretCacheMiss: + MethodTableLookup STRUCT_RETURN, MSG_SENDSUPER + movl $1, %edx // set stret for objc_msgForward + jmp *%eax // goto *imp + +LMsgSendSuperStretExit: + END_ENTRY _objc_msgSendSuper_stret + + +/******************************************************************** + * void objc_msgSendv_stret(void *st_addr, id self, SEL _cmd, unsigned size, marg_list frame); + * + * objc_msgSendv_stret is the struct-return form of msgSendv. + * This function does not use the struct-return ABI; instead, the + * structure return address is passed as a normal parameter. 
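The objc_msgSendv variants above keep the stack 16-byte aligned while re-pushing a variable number of 4-byte marg_list words; the andl/shll/subl/addl sequence computes the padding. A minimal C sketch of that computation (the function name is illustrative, not taken from the patch):

static unsigned stack_pad_sketch(unsigned nwords)
{
    /* bytes of padding so that pad + nwords*4 is a multiple of 16;
       a word count that is already a multiple of 4 gets a full 16 bytes,
       exactly as the assembly does */
    return 16u - ((nwords & 3u) << 2);
}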
+ * + * On entry: (sp+4) is the address in which the returned struct is put, + * (sp+8) is the message receiver, + * (sp+12) is the selector, + * (sp+16) is the size of the marg_list, in bytes, + * (sp+20) is the address of the marg_list + * + ********************************************************************/ + + ENTRY _objc_msgSendv_stret + +#if defined(KERNEL) + trap // _objc_msgSendv_stret is not for the kernel +#else + pushl %ebp + movl %esp, %ebp + subl $12, %esp // align stack assuming no extra arguments + movl (marg_list_stret+4)(%ebp), %edx + addl $8, %edx // skip self & selector + movl (marg_size_stret+4)(%ebp), %ecx + subl $5, %ecx // skip self & selector + shrl $2, %ecx + jle LMsgSendvStretArgsOK + + // %esp = %esp - (16 - ((numVariableArguments & 3) << 2)) + movl %ecx, %eax // 16-byte align stack + andl $3, %eax + shll $2, %eax + subl $16, %esp + addl %eax, %esp + +LMsgSendvStretArgLoop: + decl %ecx + movl 0(%edx, %ecx, 4), %eax + pushl %eax + jg LMsgSendvStretArgLoop + +LMsgSendvStretArgsOK: + movl (selector_stret+4)(%ebp), %ecx + pushl %ecx + movl (self_stret+4)(%ebp),%ecx + pushl %ecx + movl (struct_addr+4)(%ebp),%ecx + pushl %ecx + call _objc_msgSend_stret + movl %ebp,%esp + popl %ebp + + ret +#endif + END_ENTRY _objc_msgSendv_stret + + +/******************************************************************** + * + * id _objc_msgForward(id self, SEL _cmd,...); + * + ********************************************************************/ + +// _FwdSel is @selector(forward::), set up in map_images(). +// ALWAYS dereference _FwdSel to get to "forward::" !! + .data + .align 2 + .private_extern _FwdSel +_FwdSel: .long 0 + + .cstring + .align 2 +LUnkSelStr: .ascii "Does not recognize selector %s (while forwarding %s)\0" + + .non_lazy_symbol_pointer +L_forward_handler: + .indirect_symbol __objc_forward_handler + .long 0 +L_forward_stret_handler: + .indirect_symbol __objc_forward_stret_handler + .long 0 + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band register %edx is nonzero for stret, zero otherwise + + MESSENGER_START + nop + MESSENGER_END_SLOW + + // Check return type (stret or not) + testl %edx, %edx + jnz __objc_msgForward_stret + jmp __objc_msgForward + + END_ENTRY _objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-struct return version + + // Get PIC base into %edx + call L__objc_msgForward$pic_base +L__objc_msgForward$pic_base: + popl %edx + + // Call user handler, if any + movl L_forward_handler-L__objc_msgForward$pic_base(%edx),%ecx + movl (%ecx), %ecx + testl %ecx, %ecx // if not NULL + je 1f // skip to default handler + jmp *%ecx // call __objc_forward_handler +1: + // No user handler + // Push stack frame + pushl %ebp + movl %esp, %ebp + + // Die if forwarding "forward::" + movl (selector+4)(%ebp), %eax + movl _FwdSel-L__objc_msgForward$pic_base(%edx),%ecx + cmpl %ecx, %eax + je LMsgForwardError + + // Call [receiver forward:sel :margs] + subl $8, %esp // 16-byte align the stack + leal (self+4)(%ebp), %ecx + pushl %ecx // &margs + pushl %eax // sel + movl _FwdSel-L__objc_msgForward$pic_base(%edx),%ecx + pushl %ecx // forward:: + pushl (self+4)(%ebp) // receiver + + call _objc_msgSend + + movl %ebp, %esp + popl %ebp + ret + +LMsgForwardError: + // Call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSel) + subl $8, %esp // 16-byte align the stack + pushl (selector+4+4)(%ebp) // the forwarded selector + movl _FwdSel-L__objc_msgForward$pic_base(%edx),%eax + 
pushl %eax + leal LUnkSelStr-L__objc_msgForward$pic_base(%edx),%eax + pushl %eax + pushl (self+4)(%ebp) + call ___objc_error // never returns + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct return version + + // Get PIC base into %edx + call L__objc_msgForwardStret$pic_base +L__objc_msgForwardStret$pic_base: + popl %edx + + // Call user handler, if any + movl L_forward_stret_handler-L__objc_msgForwardStret$pic_base(%edx), %ecx + movl (%ecx), %ecx + testl %ecx, %ecx // if not NULL + je 1f // skip to default handler + jmp *%ecx // call __objc_forward_stret_handler +1: + // No user handler + // Push stack frame + pushl %ebp + movl %esp, %ebp + + // Die if forwarding "forward::" + movl (selector_stret+4)(%ebp), %eax + movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx), %ecx + cmpl %ecx, %eax + je LMsgForwardStretError + + // Call [receiver forward:sel :margs] + subl $8, %esp // 16-byte align the stack + leal (self_stret+4)(%ebp), %ecx + pushl %ecx // &margs + pushl %eax // sel + movl _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%ecx + pushl %ecx // forward:: + pushl (self_stret+4)(%ebp) // receiver + + call _objc_msgSend + + movl %ebp, %esp + popl %ebp + ret $4 // pop struct return address (#2995932) + +LMsgForwardStretError: + // Call __objc_error(receiver, "unknown selector %s %s", "forward::", forwardedSelector) + subl $8, %esp // 16-byte align the stack + pushl (selector_stret+4+4)(%ebp) // the forwarded selector + leal _FwdSel-L__objc_msgForwardStret$pic_base(%edx),%eax + pushl %eax + leal LUnkSelStr-L__objc_msgForwardStret$pic_base(%edx),%eax + pushl %eax + pushl (self_stret+4)(%ebp) + call ___objc_error // never returns + + END_ENTRY __objc_msgForward_stret + + + ENTRY _method_invoke + + movl selector(%esp), %ecx + movl method_name(%ecx), %edx + movl method_imp(%ecx), %eax + movl %edx, selector(%esp) + jmp *%eax + + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + + movl selector_stret(%esp), %ecx + movl method_name(%ecx), %edx + movl method_imp(%ecx), %eax + movl %edx, selector_stret(%esp) + jmp *%eax + + END_ENTRY _method_invoke_stret + + +.section __DATA,__objc_msg_break +.long 0 +.long 0 + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-simulator-i386.s b/runtime/Messengers.subproj/objc-msg-simulator-i386.s new file mode 100644 index 0000000..beb7ac5 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-simulator-i386.s @@ -0,0 +1,1045 @@ +/* + * Copyright (c) 1999-2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#if defined(__i386__) && TARGET_OS_SIMULATOR + +#include "objc-config.h" + +.data + +// _objc_entryPoints and _objc_exitPoints are used by objc +// to get the critical regions for which method caches +// cannot be garbage collected. + +.align 2 +.private_extern _objc_entryPoints +_objc_entryPoints: + .long _cache_getImp + .long _objc_msgSend + .long _objc_msgSend_fpret + .long _objc_msgSend_stret + .long _objc_msgSendSuper + .long _objc_msgSendSuper2 + .long _objc_msgSendSuper_stret + .long _objc_msgSendSuper2_stret + .long _objc_msgLookup + .long _objc_msgLookup_fpret + .long _objc_msgLookup_stret + .long _objc_msgLookupSuper2 + .long _objc_msgLookupSuper2_stret + .long 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .long LExit_cache_getImp + .long LExit_objc_msgSend + .long LExit_objc_msgSend_fpret + .long LExit_objc_msgSend_stret + .long LExit_objc_msgSendSuper + .long LExit_objc_msgSendSuper2 + .long LExit_objc_msgSendSuper_stret + .long LExit_objc_msgSendSuper2_stret + .long LExit_objc_msgLookup + .long LExit_objc_msgLookup_fpret + .long LExit_objc_msgLookup_stret + .long LExit_objc_msgLookupSuper2 + .long LExit_objc_msgLookupSuper2_stret + .long 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .long 4b + .long ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .long 4b + .long FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .long 4b + .long SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .long 4b + .long NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * Names for relative labels + * DO NOT USE THESE LABELS ELSEWHERE + * Reserved labels: 5: 6: 7: 8: 9: + ********************************************************************/ +#define LCacheMiss 5 +#define LCacheMiss_f 5f +#define LCacheMiss_b 5b +#define LNilTestDone 6 +#define LNilTestDone_f 6f +#define LNilTestDone_b 6b +#define LNilTestSlow 7 +#define LNilTestSlow_f 7f +#define LNilTestSlow_b 7b +#define LGetIsaDone 8 +#define LGetIsaDone_f 8f +#define LGetIsaDone_b 8b +#define LGetIsaSlow 9 +#define LGetIsaSlow_f 9f +#define LGetIsaSlow_b 9b + +/******************************************************************** + * Macro parameters + ********************************************************************/ + + +#define NORMAL 0 +#define FPRET 1 +#define STRET 2 + +#define CALL 100 +#define GETIMP 101 +#define LOOKUP 102 + + +/******************************************************************** + * + * Structure definitions. 
+ * + ********************************************************************/ + +// Offsets from %esp +#define self 4 +#define super 4 +#define selector 8 +#define marg_size 12 +#define marg_list 16 +#define first_arg 12 + +#define struct_addr 4 + +#define self_stret 8 +#define super_stret 8 +#define selector_stret 12 +#define marg_size_stret 16 +#define marg_list_stret 20 + +// objc_super parameter to sendSuper +#define receiver 0 +#define class 4 + +// Selected field offsets in class structure +#define isa 0 +#define superclass 4 + +// Method descriptor +#define method_name 0 +#define method_imp 8 + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY + .text + .globl $0 + .align 2, 0x90 +$0: +.endmacro + +.macro STATIC_ENTRY + .text + .private_extern $0 + .align 4, 0x90 +$0: +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY +LExit$0: +.endmacro + + + /******************************************************************** + * UNWIND name, flags + * Unwind info generation + ********************************************************************/ +.macro UNWIND + .section __LD,__compact_unwind,regular,debug + .long $0 + .set LUnwind$0, LExit$0 - $0 + .long LUnwind$0 + .long $1 + .long 0 /* no personality */ + .long 0 /* no LSDA */ + .text +.endmacro + +#define NoFrame 0x02010000 // no frame, no SP adjustment except return address +#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves + + +///////////////////////////////////////////////////////////////////// +// +// CacheLookup return-type, caller +// +// Locate the implementation for a selector in a class method cache. +// +// Takes: +// $0 = NORMAL, FPRET, STRET +// $1 = CALL, LOOKUP, GETIMP +// ecx = selector to search for +// edx = class to search +// +// On exit: ecx clobbered +// (found) calls or returns IMP in eax, eq/ne set for forwarding +// (not found) jumps to LCacheMiss, class still in edx +// +///////////////////////////////////////////////////////////////////// + +.macro CacheHit + + // CacheHit must always be preceded by a not-taken `jne` instruction + // in case the imp is _objc_msgForward_impcache. 
+ + // eax = found bucket + +.if $1 == GETIMP + movl 4(%eax), %eax // return imp + ret + +.else + +.if $0 != STRET + // eq already set for forwarding by `jne` +.else + test %eax, %eax // set ne for stret forwarding +.endif + +.if $1 == CALL + MESSENGER_END_FAST + jmp *4(%eax) // call imp + +.elseif $1 == LOOKUP + movl 4(%eax), %eax // return imp + ret + +.else +.abort oops +.endif + +.endif + +.endmacro + + +.macro CacheLookup + + movzwl 12(%edx), %eax // eax = mask + andl %ecx, %eax // eax = SEL & mask + shll $$3, %eax // eax = offset = (SEL & mask) * 8 + addl 8(%edx), %eax // eax = bucket = cache->buckets+offset + cmpl (%eax), %ecx // if (bucket->sel != SEL) + jne 1f // scan more + // The `jne` above sets flags for CacheHit + CacheHit $0, $1 // call or return imp + +1: + // loop + cmpl $$1, (%eax) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addl $$8, %eax // bucket++ +2: + cmpl (%eax), %ecx // if (bucket->sel != sel) + jne 1b // scan more + // The `jne` above sets flags for CacheHit + CacheHit $0, $1 // call or return imp + +3: + // wrap or miss + jb LCacheMiss_f // if (bucket->sel < 1) cache miss + // wrap + movl 4(%eax), %eax // bucket->imp is really first bucket + jmp 2f + + // Clone scanning loop to miss instead of hang when cache is corrupt. + // The slow path may detect any corruption and halt later. + +1: + // loop + cmpq $$1, (%eax) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addl $$8, %eax // bucket++ +2: + cmpl (%eax), %ecx // if (bucket->sel != sel) + jne 1b // scan more + // The `jne` above sets flags for CacheHit + CacheHit $0, $1 // call or return imp + +3: + // double wrap or miss + jmp LCacheMiss_f + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup NORMAL|STRET +// +// Takes: +// receiver (not struct objc_super) and selector on stack +// edx = class to search +// +// On exit: IMP in eax, eq/ne set for forwarding +// +///////////////////////////////////////////////////////////////////// + +.macro MethodTableLookup + pushl %ebp + movl %esp, %ebp + + subl $$(8+5*16), %esp + +.if $0 == NORMAL + movl self+4(%ebp), %eax + movl selector+4(%ebp), %ecx +.else + movl self_stret+4(%ebp), %eax + movl selector_stret+4(%ebp), %ecx +.endif + + movdqa %xmm3, 4*16(%esp) + movdqa %xmm2, 3*16(%esp) + movdqa %xmm1, 2*16(%esp) + movdqa %xmm0, 1*16(%esp) + + movl %edx, 8(%esp) // class + movl %ecx, 4(%esp) // selector + movl %eax, 0(%esp) // receiver + call __class_lookupMethodAndLoadCache3 + + // imp in eax + + movdqa 4*16(%esp), %xmm3 + movdqa 3*16(%esp), %xmm2 + movdqa 2*16(%esp), %xmm1 + movdqa 1*16(%esp), %xmm0 + +.if $0 == NORMAL + cmp %eax, %eax // set eq for nonstret forwarding +.else + test %eax, %eax // set ne for stret forwarding +.endif + + leave + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// NilTest return-type +// +// Takes: $0 = NORMAL or FPRET or STRET +// eax = receiver +// +// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp), +// or returns zero. +// +// NilTestReturnZero return-type +// +// Takes: $0 = NORMAL or FPRET or STRET +// eax = receiver +// +// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp), +// or returns zero. +// +// NilTestReturnIMP return-type +// +// Takes: $0 = NORMAL or FPRET or STRET +// eax = receiver +// +// On exit: Loads non-nil receiver in eax and self(esp) or self_stret(esp), +// or returns an IMP in eax that returns zero. 
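For reference, a C sketch of the scan performed by the CacheLookup macro above, assuming (as the assembly reads it) a cache whose mask sits at offset 12 and whose bucket pointer sits at offset 8, with 8-byte {SEL, IMP} buckets terminated by sel == 0 (miss) or sel == 1 (wrap marker whose imp points back at the first bucket). The struct and function names are illustrative only.

#include <stdint.h>

typedef struct { uintptr_t sel; uintptr_t imp; } bucket_sketch;
typedef struct { bucket_sketch *buckets; uint16_t mask; } cache_sketch;  /* layout per the offsets above */

static uintptr_t cache_lookup_sketch(const cache_sketch *cache, uintptr_t sel)
{
    bucket_sketch *b = cache->buckets + (sel & cache->mask);  /* initial probe index */
    int wrapped = 0;
    for (;;) {
        if (b->sel == sel) return b->imp;       /* hit: caller jumps to or returns the IMP */
        if (b->sel == 0)   return 0;            /* empty slot: cache miss */
        if (b->sel == 1) {                      /* wrap marker: its imp points at buckets[0] */
            if (wrapped++) return 0;            /* second wrap: give up instead of looping */
            b = (bucket_sketch *)b->imp;
        } else {
            b++;                                /* linear scan forward */
        }
    }
}

The assembly avoids the wrapped flag by cloning the loop once, so a corrupt cache falls through to the miss path instead of hanging.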
+// +///////////////////////////////////////////////////////////////////// + +.macro ZeroReturn + xorl %eax, %eax + xorl %edx, %edx + xorps %xmm0, %xmm0 + xorps %xmm1, %xmm1 +.endmacro + +.macro ZeroReturnFPRET + fldz +.endmacro + +.macro ZeroReturnSTRET + // empty +.endmacro + + STATIC_ENTRY __objc_msgNil + ZeroReturn + ret + END_ENTRY __objc_msgNil + + STATIC_ENTRY __objc_msgNil_fpret + ZeroReturnFPRET + ret + END_ENTRY __objc_msgNil_fpret + + STATIC_ENTRY __objc_msgNil_stret + ZeroReturnSTRET + ret $4 + END_ENTRY __objc_msgNil_stret + + +.macro NilTest + testl %eax, %eax + jz LNilTestSlow_f +LNilTestDone: +.endmacro + +.macro NilTestReturnZero + .align 3 +LNilTestSlow: + +.if $0 == NORMAL + ZeroReturn + MESSENGER_END_NIL + ret +.elseif $0 == FPRET + ZeroReturnFPRET + MESSENGER_END_NIL + ret +.elseif $0 == STRET + ZeroReturnSTRET + MESSENGER_END_NIL + ret $$4 +.else +.abort oops +.endif +.endmacro + +.macro NilTestReturnIMP + .align 3 +LNilTestSlow: + + call 1f +1: pop %eax +.if $0 == NORMAL + leal __objc_msgNil-1b(%eax), %eax +.elseif $0 == FPRET + leal __objc_msgNil_fpret-1b(%eax), %eax +.elseif $0 == STRET + leal __objc_msgNil_stret-1b(%eax), %eax +.else +.abort oops +.endif + ret +.endmacro + + +/******************************************************************** + * IMP _cache_getImp(Class cls, SEL sel) + * + * If found, returns method implementation. + * If not found, returns NULL. + ********************************************************************/ + + STATIC_ENTRY _cache_getImp + +// load the class and selector + movl selector(%esp), %ecx + movl self(%esp), %edx + + CacheLookup NORMAL, GETIMP // returns IMP on success + +LCacheMiss: +// cache miss, return nil + xorl %eax, %eax + ret + + END_ENTRY _cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd, ...); + * IMP objc_msgLookup(id self, SEL _cmd, ...); + * + * objc_msgLookup ABI: + * IMP returned in eax + * Forwarding returned in Z flag + * edx reserved for our use but not used + * + ********************************************************************/ + + ENTRY _objc_msgSend + UNWIND _objc_msgSend, NoFrame + MESSENGER_START + + movl selector(%esp), %ecx + movl self(%esp), %eax + + NilTest NORMAL + + movl isa(%eax), %edx // class = self->isa + CacheLookup NORMAL, CALL // calls IMP on success + + NilTestReturnZero NORMAL + +LCacheMiss: + // isa still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgLookup + UNWIND _objc_msgLookup, NoFrame + + movl selector(%esp), %ecx + movl self(%esp), %eax + + NilTest NORMAL + + movl isa(%eax), %edx // class = self->isa + CacheLookup NORMAL, LOOKUP // returns IMP on success + + NilTestReturnIMP NORMAL + +LCacheMiss: + // isa still in edx + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup + + +/******************************************************************** + * + * id objc_msgSendSuper(struct objc_super *super, SEL _cmd, ...); + * IMP objc_msgLookupSuper(struct objc_super *super, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper + UNWIND _objc_msgSendSuper, NoFrame + MESSENGER_START + + movl selector(%esp), %ecx + movl super(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super(%esp) // replace super arg with receiver + CacheLookup NORMAL, CALL // calls IMP on 
success + +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + + + ENTRY _objc_msgLookupSuper + UNWIND _objc_msgLookupSuper, NoFrame + + movl selector(%esp), %ecx + movl super(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super(%esp) // replace super arg with receiver + CacheLookup NORMAL, LOOKUP // returns IMP on success + +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookupSuper + + +/******************************************************************** + * + * id objc_msgSendSuper2(struct objc_super *super, SEL _cmd, ...); + * IMP objc_msgLookupSuper2(struct objc_super *super, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper2 + UNWIND _objc_msgSendSuper2, NoFrame + MESSENGER_START + + movl selector(%esp), %ecx + movl super(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super(%esp) // replace super arg with receiver + movl superclass(%edx), %edx // edx = objc_super->class->super_class + CacheLookup NORMAL, CALL // calls IMP on success + +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgLookupSuper2 + UNWIND _objc_msgLookupSuper2, NoFrame + + movl selector(%esp), %ecx + movl super(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super(%esp) // replace super arg with receiver + movl superclass(%edx), %edx // edx = objc_super->class->super_class + CacheLookup NORMAL, LOOKUP // returns IMP on success + +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookupSuper2 + + +/******************************************************************** + * + * double objc_msgSend_fpret(id self, SEL _cmd, ...); + * IMP objc_msgLookup_fpret(id self, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSend_fpret + UNWIND _objc_msgSend_fpret, NoFrame + MESSENGER_START + + movl selector(%esp), %ecx + movl self(%esp), %eax + + NilTest FPRET + + movl isa(%eax), %edx // class = self->isa + CacheLookup FPRET, CALL // calls IMP on success + + NilTestReturnZero FPRET + +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend_fpret + + + ENTRY _objc_msgLookup_fpret + UNWIND _objc_msgLookup_fpret, NoFrame + + movl selector(%esp), %ecx + movl self(%esp), %eax + + NilTest FPRET + + movl isa(%eax), %edx // class = self->isa + CacheLookup FPRET, LOOKUP // returns IMP on success + + NilTestReturnIMP FPRET + +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup_fpret + + +/******************************************************************** + * + * void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...); + * IMP objc_msgLookup_stret(void *st_addr, id self, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSend_stret + UNWIND _objc_msgSend_stret, NoFrame + MESSENGER_START + + movl selector_stret(%esp), %ecx + movl self_stret(%esp), %eax + + NilTest STRET + 
+ movl isa(%eax), %edx // class = self->isa + CacheLookup STRET, CALL // calls IMP on success + + NilTestReturnZero STRET + +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSend_stret + + + ENTRY _objc_msgLookup_stret + UNWIND _objc_msgLookup_stret, NoFrame + + movl selector_stret(%esp), %ecx + movl self_stret(%esp), %eax + + NilTest STRET + + movl isa(%eax), %edx // class = self->isa + CacheLookup STRET, LOOKUP // returns IMP on success + + NilTestReturnIMP STRET + +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookup_stret + + +/******************************************************************** + * + * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * IMP objc_msgLookupSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + UNWIND _objc_msgSendSuper_stret, NoFrame + MESSENGER_START + + movl selector_stret(%esp), %ecx + movl super_stret(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super_stret(%esp) // replace super arg with receiver + CacheLookup STRET, CALL // calls IMP on success + +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper_stret + + + ENTRY _objc_msgLookupSuper_stret + UNWIND _objc_msgLookupSuper_stret, NoFrame + + movl selector_stret(%esp), %ecx + movl super_stret(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super_stret(%esp) // replace super arg with receiver + CacheLookup STRET, LOOKUP // returns IMP on success + +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookupSuper_stret + + +/******************************************************************** + * + * void objc_msgSendSuper2_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * IMP objc_msgLookupSuper2_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper2_stret + UNWIND _objc_msgSendSuper2_stret, NoFrame + MESSENGER_START + + movl selector_stret(%esp), %ecx + movl super_stret(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super_stret(%esp) // replace super arg with receiver + mov superclass(%edx), %edx // edx = objc_super->class->super_class + CacheLookup STRET, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in edx + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper2_stret + + + ENTRY _objc_msgLookupSuper2_stret + UNWIND _objc_msgLookupSuper2_stret, NoFrame + + movl selector_stret(%esp), %ecx + movl super_stret(%esp), %eax // struct objc_super + movl class(%eax), %edx // struct objc_super->class + movl receiver(%eax), %eax // struct objc_super->receiver + movl %eax, super_stret(%esp) // replace super arg with receiver + mov superclass(%edx), %edx // edx = objc_super->class->super_class + CacheLookup STRET, LOOKUP // returns IMP on success + +// cache miss: go 
search the method lists +LCacheMiss: + // class still in edx + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookupSuper2_stret + + +/******************************************************************** + * + * _objc_msgSend_uncached + * _objc_msgSend_stret_uncached + * _objc_msgLookup_uncached + * _objc_msgLookup_stret_uncached + * + * The uncached method lookup. + * + ********************************************************************/ + + STATIC_ENTRY __objc_msgSend_uncached + UNWIND __objc_msgSend_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band edx is the searched class + + // edx is already the class to search + MethodTableLookup NORMAL + jmp *%eax // call imp + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgSend_stret_uncached + UNWIND __objc_msgSend_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band edx is the searched class + + // edx is already the class to search + MethodTableLookup STRET + jmp *%eax // call imp + + END_ENTRY __objc_msgSend_stret_uncached + + + STATIC_ENTRY __objc_msgLookup_uncached + UNWIND __objc_msgLookup_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band edx is the searched class + + // edx is already the class to search + MethodTableLookup NORMAL // eax = IMP + ret + + END_ENTRY __objc_msgLookup_uncached + + + STATIC_ENTRY __objc_msgLookup_stret_uncached + UNWIND __objc_msgLookup_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band edx is the searched class + + // edx is already the class to search + MethodTableLookup STRET // eax = IMP + ret + + END_ENTRY __objc_msgLookup_stret_uncached + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + + .non_lazy_symbol_pointer +L_forward_handler: + .indirect_symbol __objc_forward_handler + .long 0 +L_forward_stret_handler: + .indirect_symbol __objc_forward_stret_handler + .long 0 + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band condition register is NE for stret, EQ otherwise. 
+ + MESSENGER_START + nop + MESSENGER_END_SLOW + + jne __objc_msgForward_stret + jmp __objc_msgForward + + END_ENTRY _objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-struct return version + + call 1f +1: popl %edx + movl L_forward_handler-1b(%edx), %edx + jmp *(%edx) + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct return version + + call 1f +1: popl %edx + movl L_forward_stret_handler-1b(%edx), %edx + jmp *(%edx) + + END_ENTRY __objc_msgForward_stret + + + ENTRY _objc_msgSend_debug + jmp _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + ENTRY _objc_msgSend_stret_debug + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_debug + + ENTRY _objc_msgSendSuper2_stret_debug + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_debug + + ENTRY _objc_msgSend_fpret_debug + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_debug + + + ENTRY _objc_msgSend_noarg + jmp _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + + ENTRY _method_invoke + + movl selector(%esp), %ecx + movl method_name(%ecx), %edx + movl method_imp(%ecx), %eax + movl %edx, selector(%esp) + jmp *%eax + + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + + movl selector_stret(%esp), %ecx + movl method_name(%ecx), %edx + movl method_imp(%ecx), %eax + movl %edx, selector_stret(%esp) + jmp *%eax + + END_ENTRY _method_invoke_stret + + +.section __DATA,__objc_msg_break +.long 0 +.long 0 + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s new file mode 100644 index 0000000..41b5aaa --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-simulator-x86_64.s @@ -0,0 +1,1215 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#if __x86_64__ && TARGET_OS_SIMULATOR + +/******************************************************************** + ******************************************************************** + ** + ** objc-msg-x86_64.s - x86-64 code to support objc messaging. + ** + ******************************************************************** + ********************************************************************/ + +.data + +// _objc_entryPoints and _objc_exitPoints are used by objc +// to get the critical regions for which method caches +// cannot be garbage collected. 
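The _objc_entryPoints and _objc_exitPoints tables defined just below are consumed on the C side when stale cache memory is reclaimed: old bucket arrays are only freed once no thread's PC lies inside a messenger (compare _collecting_in_critical in objc-cache.mm). A sketch of that range check; the array typing and the function name are illustrative:

#include <stdint.h>
#include <stddef.h>

extern uintptr_t objc_entryPoints[];   /* 0-terminated, parallel to objc_exitPoints */
extern uintptr_t objc_exitPoints[];

static int pc_in_messenger_sketch(uintptr_t pc)
{
    for (size_t i = 0; objc_entryPoints[i] != 0; i++) {
        if (pc >= objc_entryPoints[i] && pc < objc_exitPoints[i])
            return 1;                  /* thread is mid-dispatch: keep the old cache alive */
    }
    return 0;
}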
+ +.align 4 +.private_extern _objc_entryPoints +_objc_entryPoints: + .quad _cache_getImp + .quad _objc_msgSend + .quad _objc_msgSend_fpret + .quad _objc_msgSend_fp2ret + .quad _objc_msgSend_stret + .quad _objc_msgSendSuper + .quad _objc_msgSendSuper_stret + .quad _objc_msgSendSuper2 + .quad _objc_msgSendSuper2_stret + .quad _objc_msgLookup + .quad _objc_msgLookup_fpret + .quad _objc_msgLookup_fp2ret + .quad _objc_msgLookup_stret + .quad _objc_msgLookupSuper2 + .quad _objc_msgLookupSuper2_stret + .quad 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .quad LExit_cache_getImp + .quad LExit_objc_msgSend + .quad LExit_objc_msgSend_fpret + .quad LExit_objc_msgSend_fp2ret + .quad LExit_objc_msgSend_stret + .quad LExit_objc_msgSendSuper + .quad LExit_objc_msgSendSuper_stret + .quad LExit_objc_msgSendSuper2 + .quad LExit_objc_msgSendSuper2_stret + .quad LExit_objc_msgLookup + .quad LExit_objc_msgLookup_fpret + .quad LExit_objc_msgLookup_fp2ret + .quad LExit_objc_msgLookup_stret + .quad LExit_objc_msgLookupSuper2 + .quad LExit_objc_msgLookupSuper2_stret + .quad 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * Recommended multi-byte NOP instructions + * (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B) + ********************************************************************/ +#define nop1 .byte 0x90 +#define nop2 .byte 0x66,0x90 +#define nop3 .byte 0x0F,0x1F,0x00 +#define nop4 .byte 0x0F,0x1F,0x40,0x00 +#define nop5 .byte 0x0F,0x1F,0x44,0x00,0x00 +#define nop6 .byte 0x66,0x0F,0x1F,0x44,0x00,0x00 +#define nop7 .byte 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00 +#define nop8 .byte 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 +#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 + + +/******************************************************************** + * Names for parameter registers. 
+ ********************************************************************/ + +#define a1 rdi +#define a1d edi +#define a1b dil +#define a2 rsi +#define a2d esi +#define a2b sil +#define a3 rdx +#define a3d edx +#define a4 rcx +#define a4d ecx +#define a5 r8 +#define a5d r8d +#define a6 r9 +#define a6d r9d + + +/******************************************************************** + * Names for relative labels + * DO NOT USE THESE LABELS ELSEWHERE + * Reserved labels: 6: 7: 8: 9: + ********************************************************************/ +#define LCacheMiss 6 +#define LCacheMiss_f 6f +#define LCacheMiss_b 6b +#define LGetIsaDone 7 +#define LGetIsaDone_f 7f +#define LGetIsaDone_b 7b +#define LNilOrTagged 8 +#define LNilOrTagged_f 8f +#define LNilOrTagged_b 8b +#define LNil 9 +#define LNil_f 9f +#define LNil_b 9b + +/******************************************************************** + * Macro parameters + ********************************************************************/ + +#define NORMAL 0 +#define FPRET 1 +#define FP2RET 2 +#define STRET 3 + +#define CALL 100 +#define GETIMP 101 +#define LOOKUP 102 + + +/******************************************************************** + * + * Structure definitions. + * + ********************************************************************/ + +// objc_super parameter to sendSuper +#define receiver 0 +#define class 8 + +// Selected field offsets in class structure +// #define isa 0 USE GetIsa INSTEAD + +// Method descriptor +#define method_name 0 +#define method_imp 16 + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY + .text + .globl $0 + .align 6, 0x90 +$0: +.endmacro + +.macro STATIC_ENTRY + .text + .private_extern $0 + .align 2, 0x90 +$0: +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY +LExit$0: +.endmacro + + + /******************************************************************** + * UNWIND name, flags + * Unwind info generation + ********************************************************************/ +.macro UNWIND + .section __LD,__compact_unwind,regular,debug + .quad $0 + .set LUnwind$0, LExit$0 - $0 + .long LUnwind$0 + .long $1 + .quad 0 /* no personality */ + .quad 0 /* no LSDA */ + .text +.endmacro + +#define NoFrame 0x02010000 // no frame, no SP adjustment except return address +#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves + + +///////////////////////////////////////////////////////////////////// +// +// CacheLookup return-type, caller +// +// Locate the implementation for a class in a selector's method cache. 
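The structure offsets defined above assume the following LP64 layouts. These C mirrors are purely illustrative (the runtime's real definitions are method_t and struct objc_super in its headers), but they show where the constants 0, 8 and 16 come from:

    #include <objc/objc.h>

    struct objc_super_layout {       // objc_super parameter to the Super messengers
        id    receiver;              // offset 0  -> #define receiver 0
        Class cls;                   // offset 8  -> #define class 8
    };

    struct method_layout {           // mirrors the runtime's method_t
        SEL         name;            // offset 0  -> #define method_name 0
        const char *types;           // offset 8  (type encoding, unused by the asm)
        IMP         imp;             // offset 16 -> #define method_imp 16
    };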
+// +// Takes: +// $0 = NORMAL, FPRET, FP2RET, STRET +// $1 = CALL, LOOKUP, GETIMP +// a1 or a2 (STRET) = receiver +// a2 or a3 (STRET) = selector +// r10 = class to search +// +// On exit: r10 clobbered +// (found) calls or returns IMP in r11, eq/ne set for forwarding +// (not found) jumps to LCacheMiss, class still in r10 +// +///////////////////////////////////////////////////////////////////// + +.macro CacheHit + + // CacheHit must always be preceded by a not-taken `jne` instruction + // in order to set the correct flags for _objc_msgForward_impcache. + + // r11 = found bucket + +.if $1 == GETIMP + movq 8(%r11), %rax // return imp + ret + +.else + +.if $0 != STRET + // eq already set for forwarding by `jne` +.else + test %r11, %r11 // set ne for stret forwarding +.endif + +.if $1 == CALL + MESSENGER_END_FAST + jmp *8(%r11) // call imp + +.elseif $1 == LOOKUP + movq 8(%r11), %r11 // return imp + ret + +.else +.abort oops +.endif + +.endif + +.endmacro + + +.macro CacheLookup +.if $0 != STRET + movq %a2, %r11 // r11 = _cmd +.else + movq %a3, %r11 // r11 = _cmd +.endif + andl 24(%r10), %r11d // r11 = _cmd & class->cache.mask + shlq $$4, %r11 // r11 = offset = (_cmd & mask)<<4 + addq 16(%r10), %r11 // r11 = class->cache.buckets + offset + +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1f // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +1: + // loop + cmpq $$1, (%r11) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addq $$16, %r11 // bucket++ +2: +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +3: + // wrap or miss + jb LCacheMiss_f // if (bucket->sel < 1) cache miss + // wrap + movq 8(%r11), %r11 // bucket->imp is really first bucket + jmp 2f + + // Clone scanning loop to miss instead of hang when cache is corrupt. + // The slow path may detect any corruption and halt later. 
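In C terms, the scan above (and the clone of it that follows) probes an open bucket array of 16-byte (SEL, IMP) pairs: the hash is simply the selector masked by cache.mask, the offsets 16 and 24 are cache.buckets and cache.mask within the class, and a bucket whose SEL is 1 is the end marker whose IMP points back at the first bucket. A hedged sketch, not the runtime's code; field names are illustrative, and the real assembly clones the loop so a corrupt cache misses instead of spinning:

    #include <objc/objc.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct {
        SEL sel;                      // cmpq (%r11), %a2
        IMP imp;                      // jmp *8(%r11) on a hit
    } bucket_sketch_t;                // 16 bytes, hence shlq $4

    typedef struct {
        void            *isa;         // +0
        void            *superclass;  // +8
        bucket_sketch_t *buckets;     // +16 : addq 16(%r10), %r11
        uint32_t         mask;        // +24 : andl 24(%r10), %r11d
        uint32_t         occupied;
    } class_sketch_t;

    static IMP cache_lookup_sketch(class_sketch_t *cls, SEL _cmd)
    {
        bucket_sketch_t *b = cls->buckets + ((uintptr_t)_cmd & cls->mask);
        for (;;) {
            if (b->sel == _cmd)            return b->imp;    // CacheHit
            if ((uintptr_t)b->sel == 0)    return NULL;      // empty slot: miss
            if ((uintptr_t)b->sel == 1) {                    // end marker:
                b = (bucket_sketch_t *)b->imp;               //   imp = first bucket
                continue;                                    //   (a second wrap misses)
            }
            b++;                                             // bucket++
        }
    }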
+ +1: + // loop + cmpq $$1, (%r11) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addq $$16, %r11 // bucket++ +2: +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +3: + // double wrap or miss + jmp LCacheMiss_f + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup NORMAL|STRET +// +// Takes: a1 or a2 (STRET) = receiver +// a2 or a3 (STRET) = selector to search for +// r10 = class to search +// +// On exit: imp in %r11, eq/ne set for forwarding +// +///////////////////////////////////////////////////////////////////// + +.macro MethodTableLookup + + push %rbp + mov %rsp, %rbp + + sub $$0x80+8, %rsp // +8 for alignment + + movdqa %xmm0, -0x80(%rbp) + push %rax // might be xmm parameter count + movdqa %xmm1, -0x70(%rbp) + push %a1 + movdqa %xmm2, -0x60(%rbp) + push %a2 + movdqa %xmm3, -0x50(%rbp) + push %a3 + movdqa %xmm4, -0x40(%rbp) + push %a4 + movdqa %xmm5, -0x30(%rbp) + push %a5 + movdqa %xmm6, -0x20(%rbp) + push %a6 + movdqa %xmm7, -0x10(%rbp) + + // _class_lookupMethodAndLoadCache3(receiver, selector, class) + +.if $0 == NORMAL + // receiver already in a1 + // selector already in a2 +.else + movq %a2, %a1 + movq %a3, %a2 +.endif + movq %r10, %a3 + call __class_lookupMethodAndLoadCache3 + + // IMP is now in %rax + movq %rax, %r11 + + movdqa -0x80(%rbp), %xmm0 + pop %a6 + movdqa -0x70(%rbp), %xmm1 + pop %a5 + movdqa -0x60(%rbp), %xmm2 + pop %a4 + movdqa -0x50(%rbp), %xmm3 + pop %a3 + movdqa -0x40(%rbp), %xmm4 + pop %a2 + movdqa -0x30(%rbp), %xmm5 + pop %a1 + movdqa -0x20(%rbp), %xmm6 + pop %rax + movdqa -0x10(%rbp), %xmm7 + +.if $0 == NORMAL + cmp %r11, %r11 // set eq for nonstret forwarding +.else + test %r11, %r11 // set ne for stret forwarding +.endif + + leave + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// GetIsaCheckNil return-type +// GetIsaSupport return-type +// NilTestReturnZero return-type +// NilTestReturnIMP return-type +// +// Sets r10 = obj->isa. +// Looks up the real class if receiver is a tagged pointer object. +// Returns zero or a zero-returning IMP if obj is nil. 
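MethodTableLookup spills rax (the vector-register count for variadic calls), all six integer argument registers and xmm0 through xmm7 because the method being resolved has not run yet, so its arguments must survive the one C call in the middle. That call is the slow path whose prototype is shown below; the wrapper is only a usage illustration (ordinary sends pass the receiver's class, while the Super variants pass a different starting class in r10):

    #include <objc/runtime.h>

    // Symbol called by MethodTableLookup (C name of __class_lookupMethodAndLoadCache3).
    // It walks the method lists, handles resolution and forwarding, fills the method
    // cache, and returns the IMP that the assembly then jumps to or returns.
    extern IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls);

    static IMP lookup_slow_path(id obj, SEL sel)
    {
        return _class_lookupMethodAndLoadCache3(obj, sel, object_getClass(obj));
    }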
+// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// a1 or a2 (STRET) = receiver +// +// On exit from GetIsaCheckNil: +// r10 = receiver->isa +// r11 is clobbered +// +///////////////////////////////////////////////////////////////////// + +.macro ZeroReturn + xorl %eax, %eax + xorl %edx, %edx + xorps %xmm0, %xmm0 + xorps %xmm1, %xmm1 +.endmacro + +.macro ZeroReturnFPRET + fldz + ZeroReturn +.endmacro + +.macro ZeroReturnFP2RET + fldz + fldz + ZeroReturn +.endmacro + +.macro ZeroReturnSTRET + // rax gets the struct-return address as passed in rdi + movq %rdi, %rax +.endmacro + + STATIC_ENTRY __objc_msgNil + ZeroReturn + ret + END_ENTRY __objc_msgNil + + STATIC_ENTRY __objc_msgNil_fpret + ZeroReturnFPRET + ret + END_ENTRY __objc_msgNil_fpret + + STATIC_ENTRY __objc_msgNil_fp2ret + ZeroReturnFP2RET + ret + END_ENTRY __objc_msgNil_fp2ret + + STATIC_ENTRY __objc_msgNil_stret + ZeroReturnSTRET + ret + END_ENTRY __objc_msgNil_stret + + +.macro GetIsaCheckNil +.if $0 != STRET + testq %a1, %a1 +.else + testq %a2, %a2 +.endif + jle LNilOrTagged_f // MSB tagged pointer looks negative + +.if $0 != STRET + movq (%a1), %r10 // r10 = isa +.else + movq (%a2), %r10 // r10 = isa +.endif + +LGetIsaDone: +.endmacro + + +.macro GetIsaSupport + .align 3 +LNilOrTagged: + jz LNil_f // flags set by GetIsaCheckNil +.if $0 != STRET + movq %a1, %r11 +.else + movq %a2, %r11 +.endif + shrq $$60, %r11 + cmpl $$0xf, %r11d + je 1f + // basic tagged + leaq _objc_debug_taggedpointer_classes(%rip), %r10 + movq (%r10, %r11, 8), %r10 // read isa from table + jmp LGetIsaDone_b +1: + // ext tagged +.if $0 != STRET + movq %a1, %r11 +.else + movq %a2, %r11 +.endif + shrq $$52, %r11 + andl $$0xff, %r11d + leaq _objc_debug_taggedpointer_ext_classes(%rip), %r10 + movq (%r10, %r11, 8), %r10 // read isa from table + jmp LGetIsaDone_b +.endmacro + + +.macro NilTestReturnZero +LNil: +.if $0 == NORMAL + ZeroReturn +.elseif $0 == FPRET + ZeroReturnFPRET +.elseif $0 == FP2RET + ZeroReturnFP2RET +.elseif $0 == STRET + ZeroReturnSTRET +.else +.abort oops +.endif + MESSENGER_END_NIL + ret +.endmacro + + +.macro NilTestReturnIMP +LNil: +.if $0 == NORMAL + leaq __objc_msgNil(%rip), %r11 +.elseif $0 == FPRET + leaq __objc_msgNil_fpret(%rip), %r11 +.elseif $0 == FP2RET + leaq __objc_msgNil_fp2ret(%rip), %r11 +.elseif $0 == STRET + leaq __objc_msgNil_stret(%rip), %r11 +.else +.abort oops +.endif + ret +.endmacro + + +/******************************************************************** + * IMP cache_getImp(Class cls, SEL sel) + * + * On entry: a1 = class whose cache is to be searched + * a2 = selector to search for + * + * If found, returns method implementation. + * If not found, returns NULL. 
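On the simulator the tagged-pointer flag is the most significant bit, which is why GetIsaCheckNil above can fold the nil check and the tagged check into a single signed test: nil is zero and tagged pointers look negative. The class recovery done by GetIsaSupport then indexes the two tables by the top four bits, or by bits 52..59 for the extended 0xF tag. A C rendering of that path; the table names match the .s file, but treat the helper as a sketch of the layout rather than a supported interface:

    #include <objc/objc.h>
    #include <stdint.h>

    extern Class objc_debug_taggedpointer_classes[16];       // top 4 bits as index
    extern Class objc_debug_taggedpointer_ext_classes[256];  // bits 52..59 for tag 0xF

    static Class isa_for_receiver_simulator(id obj)
    {
        uintptr_t bits = (uintptr_t)obj;
        if ((intptr_t)bits > 0)                 // ordinary object (MSB clear, non-nil)
            return *(Class *)bits;              // raw isa is the first word here
        if (bits == 0)                          // nil: the messenger returns zero instead
            return (Class)0;
        uintptr_t slot = bits >> 60;            // shrq $60: top nibble = tag index
        if (slot != 0xF)
            return objc_debug_taggedpointer_classes[slot];
        return objc_debug_taggedpointer_ext_classes[(bits >> 52) & 0xFF];
    }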
+ ********************************************************************/ + + STATIC_ENTRY _cache_getImp + +// do lookup + movq %a1, %r10 // move class to r10 for CacheLookup + CacheLookup NORMAL, GETIMP // returns IMP on success + +LCacheMiss: +// cache miss, return nil + xorl %eax, %eax + ret + + END_ENTRY _cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd,...); + * IMP objc_msgLookup(id self, SEL _cmd, ...); + * + * objc_msgLookup ABI: + * IMP returned in r11 + * Forwarding returned in Z flag + * r10 reserved for our use but not used + * + ********************************************************************/ + + .data + .align 3 + .globl _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + .globl _objc_debug_taggedpointer_ext_classes +_objc_debug_taggedpointer_ext_classes: + .fill 256, 8, 0 + + ENTRY _objc_msgSend + UNWIND _objc_msgSend, NoFrame + MESSENGER_START + + GetIsaCheckNil NORMAL // r10 = self->isa, or return zero + CacheLookup NORMAL, CALL // calls IMP on success + + GetIsaSupport NORMAL + NilTestReturnZero NORMAL + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgLookup + + GetIsaCheckNil NORMAL // r10 = self->isa, or return zero IMP + CacheLookup NORMAL, LOOKUP // returns IMP on success + + GetIsaSupport NORMAL + NilTestReturnIMP NORMAL + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup + + + ENTRY _objc_msgSend_fixup + int3 + END_ENTRY _objc_msgSend_fixup + + + STATIC_ENTRY _objc_msgSend_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend + END_ENTRY _objc_msgSend_fixedup + + +/******************************************************************** + * + * id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + ********************************************************************/ + + ENTRY _objc_msgSendSuper + UNWIND _objc_msgSendSuper, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // class = objc_super->class + movq receiver(%a1), %a1 // load real receiver + CacheLookup NORMAL, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + +/******************************************************************** + * id objc_msgSendSuper2 + ********************************************************************/ + + ENTRY _objc_msgSendSuper2 + UNWIND _objc_msgSendSuper2, NoFrame + MESSENGER_START + + // objc_super->class is superclass of class to search + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // cls = objc_super->class + movq receiver(%a1), %a1 // load real receiver + movq 8(%r10), %r10 // cls = class->superclass + CacheLookup NORMAL, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgLookupSuper2 + + // objc_super->class is superclass of class to search + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // cls = objc_super->class + movq receiver(%a1), %a1 // 
load real receiver + movq 8(%r10), %r10 // cls = class->superclass + CacheLookup NORMAL, LOOKUP // returns IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookupSuper2 + + + ENTRY _objc_msgSendSuper2_fixup + int3 + END_ENTRY _objc_msgSendSuper2_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fpret(id self, SEL _cmd,...); + * Used for `long double` return only. `float` and `double` use objc_msgSend. + * + ********************************************************************/ + + ENTRY _objc_msgSend_fpret + UNWIND _objc_msgSend_fpret, NoFrame + MESSENGER_START + + GetIsaCheckNil FPRET // r10 = self->isa, or return zero + CacheLookup FPRET, CALL // calls IMP on success + + GetIsaSupport FPRET + NilTestReturnZero FPRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend_fpret + + + ENTRY _objc_msgLookup_fpret + + GetIsaCheckNil FPRET // r10 = self->isa, or return zero IMP + CacheLookup FPRET, LOOKUP // returns IMP on success + + GetIsaSupport FPRET + NilTestReturnIMP FPRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup_fpret + + + ENTRY _objc_msgSend_fpret_fixup + int3 + END_ENTRY _objc_msgSend_fpret_fixup + + + STATIC_ENTRY _objc_msgSend_fpret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fp2ret(id self, SEL _cmd,...); + * Used for `complex long double` return only. + * + ********************************************************************/ + + ENTRY _objc_msgSend_fp2ret + UNWIND _objc_msgSend_fp2ret, NoFrame + MESSENGER_START + + GetIsaCheckNil FP2RET // r10 = self->isa, or return zero + CacheLookup FP2RET, CALL // calls IMP on success + + GetIsaSupport FP2RET + NilTestReturnZero FP2RET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend_fp2ret + + + ENTRY _objc_msgLookup_fp2ret + + GetIsaCheckNil FP2RET // r10 = self->isa, or return zero IMP + CacheLookup FP2RET, LOOKUP // returns IMP on success + + GetIsaSupport FP2RET + NilTestReturnIMP FP2RET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup_fp2ret + + + ENTRY _objc_msgSend_fp2ret_fixup + int3 + END_ENTRY _objc_msgSend_fp2ret_fixup + + + STATIC_ENTRY _objc_msgSend_fp2ret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_fixedup + + +/******************************************************************** + * + * void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...); + * + * objc_msgSend_stret is the struct-return form of msgSend. + * The ABI calls for %a1 to be used as the address of the structure + * being returned, with the parameters in the succeeding locations. 
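The only difference between objc_msgSendSuper and the *2 variants above is the extra movq 8(%r10), %r10: the compiler stores the class the method was compiled in, and the messenger itself steps to that class's superclass, so the call site only needs a reference to a class it can name statically. A [super ...] call site therefore boils down to roughly the following (hypothetical helper, not compiler output; objc_msgSendSuper2 is declared in objc-abi.h in this tree):

    #include <objc/message.h>

    extern id objc_msgSendSuper2(struct objc_super *super, SEL op, ...);

    // The struct carries the *defining* class; the messenger searches that
    // class's superclass, exactly as the movq 8(%r10), %r10 above does.
    static id call_super_sketch(id self, SEL _cmd, Class definingClass)
    {
        struct objc_super sup = { self, definingClass };
        return objc_msgSendSuper2(&sup, _cmd);
    }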
+ * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the message receiver, + * %a3 is the selector + ********************************************************************/ + + ENTRY _objc_msgSend_stret + UNWIND _objc_msgSend_stret, NoFrame + MESSENGER_START + + GetIsaCheckNil STRET // r10 = self->isa, or return zero + CacheLookup STRET, CALL // calls IMP on success + + GetIsaSupport STRET + NilTestReturnZero STRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSend_stret + + + ENTRY _objc_msgLookup_stret + + GetIsaCheckNil STRET // r10 = self->isa, or return zero IMP + CacheLookup STRET, LOOKUP // returns IMP on success + + GetIsaSupport STRET + NilTestReturnIMP STRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookup_stret + + + ENTRY _objc_msgSend_stret_fixup + int3 + END_ENTRY _objc_msgSend_stret_fixup + + + STATIC_ENTRY _objc_msgSend_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_fixedup + + +/******************************************************************** + * + * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + * + * objc_msgSendSuper_stret is the struct-return form of msgSendSuper. + * The ABI calls for (sp+4) to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. + * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the address of the objc_super structure, + * %a3 is the selector + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + UNWIND _objc_msgSendSuper_stret, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + CacheLookup STRET, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper_stret + + +/******************************************************************** + * id objc_msgSendSuper2_stret + ********************************************************************/ + + ENTRY _objc_msgSendSuper2_stret + UNWIND _objc_msgSendSuper2_stret, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + movq 8(%r10), %r10 // class = class->superclass + CacheLookup STRET, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper2_stret + + + ENTRY _objc_msgLookupSuper2_stret + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + movq 8(%r10), %r10 // class = class->superclass + CacheLookup STRET, LOOKUP // returns IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookupSuper2_stret + + + ENTRY 
_objc_msgSendSuper2_stret_fixup + int3 + END_ENTRY _objc_msgSendSuper2_stret_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_fixedup + + +/******************************************************************** + * + * _objc_msgSend_uncached + * _objc_msgSend_stret_uncached + * The uncached method lookup. + * + ********************************************************************/ + + STATIC_ENTRY __objc_msgSend_uncached + UNWIND __objc_msgSend_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup NORMAL // r11 = IMP + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgSend_stret_uncached + UNWIND __objc_msgSend_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup STRET // r11 = IMP + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_stret_uncached + + + STATIC_ENTRY __objc_msgLookup_uncached + UNWIND __objc_msgLookup_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup NORMAL // r11 = IMP + ret + + END_ENTRY __objc_msgLookup_uncached + + + STATIC_ENTRY __objc_msgLookup_stret_uncached + UNWIND __objc_msgLookup_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup STRET // r11 = IMP + ret + + END_ENTRY __objc_msgLookup_stret_uncached + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band condition register is NE for stret, EQ otherwise. 
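The *_fixup/*_fixedup pairs above exist for binaries built for the older dyld-assisted dispatch: such call sites pass a pointer to a two-word message_ref instead of a bare SEL, and the *_fixedup entries just reload the selector from its second word (the movq 8(%a2), %a2 and movq 8(%a3), %a3 above) before tail-calling the normal messenger. The *_fixup entries themselves are a bare int3 because they are not expected to run once the references have been rewritten at load time to point at the *_fixedup variants. The layout assumed, with illustrative field names (the runtime's own definition is message_ref_t in objc-abi.h):

    #include <objc/objc.h>

    typedef struct {
        IMP imp;    // offset 0: rewritten at load time to a *_fixedup entry
        SEL sel;    // offset 8: the selector the *_fixedup entry reloads
    } message_ref_sketch_t;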
+ + MESSENGER_START + nop + MESSENGER_END_SLOW + + jne __objc_msgForward_stret + jmp __objc_msgForward + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-stret version + + movq __objc_forward_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct-return version + + movq __objc_forward_stret_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward_stret + + + ENTRY _objc_msgSend_debug + jmp _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + ENTRY _objc_msgSend_stret_debug + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_debug + + ENTRY _objc_msgSendSuper2_stret_debug + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_debug + + ENTRY _objc_msgSend_fpret_debug + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_debug + + ENTRY _objc_msgSend_fp2ret_debug + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_debug + + + ENTRY _objc_msgSend_noarg + jmp _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + + ENTRY _method_invoke + + movq method_imp(%a2), %r11 + movq method_name(%a2), %a2 + jmp *%r11 + + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + + movq method_imp(%a3), %r11 + movq method_name(%a3), %a3 + jmp *%r11 + + END_ENTRY _method_invoke_stret + + +.section __DATA,__objc_msg_break +.quad 0 +.quad 0 + +#endif diff --git a/runtime/Messengers.subproj/objc-msg-win32.m b/runtime/Messengers.subproj/objc-msg-win32.m new file mode 100644 index 0000000..8821b0a --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-win32.m @@ -0,0 +1,520 @@ +#include "objc-private.h" + +// out-of-band parameter to objc_msgForward +#define kFwdMsgSend 1 +#define kFwdMsgSendStret 0 + +// objc_msgSend parameters +#define SELF 8[ebp] +#define SUPER 8[ebp] +#define SELECTOR 12[ebp] +#define FIRST_ARG 16[ebp] + +// objc_msgSend_stret parameters +#define STRUCT_ADDR 8[ebp] +#define SELF_STRET 12[ebp] +#define SUPER_STRET 12[ebp] +#define SELECTOR_STRET 16[ebp] + +// objc_super parameter to sendSuper +#define super_receiver 0 +#define super_class 4 + +// struct objc_class fields +#define isa 0 +#define cache 32 + +// struct objc_method fields +#define method_name 0 +#define method_imp 8 + +// struct objc_cache fields +#define mask 0 +#define occupied 4 +#define buckets 8 + +void *_objc_forward_handler = NULL; +void *_objc_forward_stret_handler = NULL; + +__declspec(naked) Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_imp) +{ + __asm { + push ebp + mov ebp, esp + + mov ecx, SELECTOR + mov edx, SELF + +// CacheLookup WORD_RETURN, CACHE_GET + push edi + mov edi, cache[edx] + + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +MISS: + xor eax, eax + pop esi + pop edi + leave + ret + +HIT: + mov ecx, FIRST_ARG + cmp ecx, method_imp[eax] + je MISS + pop esi + pop edi + leave + ret + } +} + +__declspec(naked) IMP _cache_getImp(Class cls, SEL sel) +{ + __asm { + push ebp + mov ebp, esp + + mov ecx, SELECTOR + mov edx, SELF + +// CacheLookup WORD_RETURN, CACHE_GET + push edi + mov edi, cache[edx] + + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +MISS: + pop esi + pop edi + 
xor eax, eax + leave + ret + +HIT: + pop esi + pop edi + mov eax, method_imp[eax] + leave + ret + } +} + + +OBJC_EXPORT __declspec(naked) id objc_msgSend(id a, SEL b, ...) +{ + __asm { + push ebp + mov ebp, esp + + // load receiver and selector + mov ecx, SELECTOR + mov eax, SELF + + // check whether receiver is nil + test eax, eax + je NIL + + // receiver (in eax) is non-nil: search the cache + mov edx, isa[eax] + + // CacheLookup WORD_RETURN, MSG_SEND + push edi + mov edi, cache[edx] + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +HIT: + mov eax, method_imp[eax] + pop esi + pop edi + mov edx, kFwdMsgSend + leave + jmp eax + + // cache miss: search method lists +MISS: + pop esi + pop edi + mov edx, SELF + mov eax, isa[edx] + + // MethodTableLookup WORD_RETURN, MSG_SEND + push eax + push ecx + push edx + call _class_lookupMethodAndLoadCache3 + + mov edx, kFwdMsgSend + leave + jmp eax + + // message send to nil: return zero +NIL: + // eax is already zero + mov edx, 0 + leave + ret + } +} + + +OBJC_EXPORT __declspec(naked) double objc_msgSend_fpret(id a, SEL b, ...) +{ + __asm { + push ebp + mov ebp, esp + + // load receiver and selector + mov ecx, SELECTOR + mov eax, SELF + + // check whether receiver is nil + test eax, eax + je NIL + + // receiver (in eax) is non-nil: search the cache + mov edx, isa[eax] + + // CacheLookup WORD_RETURN, MSG_SEND + push edi + mov edi, cache[edx] + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +HIT: + mov eax, method_imp[eax] + pop esi + pop edi + mov edx, kFwdMsgSend + leave + jmp eax + + // cache miss: search method lists +MISS: + pop esi + pop edi + mov edx, SELF + mov eax, isa[edx] + + // MethodTableLookup WORD_RETURN, MSG_SEND + push eax + push ecx + push edx + call _class_lookupMethodAndLoadCache3 + + mov edx, kFwdMsgSend + leave + jmp eax + + // message send to nil: return zero +NIL: + fldz + leave + ret + } +} + + +OBJC_EXPORT __declspec(naked) id objc_msgSendSuper(struct objc_super *a, SEL b, ...) 
+{ + __asm { + push ebp + mov ebp, esp + + // load class and selector + mov eax, SUPER + mov ecx, SELECTOR + mov edx, super_class[eax] + + // search the cache (class in edx) + // CacheLookup WORD_RETURN, MSG_SENDSUPER + push edi + mov edi, cache[edx] + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +HIT: + mov eax, method_imp[eax] + pop esi + pop edi + mov edx, SUPER + mov edx, super_receiver[edx] + mov SUPER, edx + mov edx, kFwdMsgSend + leave + jmp eax + + // cache miss: search method lists +MISS: + + pop esi + pop edi + mov eax, SUPER + mov edx, super_receiver[eax] + mov SUPER, edx + mov eax, super_class[eax] + + // MethodTableLookup WORD_RETURN, MSG_SENDSUPER + push eax + push ecx + push edx + call _class_lookupMethodAndLoadCache3 + + mov edx, kFwdMsgSend + leave + jmp eax + } +} + + +OBJC_EXPORT __declspec(naked) void objc_msgSend_stret(void) +{ + __asm { + push ebp + mov ebp, esp + + // load receiver and selector + mov ecx, SELECTOR_STRET + mov eax, SELF_STRET + + // check whether receiver is nil + test eax, eax + je NIL + + // receiver (in eax) is non-nil: search the cache + mov edx, isa[eax] + + // CacheLookup WORD_RETURN, MSG_SEND + push edi + mov edi, cache[edx] + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +HIT: + mov eax, method_imp[eax] + pop esi + pop edi + mov edx, kFwdMsgSendStret + leave + jmp eax + + // cache miss: search method lists +MISS: + pop esi + pop edi + mov edx, SELF_STRET + mov eax, isa[edx] + + // MethodTableLookup WORD_RETURN, MSG_SEND + push eax + push ecx + push edx + call _class_lookupMethodAndLoadCache3 + + mov edx, kFwdMsgSendStret + leave + jmp eax + + // message send to nil: return zero +NIL: + // eax is already zero + mov edx, 0 + leave + ret + } +} + + +OBJC_EXPORT __declspec(naked) id objc_msgSendSuper_stret(struct objc_super *a, SEL b, ...) +{ + __asm { + push ebp + mov ebp, esp + + // load class and selector + mov eax, SUPER_STRET + mov ecx, SELECTOR_STRET + mov edx, super_class[eax] + + // search the cache (class in edx) + // CacheLookup WORD_RETURN, MSG_SENDSUPER + push edi + mov edi, cache[edx] + push esi + mov esi, mask[edi] + mov edx, ecx + shr edx, 2 +SCAN: + and edx, esi + mov eax, buckets[edi][edx*4] + test eax, eax + je MISS + cmp ecx, method_name[eax] + je HIT + add edx, 1 + jmp SCAN + +HIT: + mov eax, method_imp[eax] + pop esi + pop edi + mov edx, SUPER_STRET + mov edx, super_receiver[edx] + mov SUPER_STRET, edx + mov edx, kFwdMsgSendStret + leave + jmp eax + + // cache miss: search method lists +MISS: + + pop esi + pop edi + mov eax, SUPER_STRET + mov edx, super_receiver[eax] + mov SUPER_STRET, edx + mov eax, super_class[eax] + + // MethodTableLookup WORD_RETURN, MSG_SENDSUPER + push eax + push ecx + push edx + call _class_lookupMethodAndLoadCache3 + + mov edx, kFwdMsgSendStret + leave + jmp eax + } +} + + +OBJC_EXPORT __declspec(naked) id _objc_msgForward(id a, SEL b, ...) +{ + __asm { + mov ecx, _objc_forward_handler + jmp ecx + } +} + +OBJC_EXPORT __declspec(naked) id _objc_msgForward_stret(id a, SEL b, ...) +{ + __asm { + mov ecx, _objc_forward_stret_handler + jmp ecx + } +} + + +__declspec(naked) id _objc_msgForward_cached(id a, SEL b, ...) 
+{ + __asm { + cmp edx, kFwdMsgSendStret + je STRET + jmp _objc_msgForward +STRET: + jmp _objc_msgForward_stret + } +} + + +OBJC_EXPORT __declspec(naked) void method_invoke(void) +{ + __asm { + push ebp + mov ebp, esp + + mov ecx, SELECTOR + mov edx, method_name[ecx] + mov eax, method_imp[ecx] + mov SELECTOR, edx + + leave + jmp eax + } +} + + +OBJC_EXPORT __declspec(naked) void method_invoke_stret(void) +{ + __asm { + push ebp + mov ebp, esp + + mov ecx, SELECTOR_STRET + mov edx, method_name[ecx] + mov eax, method_imp[ecx] + mov SELECTOR_STRET, edx + + leave + jmp eax + } +} diff --git a/runtime/Messengers.subproj/objc-msg-x86_64.s b/runtime/Messengers.subproj/objc-msg-x86_64.s new file mode 100644 index 0000000..343b300 --- /dev/null +++ b/runtime/Messengers.subproj/objc-msg-x86_64.s @@ -0,0 +1,1329 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#if __x86_64__ && !TARGET_OS_SIMULATOR + +/******************************************************************** + ******************************************************************** + ** + ** objc-msg-x86_64.s - x86-64 code to support objc messaging. + ** + ******************************************************************** + ********************************************************************/ + +.data + +// _objc_entryPoints and _objc_exitPoints are used by objc +// to get the critical regions for which method caches +// cannot be garbage collected. 
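Returning briefly to the Win32 messenger above before the macOS x86_64 tables: its inline-assembly SCAN/HIT/MISS loops probe the old-ABI cache, an open-addressed array of Method pointers hashed by (selector >> 2) & mask, with a NULL bucket meaning a miss and edx carrying the kFwdMsgSend/kFwdMsgSendStret flag out of band for _objc_msgForward_cached. A hedged C rendering of that probe, using the 32-bit offsets from the #defines at the top of that file:

    #include <stdint.h>
    #include <stddef.h>

    typedef uintptr_t SEL32;              // selector value as the old ABI sees it
    typedef void (*IMP32)(void);

    struct old_method {                   // #define method_name 0 / method_imp 8
        SEL32       name;                 // offset 0
        const char *types;                // offset 4 on a 32-bit target
        IMP32       imp;                  // offset 8
    };

    struct old_cache {                    // #define mask 0 / occupied 4 / buckets 8
        unsigned           mask;          // capacity - 1
        unsigned           occupied;
        struct old_method *buckets[1];    // open-addressed; NULL = empty slot
    };

    static IMP32 win32_cache_probe(const struct old_cache *cache, SEL32 sel)
    {
        unsigned index = (unsigned)(sel >> 2);     // shr edx, 2
        for (;;) {
            index &= cache->mask;                  // and edx, esi (wraps the probe)
            const struct old_method *m = cache->buckets[index];
            if (m == NULL)       return NULL;      // je MISS
            if (m->name == sel)  return m->imp;    // je HIT -> jmp imp
            index++;                               // add edx, 1; jmp SCAN
        }
    }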
+ +.align 4 +.private_extern _objc_entryPoints +_objc_entryPoints: + .quad _cache_getImp + .quad _objc_msgSend + .quad _objc_msgSend_fpret + .quad _objc_msgSend_fp2ret + .quad _objc_msgSend_stret + .quad _objc_msgSendSuper + .quad _objc_msgSendSuper_stret + .quad _objc_msgSendSuper2 + .quad _objc_msgSendSuper2_stret + .quad _objc_msgLookup + .quad _objc_msgLookup_fpret + .quad _objc_msgLookup_fp2ret + .quad _objc_msgLookup_stret + .quad _objc_msgLookupSuper2 + .quad _objc_msgLookupSuper2_stret + .quad 0 + +.private_extern _objc_exitPoints +_objc_exitPoints: + .quad LExit_cache_getImp + .quad LExit_objc_msgSend + .quad LExit_objc_msgSend_fpret + .quad LExit_objc_msgSend_fp2ret + .quad LExit_objc_msgSend_stret + .quad LExit_objc_msgSendSuper + .quad LExit_objc_msgSendSuper_stret + .quad LExit_objc_msgSendSuper2 + .quad LExit_objc_msgSendSuper2_stret + .quad LExit_objc_msgLookup + .quad LExit_objc_msgLookup_fpret + .quad LExit_objc_msgLookup_fp2ret + .quad LExit_objc_msgLookup_stret + .quad LExit_objc_msgLookupSuper2 + .quad LExit_objc_msgLookupSuper2_stret + .quad 0 + + +/******************************************************************** +* List every exit insn from every messenger for debugger use. +* Format: +* ( +* 1 word instruction's address +* 1 word type (ENTER or FAST_EXIT or SLOW_EXIT or NIL_EXIT) +* ) +* 1 word zero +* +* ENTER is the start of a dispatcher +* FAST_EXIT is method dispatch +* SLOW_EXIT is uncached method lookup +* NIL_EXIT is returning zero from a message sent to nil +* These must match objc-gdb.h. +********************************************************************/ + +#define ENTER 1 +#define FAST_EXIT 2 +#define SLOW_EXIT 3 +#define NIL_EXIT 4 + +.section __DATA,__objc_msg_break +.globl _gdb_objc_messenger_breakpoints +_gdb_objc_messenger_breakpoints: +// contents populated by the macros below + +.macro MESSENGER_START +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad ENTER + .text +.endmacro +.macro MESSENGER_END_FAST +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad FAST_EXIT + .text +.endmacro +.macro MESSENGER_END_SLOW +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad SLOW_EXIT + .text +.endmacro +.macro MESSENGER_END_NIL +4: + .section __DATA,__objc_msg_break + .quad 4b + .quad NIL_EXIT + .text +.endmacro + + +/******************************************************************** + * Recommended multi-byte NOP instructions + * (Intel 64 and IA-32 Architectures Software Developer's Manual Volume 2B) + ********************************************************************/ +#define nop1 .byte 0x90 +#define nop2 .byte 0x66,0x90 +#define nop3 .byte 0x0F,0x1F,0x00 +#define nop4 .byte 0x0F,0x1F,0x40,0x00 +#define nop5 .byte 0x0F,0x1F,0x44,0x00,0x00 +#define nop6 .byte 0x66,0x0F,0x1F,0x44,0x00,0x00 +#define nop7 .byte 0x0F,0x1F,0x80,0x00,0x00,0x00,0x00 +#define nop8 .byte 0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 +#define nop9 .byte 0x66,0x0F,0x1F,0x84,0x00,0x00,0x00,0x00,0x00 + + +/******************************************************************** + * Harmless branch prefix hint for instruction alignment + ********************************************************************/ + +#define PN .byte 0x2e + + +/******************************************************************** + * Names for parameter registers. 
+ ********************************************************************/ + +#define a1 rdi +#define a1d edi +#define a1b dil +#define a2 rsi +#define a2d esi +#define a2b sil +#define a3 rdx +#define a3d edx +#define a4 rcx +#define a4d ecx +#define a5 r8 +#define a5d r8d +#define a6 r9 +#define a6d r9d + + +/******************************************************************** + * Names for relative labels + * DO NOT USE THESE LABELS ELSEWHERE + * Reserved labels: 6: 7: 8: 9: + ********************************************************************/ +#define LCacheMiss 6 +#define LCacheMiss_f 6f +#define LCacheMiss_b 6b +#define LNilTestSlow 7 +#define LNilTestSlow_f 7f +#define LNilTestSlow_b 7b +#define LGetIsaDone 8 +#define LGetIsaDone_f 8f +#define LGetIsaDone_b 8b +#define LGetIsaSlow 9 +#define LGetIsaSlow_f 9f +#define LGetIsaSlow_b 9b + +/******************************************************************** + * Macro parameters + ********************************************************************/ + +#define NORMAL 0 +#define FPRET 1 +#define FP2RET 2 +#define STRET 3 + +#define CALL 100 +#define GETIMP 101 +#define LOOKUP 102 + + +/******************************************************************** + * + * Structure definitions. + * + ********************************************************************/ + +// objc_super parameter to sendSuper +#define receiver 0 +#define class 8 + +// Selected field offsets in class structure +// #define isa 0 USE GetIsa INSTEAD + +// Method descriptor +#define method_name 0 +#define method_imp 16 + + +////////////////////////////////////////////////////////////////////// +// +// ENTRY functionName +// +// Assembly directives to begin an exported function. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro ENTRY + .text + .globl $0 + .align 6, 0x90 +$0: +.endmacro + +.macro STATIC_ENTRY + .text + .private_extern $0 + .align 2, 0x90 +$0: +.endmacro + +////////////////////////////////////////////////////////////////////// +// +// END_ENTRY functionName +// +// Assembly directives to end an exported function. Just a placeholder, +// a close-parenthesis for ENTRY, until it is needed for something. +// +// Takes: functionName - name of the exported function +////////////////////////////////////////////////////////////////////// + +.macro END_ENTRY +LExit$0: +.endmacro + + + /******************************************************************** + * UNWIND name, flags + * Unwind info generation + ********************************************************************/ +.macro UNWIND + .section __LD,__compact_unwind,regular,debug + .quad $0 + .set LUnwind$0, LExit$0 - $0 + .long LUnwind$0 + .long $1 + .quad 0 /* no personality */ + .quad 0 /* no LSDA */ + .text +.endmacro + +#define NoFrame 0x02010000 // no frame, no SP adjustment except return address +#define FrameWithNoSaves 0x01000000 // frame, no non-volatile saves + + +///////////////////////////////////////////////////////////////////// +// +// CacheLookup return-type, caller +// +// Locate the implementation for a class in a selector's method cache. 
+// +// Takes: +// $0 = NORMAL, FPRET, FP2RET, STRET +// $1 = CALL, LOOKUP, GETIMP +// a1 or a2 (STRET) = receiver +// a2 or a3 (STRET) = selector +// r10 = class to search +// +// On exit: r10 clobbered +// (found) calls or returns IMP in r11, eq/ne set for forwarding +// (not found) jumps to LCacheMiss, class still in r10 +// +///////////////////////////////////////////////////////////////////// + +.macro CacheHit + + // CacheHit must always be preceded by a not-taken `jne` instruction + // in order to set the correct flags for _objc_msgForward_impcache. + + // r11 = found bucket + +.if $1 == GETIMP + movq 8(%r11), %rax // return imp + ret + +.else + +.if $0 != STRET + // eq already set for forwarding by `jne` +.else + test %r11, %r11 // set ne for stret forwarding +.endif + +.if $1 == CALL + MESSENGER_END_FAST + jmp *8(%r11) // call imp + +.elseif $1 == LOOKUP + movq 8(%r11), %r11 // return imp + ret + +.else +.abort oops +.endif + +.endif + +.endmacro + + +.macro CacheLookup +.if $0 != STRET + movq %a2, %r11 // r11 = _cmd +.else + movq %a3, %r11 // r11 = _cmd +.endif + andl 24(%r10), %r11d // r11 = _cmd & class->cache.mask + shlq $$4, %r11 // r11 = offset = (_cmd & mask)<<4 + addq 16(%r10), %r11 // r11 = class->cache.buckets + offset + +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1f // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +1: + // loop + cmpq $$1, (%r11) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addq $$16, %r11 // bucket++ +2: +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +3: + // wrap or miss + jb LCacheMiss_f // if (bucket->sel < 1) cache miss + // wrap + movq 8(%r11), %r11 // bucket->imp is really first bucket + jmp 2f + + // Clone scanning loop to miss instead of hang when cache is corrupt. + // The slow path may detect any corruption and halt later. 
+ +1: + // loop + cmpq $$1, (%r11) + jbe 3f // if (bucket->sel <= 1) wrap or miss + + addq $$16, %r11 // bucket++ +2: +.if $0 != STRET + cmpq (%r11), %a2 // if (bucket->sel != _cmd) +.else + cmpq (%r11), %a3 // if (bucket->sel != _cmd) +.endif + jne 1b // scan more + // CacheHit must always be preceded by a not-taken `jne` instruction + CacheHit $0, $1 // call or return imp + +3: + // double wrap or miss + jmp LCacheMiss_f + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// MethodTableLookup NORMAL|STRET +// +// Takes: a1 or a2 (STRET) = receiver +// a2 or a3 (STRET) = selector to search for +// r10 = class to search +// +// On exit: imp in %r11, eq/ne set for forwarding +// +///////////////////////////////////////////////////////////////////// + +.macro MethodTableLookup + + push %rbp + mov %rsp, %rbp + + sub $$0x80+8, %rsp // +8 for alignment + + movdqa %xmm0, -0x80(%rbp) + push %rax // might be xmm parameter count + movdqa %xmm1, -0x70(%rbp) + push %a1 + movdqa %xmm2, -0x60(%rbp) + push %a2 + movdqa %xmm3, -0x50(%rbp) + push %a3 + movdqa %xmm4, -0x40(%rbp) + push %a4 + movdqa %xmm5, -0x30(%rbp) + push %a5 + movdqa %xmm6, -0x20(%rbp) + push %a6 + movdqa %xmm7, -0x10(%rbp) + + // _class_lookupMethodAndLoadCache3(receiver, selector, class) + +.if $0 == NORMAL + // receiver already in a1 + // selector already in a2 +.else + movq %a2, %a1 + movq %a3, %a2 +.endif + movq %r10, %a3 + call __class_lookupMethodAndLoadCache3 + + // IMP is now in %rax + movq %rax, %r11 + + movdqa -0x80(%rbp), %xmm0 + pop %a6 + movdqa -0x70(%rbp), %xmm1 + pop %a5 + movdqa -0x60(%rbp), %xmm2 + pop %a4 + movdqa -0x50(%rbp), %xmm3 + pop %a3 + movdqa -0x40(%rbp), %xmm4 + pop %a2 + movdqa -0x30(%rbp), %xmm5 + pop %a1 + movdqa -0x20(%rbp), %xmm6 + pop %rax + movdqa -0x10(%rbp), %xmm7 + +.if $0 == NORMAL + cmp %r11, %r11 // set eq for nonstret forwarding +.else + test %r11, %r11 // set ne for stret forwarding +.endif + + leave + +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// GetIsaFast return-type +// GetIsaSupport return-type +// +// Sets r10 = obj->isa. Consults the tagged isa table if necessary. +// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// a1 or a2 (STRET) = receiver +// +// On exit: r10 = receiver->isa +// r11 is clobbered +// +///////////////////////////////////////////////////////////////////// + +.macro GetIsaFast +.if $0 != STRET + testb $$1, %a1b + PN + jnz LGetIsaSlow_f + movq $$0x00007ffffffffff8, %r10 + andq (%a1), %r10 +.else + testb $$1, %a2b + PN + jnz LGetIsaSlow_f + movq $$0x00007ffffffffff8, %r10 + andq (%a2), %r10 +.endif +LGetIsaDone: +.endmacro + +.macro GetIsaSupport +LGetIsaSlow: +.if $0 != STRET + movl %a1d, %r11d +.else + movl %a2d, %r11d +.endif + andl $$0xF, %r11d + cmp $$0xF, %r11d + je 1f + // basic tagged + leaq _objc_debug_taggedpointer_classes(%rip), %r10 + movq (%r10, %r11, 8), %r10 // read isa from table + jmp LGetIsaDone_b +1: + // extended tagged +.if $0 != STRET + movl %a1d, %r11d +.else + movl %a2d, %r11d +.endif + shrl $$4, %r11d + andl $$0xFF, %r11d + leaq _objc_debug_taggedpointer_ext_classes(%rip), %r10 + movq (%r10, %r11, 8), %r10 // read isa from table + jmp LGetIsaDone_b +.endmacro + + +///////////////////////////////////////////////////////////////////// +// +// NilTest return-type +// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// %a1 or %a2 (STRET) = receiver +// +// On exit: Loads non-nil receiver in %a1 or %a2 (STRET) +// or returns. 
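Unlike the simulator file earlier, this GetIsaFast assumes least-significant-bit tagged pointers and packed (non-pointer) isa words: for an ordinary object the class is recovered by masking the first word with 0x00007ffffffffff8, and for a tagged pointer the low nibble (or the next eight bits when the nibble is 0xF) indexes the class tables. Roughly, with the same caveat as before that the helper is illustrative:

    #include <objc/objc.h>
    #include <stdint.h>

    extern Class objc_debug_taggedpointer_classes[16];       // low nibble as index
    extern Class objc_debug_taggedpointer_ext_classes[256];  // bits 4..11 for tag 0xF

    #define PACKED_ISA_MASK 0x00007ffffffffff8UL   // matches the movq $$... above

    static Class isa_for_receiver_macos(id obj)    // assumes NilTest already ran
    {
        uintptr_t bits = (uintptr_t)obj;
        if ((bits & 1) == 0)                       // testb $1, %a1b: ordinary object
            return (Class)(*(uintptr_t *)bits & PACKED_ISA_MASK);
        uintptr_t slot = bits & 0xF;               // basic tag index
        if (slot != 0xF)
            return objc_debug_taggedpointer_classes[slot];
        return objc_debug_taggedpointer_ext_classes[(bits >> 4) & 0xFF];
    }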
+// +// NilTestReturnZero return-type +// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// %a1 or %a2 (STRET) = receiver +// +// On exit: Loads non-nil receiver in %a1 or %a2 (STRET) +// or returns zero. +// +// NilTestReturnIMP return-type +// +// Takes: $0 = NORMAL or FPRET or FP2RET or STRET +// %a1 or %a2 (STRET) = receiver +// +// On exit: Loads non-nil receiver in %a1 or %a2 (STRET) +// or returns an IMP in r11 that returns zero. +// +///////////////////////////////////////////////////////////////////// + +.macro ZeroReturn + xorl %eax, %eax + xorl %edx, %edx + xorps %xmm0, %xmm0 + xorps %xmm1, %xmm1 +.endmacro + +.macro ZeroReturnFPRET + fldz + ZeroReturn +.endmacro + +.macro ZeroReturnFP2RET + fldz + fldz + ZeroReturn +.endmacro + +.macro ZeroReturnSTRET + // rax gets the struct-return address as passed in rdi + movq %rdi, %rax +.endmacro + + STATIC_ENTRY __objc_msgNil + ZeroReturn + ret + END_ENTRY __objc_msgNil + + STATIC_ENTRY __objc_msgNil_fpret + ZeroReturnFPRET + ret + END_ENTRY __objc_msgNil_fpret + + STATIC_ENTRY __objc_msgNil_fp2ret + ZeroReturnFP2RET + ret + END_ENTRY __objc_msgNil_fp2ret + + STATIC_ENTRY __objc_msgNil_stret + ZeroReturnSTRET + ret + END_ENTRY __objc_msgNil_stret + + +.macro NilTest +.if $0 != STRET + testq %a1, %a1 +.else + testq %a2, %a2 +.endif + PN + jz LNilTestSlow_f +.endmacro + + +.macro NilTestReturnZero + .align 3 +LNilTestSlow: + +.if $0 == NORMAL + ZeroReturn +.elseif $0 == FPRET + ZeroReturnFPRET +.elseif $0 == FP2RET + ZeroReturnFP2RET +.elseif $0 == STRET + ZeroReturnSTRET +.else +.abort oops +.endif + MESSENGER_END_NIL + ret +.endmacro + + +.macro NilTestReturnIMP + .align 3 +LNilTestSlow: + +.if $0 == NORMAL + leaq __objc_msgNil(%rip), %r11 +.elseif $0 == FPRET + leaq __objc_msgNil_fpret(%rip), %r11 +.elseif $0 == FP2RET + leaq __objc_msgNil_fp2ret(%rip), %r11 +.elseif $0 == STRET + leaq __objc_msgNil_stret(%rip), %r11 +.else +.abort oops +.endif + ret +.endmacro + + +/******************************************************************** + * IMP cache_getImp(Class cls, SEL sel) + * + * On entry: a1 = class whose cache is to be searched + * a2 = selector to search for + * + * If found, returns method implementation. + * If not found, returns NULL. 
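What a message to nil yields follows directly from the ZeroReturn* macros above: rax, rdx, xmm0 and xmm1 are cleared, the fpret/fp2ret paths additionally load 0.0 onto the x87 stack once or twice, and the stret path only places the hidden return address back in rax without writing the struct's memory. In caller terms (illustrative casts of the kind the compiler emits; the large-struct result is therefore unspecified):

    #include <objc/message.h>
    #include <string.h>

    struct small { long a, b; };          // 16 bytes: comes back in rax/rdx
    struct big   { long a, b, c, d; };    // 32 bytes: returned via hidden pointer

    static void message_nil_examples(SEL sel)
    {
        id nilReceiver = (id)0;

        long   n = ((long   (*)(id, SEL))objc_msgSend)(nilReceiver, sel);  // 0
        double d = ((double (*)(id, SEL))objc_msgSend)(nilReceiver, sel);  // 0.0
        struct small s =
            ((struct small (*)(id, SEL))objc_msgSend)(nilReceiver, sel);   // {0, 0}

        struct big g;
        memset(&g, 0x5a, sizeof g);
        g = ((struct big (*)(id, SEL))objc_msgSend_stret)(nilReceiver, sel);
        // g's contents are unspecified: ZeroReturnSTRET only sets rax, it never
        // stores into the struct, so do not rely on zeroed memory here.
        (void)n; (void)d; (void)s; (void)g;
    }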
+ ********************************************************************/ + + STATIC_ENTRY _cache_getImp + +// do lookup + movq %a1, %r10 // move class to r10 for CacheLookup + CacheLookup NORMAL, GETIMP // returns IMP on success + +LCacheMiss: +// cache miss, return nil + xorl %eax, %eax + ret + + END_ENTRY _cache_getImp + + +/******************************************************************** + * + * id objc_msgSend(id self, SEL _cmd,...); + * IMP objc_msgLookup(id self, SEL _cmd, ...); + * + * objc_msgLookup ABI: + * IMP returned in r11 + * Forwarding returned in Z flag + * r10 reserved for our use but not used + * + ********************************************************************/ + + .data + .align 3 + .globl _objc_debug_taggedpointer_classes +_objc_debug_taggedpointer_classes: + .fill 16, 8, 0 + .globl _objc_debug_taggedpointer_ext_classes +_objc_debug_taggedpointer_ext_classes: + .fill 256, 8, 0 + + ENTRY _objc_msgSend + UNWIND _objc_msgSend, NoFrame + MESSENGER_START + + NilTest NORMAL + + GetIsaFast NORMAL // r10 = self->isa + CacheLookup NORMAL, CALL // calls IMP on success + + NilTestReturnZero NORMAL + + GetIsaSupport NORMAL + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend + + + ENTRY _objc_msgLookup + + NilTest NORMAL + + GetIsaFast NORMAL // r10 = self->isa + CacheLookup NORMAL, LOOKUP // returns IMP on success + + NilTestReturnIMP NORMAL + + GetIsaSupport NORMAL + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup + + + ENTRY _objc_msgSend_fixup + int3 + END_ENTRY _objc_msgSend_fixup + + + STATIC_ENTRY _objc_msgSend_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend + END_ENTRY _objc_msgSend_fixedup + + +/******************************************************************** + * + * id objc_msgSendSuper(struct objc_super *super, SEL _cmd,...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + ********************************************************************/ + + ENTRY _objc_msgSendSuper + UNWIND _objc_msgSendSuper, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // class = objc_super->class + movq receiver(%a1), %a1 // load real receiver + CacheLookup NORMAL, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper + + +/******************************************************************** + * id objc_msgSendSuper2 + ********************************************************************/ + + ENTRY _objc_msgSendSuper2 + UNWIND _objc_msgSendSuper2, NoFrame + MESSENGER_START + + // objc_super->class is superclass of class to search + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // cls = objc_super->class + movq receiver(%a1), %a1 // load real receiver + movq 8(%r10), %r10 // cls = class->superclass + CacheLookup NORMAL, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSendSuper2 + + + ENTRY _objc_msgLookupSuper2 + + // objc_super->class is superclass of class to search + +// search the cache (objc_super in %a1) + movq class(%a1), %r10 // cls = objc_super->class + movq receiver(%a1), %a1 // 
load real receiver + movq 8(%r10), %r10 // cls = class->superclass + CacheLookup NORMAL, LOOKUP // returns IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookupSuper2 + + + ENTRY _objc_msgSendSuper2_fixup + int3 + END_ENTRY _objc_msgSendSuper2_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fpret(id self, SEL _cmd,...); + * Used for `long double` return only. `float` and `double` use objc_msgSend. + * + ********************************************************************/ + + ENTRY _objc_msgSend_fpret + UNWIND _objc_msgSend_fpret, NoFrame + MESSENGER_START + + NilTest FPRET + + GetIsaFast FPRET // r10 = self->isa + CacheLookup FPRET, CALL // calls IMP on success + + NilTestReturnZero FPRET + + GetIsaSupport FPRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend_fpret + + + ENTRY _objc_msgLookup_fpret + + NilTest FPRET + + GetIsaFast FPRET // r10 = self->isa + CacheLookup FPRET, LOOKUP // returns IMP on success + + NilTestReturnIMP FPRET + + GetIsaSupport FPRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup_fpret + + + ENTRY _objc_msgSend_fpret_fixup + int3 + END_ENTRY _objc_msgSend_fpret_fixup + + + STATIC_ENTRY _objc_msgSend_fpret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_fixedup + + +/******************************************************************** + * + * double objc_msgSend_fp2ret(id self, SEL _cmd,...); + * Used for `complex long double` return only. + * + ********************************************************************/ + + ENTRY _objc_msgSend_fp2ret + UNWIND _objc_msgSend_fp2ret, NoFrame + MESSENGER_START + + NilTest FP2RET + + GetIsaFast FP2RET // r10 = self->isa + CacheLookup FP2RET, CALL // calls IMP on success + + NilTestReturnZero FP2RET + + GetIsaSupport FP2RET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_uncached + + END_ENTRY _objc_msgSend_fp2ret + + + ENTRY _objc_msgLookup_fp2ret + + NilTest FP2RET + + GetIsaFast FP2RET // r10 = self->isa + CacheLookup FP2RET, LOOKUP // returns IMP on success + + NilTestReturnIMP FP2RET + + GetIsaSupport FP2RET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_uncached + + END_ENTRY _objc_msgLookup_fp2ret + + + ENTRY _objc_msgSend_fp2ret_fixup + int3 + END_ENTRY _objc_msgSend_fp2ret_fixup + + + STATIC_ENTRY _objc_msgSend_fp2ret_fixedup + // Load _cmd from the message_ref + movq 8(%a2), %a2 + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_fixedup + + +/******************************************************************** + * + * void objc_msgSend_stret(void *st_addr, id self, SEL _cmd, ...); + * + * objc_msgSend_stret is the struct-return form of msgSend. + * The ABI calls for %a1 to be used as the address of the structure + * being returned, with the parameters in the succeeding locations. 
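The fpret/fp2ret entries just above exist purely because of return-register conventions: float and double come back in xmm0, so plain objc_msgSend covers them, while long double is returned on the x87 stack and _Complex long double in st0/st1, which is why their nil paths must fldz once or twice. Callers never pick these by hand; the compiler selects the messenger from the declared return type, roughly as in:

    #include <objc/message.h>

    //   float / double         -> objc_msgSend        (xmm0)
    //   long double            -> objc_msgSend_fpret  (st0)
    //   _Complex long double   -> objc_msgSend_fp2ret (st0/st1)
    static long double send_fpret_example(id receiver, SEL sel)
    {
        return ((long double (*)(id, SEL))objc_msgSend_fpret)(receiver, sel);
    }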
+ * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the message receiver, + * %a3 is the selector + ********************************************************************/ + + ENTRY _objc_msgSend_stret + UNWIND _objc_msgSend_stret, NoFrame + MESSENGER_START + + NilTest STRET + + GetIsaFast STRET // r10 = self->isa + CacheLookup STRET, CALL // calls IMP on success + + NilTestReturnZero STRET + + GetIsaSupport STRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSend_stret + + + ENTRY _objc_msgLookup_stret + + NilTest STRET + + GetIsaFast STRET // r10 = self->isa + CacheLookup STRET, LOOKUP // returns IMP on success + + NilTestReturnIMP STRET + + GetIsaSupport STRET + +// cache miss: go search the method lists +LCacheMiss: + // isa still in r10 + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookup_stret + + + ENTRY _objc_msgSend_stret_fixup + int3 + END_ENTRY _objc_msgSend_stret_fixup + + + STATIC_ENTRY _objc_msgSend_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_fixedup + + +/******************************************************************** + * + * void objc_msgSendSuper_stret(void *st_addr, struct objc_super *super, SEL _cmd, ...); + * + * struct objc_super { + * id receiver; + * Class class; + * }; + * + * objc_msgSendSuper_stret is the struct-return form of msgSendSuper. + * The ABI calls for (sp+4) to be used as the address of the structure + * being returned, with the parameters in the succeeding registers. + * + * On entry: %a1 is the address where the structure is returned, + * %a2 is the address of the objc_super structure, + * %a3 is the selector + * + ********************************************************************/ + + ENTRY _objc_msgSendSuper_stret + UNWIND _objc_msgSendSuper_stret, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + CacheLookup STRET, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // class still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper_stret + + +/******************************************************************** + * id objc_msgSendSuper2_stret + ********************************************************************/ + + ENTRY _objc_msgSendSuper2_stret + UNWIND _objc_msgSendSuper2_stret, NoFrame + MESSENGER_START + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + movq 8(%r10), %r10 // class = class->superclass + CacheLookup STRET, CALL // calls IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + MESSENGER_END_SLOW + jmp __objc_msgSend_stret_uncached + + END_ENTRY _objc_msgSendSuper2_stret + + + ENTRY _objc_msgLookupSuper2_stret + +// search the cache (objc_super in %a2) + movq class(%a2), %r10 // class = objc_super->class + movq receiver(%a2), %a2 // load real receiver + movq 8(%r10), %r10 // class = class->superclass + CacheLookup STRET, LOOKUP // returns IMP on success + +// cache miss: go search the method lists +LCacheMiss: + // superclass still in r10 + jmp __objc_msgLookup_stret_uncached + + END_ENTRY _objc_msgLookupSuper2_stret + + + ENTRY 
_objc_msgSendSuper2_stret_fixup + int3 + END_ENTRY _objc_msgSendSuper2_stret_fixup + + + STATIC_ENTRY _objc_msgSendSuper2_stret_fixedup + // Load _cmd from the message_ref + movq 8(%a3), %a3 + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_fixedup + + +/******************************************************************** + * + * _objc_msgSend_uncached + * _objc_msgSend_stret_uncached + * _objc_msgLookup_uncached + * _objc_msgLookup_stret_uncached + * + * The uncached method lookup. + * + ********************************************************************/ + + STATIC_ENTRY __objc_msgSend_uncached + UNWIND __objc_msgSend_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup NORMAL // r11 = IMP + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_uncached + + + STATIC_ENTRY __objc_msgSend_stret_uncached + UNWIND __objc_msgSend_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup STRET // r11 = IMP + jmp *%r11 // goto *imp + + END_ENTRY __objc_msgSend_stret_uncached + + + STATIC_ENTRY __objc_msgLookup_uncached + UNWIND __objc_msgLookup_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup NORMAL // r11 = IMP + ret + + END_ENTRY __objc_msgLookup_uncached + + + STATIC_ENTRY __objc_msgLookup_stret_uncached + UNWIND __objc_msgLookup_stret_uncached, FrameWithNoSaves + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band r10 is the searched class + + // r10 is already the class to search + MethodTableLookup STRET // r11 = IMP + ret + + END_ENTRY __objc_msgLookup_stret_uncached + + +/******************************************************************** +* +* id _objc_msgForward(id self, SEL _cmd,...); +* +* _objc_msgForward and _objc_msgForward_stret are the externally-callable +* functions returned by things like method_getImplementation(). +* _objc_msgForward_impcache is the function pointer actually stored in +* method caches. +* +********************************************************************/ + + STATIC_ENTRY __objc_msgForward_impcache + // Method cache version + + // THIS IS NOT A CALLABLE C FUNCTION + // Out-of-band condition register is NE for stret, EQ otherwise. 
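+
+	// (Editorial sketch, not part of the original source: the forwarding
+	// entry points described above matter to C callers because
+	// class_getMethodImplementation() returns _objc_msgForward rather
+	// than NULL for unimplemented selectors, so a "does this class really
+	// implement it" check looks roughly like
+	//
+	//     IMP imp = class_getMethodImplementation(cls, sel);
+	//     BOOL implemented = (imp != NULL && imp != (IMP)_objc_msgForward);
+	//
+	// using only the public <objc/runtime.h> and <objc/message.h> API.)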
+ + MESSENGER_START + nop + MESSENGER_END_SLOW + + jne __objc_msgForward_stret + jmp __objc_msgForward + + END_ENTRY __objc_msgForward_impcache + + + ENTRY __objc_msgForward + // Non-stret version + + movq __objc_forward_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward + + + ENTRY __objc_msgForward_stret + // Struct-return version + + movq __objc_forward_stret_handler(%rip), %r11 + jmp *%r11 + + END_ENTRY __objc_msgForward_stret + + + ENTRY _objc_msgSend_debug + jmp _objc_msgSend + END_ENTRY _objc_msgSend_debug + + ENTRY _objc_msgSendSuper2_debug + jmp _objc_msgSendSuper2 + END_ENTRY _objc_msgSendSuper2_debug + + ENTRY _objc_msgSend_stret_debug + jmp _objc_msgSend_stret + END_ENTRY _objc_msgSend_stret_debug + + ENTRY _objc_msgSendSuper2_stret_debug + jmp _objc_msgSendSuper2_stret + END_ENTRY _objc_msgSendSuper2_stret_debug + + ENTRY _objc_msgSend_fpret_debug + jmp _objc_msgSend_fpret + END_ENTRY _objc_msgSend_fpret_debug + + ENTRY _objc_msgSend_fp2ret_debug + jmp _objc_msgSend_fp2ret + END_ENTRY _objc_msgSend_fp2ret_debug + + + ENTRY _objc_msgSend_noarg + jmp _objc_msgSend + END_ENTRY _objc_msgSend_noarg + + + ENTRY _method_invoke + + movq method_imp(%a2), %r11 + movq method_name(%a2), %a2 + jmp *%r11 + + END_ENTRY _method_invoke + + + ENTRY _method_invoke_stret + + movq method_imp(%a3), %r11 + movq method_name(%a3), %a3 + jmp *%r11 + + END_ENTRY _method_invoke_stret + + +.section __DATA,__objc_msg_break +.quad 0 +.quad 0 + + + // Workaround for Skype evil (rdar://19715989) + + .text + .align 4 + .private_extern _map_images + .private_extern _map_2_images + .private_extern _hax +_hax: + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop +_map_images: + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + nop + jmp _map_2_images + +#endif diff --git a/runtime/NSObjCRuntime.h b/runtime/NSObjCRuntime.h new file mode 100644 index 0000000..d111e0e --- /dev/null +++ b/runtime/NSObjCRuntime.h @@ -0,0 +1,33 @@ +/* NSObjCRuntime.h + Copyright (c) 1994-2012, Apple Inc. All rights reserved. +*/ + +#ifndef _OBJC_NSOBJCRUNTIME_H_ +#define _OBJC_NSOBJCRUNTIME_H_ + +#include +#include + +#if __LP64__ || (TARGET_OS_EMBEDDED && !TARGET_OS_IPHONE) || TARGET_OS_WIN32 || NS_BUILD_32_LIKE_64 +typedef long NSInteger; +typedef unsigned long NSUInteger; +#else +typedef int NSInteger; +typedef unsigned int NSUInteger; +#endif + +#define NSIntegerMax LONG_MAX +#define NSIntegerMin LONG_MIN +#define NSUIntegerMax ULONG_MAX + +#define NSINTEGER_DEFINED 1 + +#ifndef NS_DESIGNATED_INITIALIZER +#if __has_attribute(objc_designated_initializer) +#define NS_DESIGNATED_INITIALIZER __attribute__((objc_designated_initializer)) +#else +#define NS_DESIGNATED_INITIALIZER +#endif +#endif + +#endif diff --git a/runtime/NSObject.h b/runtime/NSObject.h new file mode 100644 index 0000000..f42b446 --- /dev/null +++ b/runtime/NSObject.h @@ -0,0 +1,109 @@ +/* NSObject.h + Copyright (c) 1994-2012, Apple Inc. All rights reserved. 
+*/ + +#ifndef _OBJC_NSOBJECT_H_ +#define _OBJC_NSOBJECT_H_ + +#if __OBJC__ + +#include +#include + +@class NSString, NSMethodSignature, NSInvocation; + +@protocol NSObject + +- (BOOL)isEqual:(id)object; +@property (readonly) NSUInteger hash; + +@property (readonly) Class superclass; +- (Class)class OBJC_SWIFT_UNAVAILABLE("use 'anObject.dynamicType' instead"); +- (instancetype)self; + +- (id)performSelector:(SEL)aSelector; +- (id)performSelector:(SEL)aSelector withObject:(id)object; +- (id)performSelector:(SEL)aSelector withObject:(id)object1 withObject:(id)object2; + +- (BOOL)isProxy; + +- (BOOL)isKindOfClass:(Class)aClass; +- (BOOL)isMemberOfClass:(Class)aClass; +- (BOOL)conformsToProtocol:(Protocol *)aProtocol; + +- (BOOL)respondsToSelector:(SEL)aSelector; + +- (instancetype)retain OBJC_ARC_UNAVAILABLE; +- (oneway void)release OBJC_ARC_UNAVAILABLE; +- (instancetype)autorelease OBJC_ARC_UNAVAILABLE; +- (NSUInteger)retainCount OBJC_ARC_UNAVAILABLE; + +- (struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; + +@property (readonly, copy) NSString *description; +@optional +@property (readonly, copy) NSString *debugDescription; + +@end + + +OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) +OBJC_ROOT_CLASS +OBJC_EXPORT +@interface NSObject { + Class isa OBJC_ISA_AVAILABILITY; +} + ++ (void)load; + ++ (void)initialize; +- (instancetype)init +#if NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER + NS_DESIGNATED_INITIALIZER +#endif + ; + ++ (instancetype)new OBJC_SWIFT_UNAVAILABLE("use object initializers instead"); ++ (instancetype)allocWithZone:(struct _NSZone *)zone OBJC_SWIFT_UNAVAILABLE("use object initializers instead"); ++ (instancetype)alloc OBJC_SWIFT_UNAVAILABLE("use object initializers instead"); +- (void)dealloc OBJC_SWIFT_UNAVAILABLE("use 'deinit' to define a de-initializer"); + +- (void)finalize OBJC_DEPRECATED("Objective-C garbage collection is no longer supported"); + +- (id)copy; +- (id)mutableCopy; + ++ (id)copyWithZone:(struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; ++ (id)mutableCopyWithZone:(struct _NSZone *)zone OBJC_ARC_UNAVAILABLE; + ++ (BOOL)instancesRespondToSelector:(SEL)aSelector; ++ (BOOL)conformsToProtocol:(Protocol *)protocol; +- (IMP)methodForSelector:(SEL)aSelector; ++ (IMP)instanceMethodForSelector:(SEL)aSelector; +- (void)doesNotRecognizeSelector:(SEL)aSelector; + +- (id)forwardingTargetForSelector:(SEL)aSelector OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +- (void)forwardInvocation:(NSInvocation *)anInvocation OBJC_SWIFT_UNAVAILABLE(""); +- (NSMethodSignature *)methodSignatureForSelector:(SEL)aSelector OBJC_SWIFT_UNAVAILABLE(""); + ++ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)aSelector OBJC_SWIFT_UNAVAILABLE(""); + +- (BOOL)allowsWeakReference UNAVAILABLE_ATTRIBUTE; +- (BOOL)retainWeakReference UNAVAILABLE_ATTRIBUTE; + ++ (BOOL)isSubclassOfClass:(Class)aClass; + ++ (BOOL)resolveClassMethod:(SEL)sel OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); ++ (BOOL)resolveInstanceMethod:(SEL)sel OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + ++ (NSUInteger)hash; ++ (Class)superclass; ++ (Class)class OBJC_SWIFT_UNAVAILABLE("use 'aClass.self' instead"); ++ (NSString *)description; ++ (NSString *)debugDescription; + +@end + +#endif + +#endif diff --git a/runtime/NSObject.mm b/runtime/NSObject.mm new file mode 100644 index 0000000..c7e7f43 --- /dev/null +++ b/runtime/NSObject.mm @@ -0,0 +1,2352 @@ +/* + * Copyright (c) 2010-2012 Apple Inc. All rights reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "objc-private.h" +#include "NSObject.h" + +#include "objc-weak.h" +#include "llvm-DenseMap.h" +#include "NSObject.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +@interface NSInvocation +- (SEL)selector; +@end + + +#if TARGET_OS_MAC + +// NSObject used to be in Foundation/CoreFoundation. + +#define SYMBOL_ELSEWHERE_IN_3(sym, vers, n) \ + OBJC_EXPORT const char elsewhere_ ##n __asm__("$ld$hide$os" #vers "$" #sym); const char elsewhere_ ##n = 0 +#define SYMBOL_ELSEWHERE_IN_2(sym, vers, n) \ + SYMBOL_ELSEWHERE_IN_3(sym, vers, n) +#define SYMBOL_ELSEWHERE_IN(sym, vers) \ + SYMBOL_ELSEWHERE_IN_2(sym, vers, __COUNTER__) + +#if __OBJC2__ +# define NSOBJECT_ELSEWHERE_IN(vers) \ + SYMBOL_ELSEWHERE_IN(_OBJC_CLASS_$_NSObject, vers); \ + SYMBOL_ELSEWHERE_IN(_OBJC_METACLASS_$_NSObject, vers); \ + SYMBOL_ELSEWHERE_IN(_OBJC_IVAR_$_NSObject.isa, vers) +#else +# define NSOBJECT_ELSEWHERE_IN(vers) \ + SYMBOL_ELSEWHERE_IN(.objc_class_name_NSObject, vers) +#endif + +#if TARGET_OS_IOS + NSOBJECT_ELSEWHERE_IN(5.1); + NSOBJECT_ELSEWHERE_IN(5.0); + NSOBJECT_ELSEWHERE_IN(4.3); + NSOBJECT_ELSEWHERE_IN(4.2); + NSOBJECT_ELSEWHERE_IN(4.1); + NSOBJECT_ELSEWHERE_IN(4.0); + NSOBJECT_ELSEWHERE_IN(3.2); + NSOBJECT_ELSEWHERE_IN(3.1); + NSOBJECT_ELSEWHERE_IN(3.0); + NSOBJECT_ELSEWHERE_IN(2.2); + NSOBJECT_ELSEWHERE_IN(2.1); + NSOBJECT_ELSEWHERE_IN(2.0); +#elif TARGET_OS_OSX + NSOBJECT_ELSEWHERE_IN(10.7); + NSOBJECT_ELSEWHERE_IN(10.6); + NSOBJECT_ELSEWHERE_IN(10.5); + NSOBJECT_ELSEWHERE_IN(10.4); + NSOBJECT_ELSEWHERE_IN(10.3); + NSOBJECT_ELSEWHERE_IN(10.2); + NSOBJECT_ELSEWHERE_IN(10.1); + NSOBJECT_ELSEWHERE_IN(10.0); +#else + // NSObject has always been in libobjc on these platforms. +#endif + +// TARGET_OS_MAC +#endif + + +/*********************************************************************** +* Weak ivar support +**********************************************************************/ + +static id defaultBadAllocHandler(Class cls) +{ + _objc_fatal("attempt to allocate object of class '%s' failed", + cls->nameForLogging()); +} + +static id(*badAllocHandler)(Class) = &defaultBadAllocHandler; + +static id callBadAllocHandler(Class cls) +{ + // fixme add re-entrancy protection in case allocation fails inside handler + return (*badAllocHandler)(cls); +} + +void _objc_setBadAllocHandler(id(*newHandler)(Class)) +{ + badAllocHandler = newHandler; +} + + +namespace { + +// The order of these bits is important. 
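+// (Editorial note, not part of the original source: on LP64 the resulting
+// side-table refcount word is laid out roughly as
+//     bit 0        weakly referenced
+//     bit 1        deallocating
+//     bits 2..62   extra retain count   (hence SIDE_TABLE_RC_SHIFT == 2)
+//     bit 63       RC_PINNED, set once the count may no longer change
+// so the flag bits survive untouched while the count is bumped by
+// SIDE_TABLE_RC_ONE at a time.)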
+#define SIDE_TABLE_WEAKLY_REFERENCED (1UL<<0) +#define SIDE_TABLE_DEALLOCATING (1UL<<1) // MSB-ward of weak bit +#define SIDE_TABLE_RC_ONE (1UL<<2) // MSB-ward of deallocating bit +#define SIDE_TABLE_RC_PINNED (1UL<<(WORD_BITS-1)) + +#define SIDE_TABLE_RC_SHIFT 2 +#define SIDE_TABLE_FLAG_MASK (SIDE_TABLE_RC_ONE-1) + +// RefcountMap disguises its pointers because we +// don't want the table to act as a root for `leaks`. +typedef objc::DenseMap,size_t,true> RefcountMap; + +struct SideTable { + spinlock_t slock; + RefcountMap refcnts; + weak_table_t weak_table; + + SideTable() { + memset(&weak_table, 0, sizeof(weak_table)); + } + + ~SideTable() { + _objc_fatal("Do not delete SideTable."); + } + + void lock() { slock.lock(); } + void unlock() { slock.unlock(); } + + // Address-ordered lock discipline for a pair of side tables. + + template + static void lockTwo(SideTable *lock1, SideTable *lock2); + template + static void unlockTwo(SideTable *lock1, SideTable *lock2); +}; + + +template<> +void SideTable::lockTwo(SideTable *lock1, SideTable *lock2) { + spinlock_t::lockTwo(&lock1->slock, &lock2->slock); +} + +template<> +void SideTable::lockTwo(SideTable *lock1, SideTable *) { + lock1->lock(); +} + +template<> +void SideTable::lockTwo(SideTable *, SideTable *lock2) { + lock2->lock(); +} + +template<> +void SideTable::unlockTwo(SideTable *lock1, SideTable *lock2) { + spinlock_t::unlockTwo(&lock1->slock, &lock2->slock); +} + +template<> +void SideTable::unlockTwo(SideTable *lock1, SideTable *) { + lock1->unlock(); +} + +template<> +void SideTable::unlockTwo(SideTable *, SideTable *lock2) { + lock2->unlock(); +} + + + +// We cannot use a C++ static initializer to initialize SideTables because +// libc calls us before our C++ initializers run. We also don't want a global +// pointer to this struct because of the extra indirection. +// Do it the hard way. +alignas(StripedMap) static uint8_t + SideTableBuf[sizeof(StripedMap)]; + +static void SideTableInit() { + new (SideTableBuf) StripedMap(); +} + +static StripedMap& SideTables() { + return *reinterpret_cast*>(SideTableBuf); +} + +// anonymous namespace +}; + + +// +// The -fobjc-arc flag causes the compiler to issue calls to objc_{retain/release/autorelease/retain_block} +// + +id objc_retainBlock(id x) { + return (id)_Block_copy(x); +} + +// +// The following SHOULD be called by the compiler directly, but the request hasn't been made yet :-) +// + +BOOL objc_should_deallocate(id object) { + return YES; +} + +id +objc_retain_autorelease(id obj) +{ + return objc_autorelease(objc_retain(obj)); +} + + +void +objc_storeStrong(id *location, id obj) +{ + id prev = *location; + if (obj == prev) { + return; + } + objc_retain(obj); + *location = obj; + objc_release(prev); +} + + +// Update a weak variable. +// If HaveOld is true, the variable has an existing value +// that needs to be cleaned up. This value might be nil. +// If HaveNew is true, there is a new value that needs to be +// assigned into the variable. This value might be nil. +// If CrashIfDeallocating is true, the process is halted if newObj is +// deallocating or newObj's class does not support weak references. +// If CrashIfDeallocating is false, nil is stored instead. +template +static id +storeWeak(id *location, objc_object *newObj) +{ + assert(HaveOld || HaveNew); + if (!HaveNew) assert(newObj == nil); + + Class previouslyInitializedClass = nil; + id oldObj; + SideTable *oldTable; + SideTable *newTable; + + // Acquire locks for old and new values. 
+ // Order by lock address to prevent lock ordering problems. + // Retry if the old value changes underneath us. + retry: + if (HaveOld) { + oldObj = *location; + oldTable = &SideTables()[oldObj]; + } else { + oldTable = nil; + } + if (HaveNew) { + newTable = &SideTables()[newObj]; + } else { + newTable = nil; + } + + SideTable::lockTwo(oldTable, newTable); + + if (HaveOld && *location != oldObj) { + SideTable::unlockTwo(oldTable, newTable); + goto retry; + } + + // Prevent a deadlock between the weak reference machinery + // and the +initialize machinery by ensuring that no + // weakly-referenced object has an un-+initialized isa. + if (HaveNew && newObj) { + Class cls = newObj->getIsa(); + if (cls != previouslyInitializedClass && + !((objc_class *)cls)->isInitialized()) + { + SideTable::unlockTwo(oldTable, newTable); + _class_initialize(_class_getNonMetaClass(cls, (id)newObj)); + + // If this class is finished with +initialize then we're good. + // If this class is still running +initialize on this thread + // (i.e. +initialize called storeWeak on an instance of itself) + // then we may proceed but it will appear initializing and + // not yet initialized to the check above. + // Instead set previouslyInitializedClass to recognize it on retry. + previouslyInitializedClass = cls; + + goto retry; + } + } + + // Clean up old value, if any. + if (HaveOld) { + weak_unregister_no_lock(&oldTable->weak_table, oldObj, location); + } + + // Assign new value, if any. + if (HaveNew) { + newObj = (objc_object *)weak_register_no_lock(&newTable->weak_table, + (id)newObj, location, + CrashIfDeallocating); + // weak_register_no_lock returns nil if weak store should be rejected + + // Set is-weakly-referenced bit in refcount table. + if (newObj && !newObj->isTaggedPointer()) { + newObj->setWeaklyReferenced_nolock(); + } + + // Do not set *location anywhere else. That would introduce a race. + *location = (id)newObj; + } + else { + // No new value. The storage is not changed. + } + + SideTable::unlockTwo(oldTable, newTable); + + return (id)newObj; +} + + +/** + * This function stores a new value into a __weak variable. It would + * be used anywhere a __weak variable is the target of an assignment. + * + * @param location The address of the weak pointer itself + * @param newObj The new object this weak ptr should now point to + * + * @return \e newObj + */ +id +objc_storeWeak(id *location, id newObj) +{ + return storeWeak + (location, (objc_object *)newObj); +} + + +/** + * This function stores a new value into a __weak variable. + * If the new object is deallocating or the new object's class + * does not support weak references, stores nil instead. + * + * @param location The address of the weak pointer itself + * @param newObj The new object this weak ptr should now point to + * + * @return The value stored (either the new object or nil) + */ +id +objc_storeWeakOrNil(id *location, id newObj) +{ + return storeWeak + (location, (objc_object *)newObj); +} + + +/** + * Initialize a fresh weak pointer to some object location. + * It would be used for code like: + * + * (The nil case) + * __weak id weakPtr; + * (The non-nil case) + * NSObject *o = ...; + * __weak id weakPtr = o; + * + * This function IS NOT thread-safe with respect to concurrent + * modifications to the weak variable. (Concurrent weak clear is safe.) + * + * @param location Address of __weak ptr. + * @param newObj Object ptr. 
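+ *
+ * (Editorial note, not part of the original comment: under ARC this pair
+ * is roughly what the compiler emits for a __weak local, e.g.
+ *
+ *     __weak id weakPtr = o;   // => objc_initWeak(&weakPtr, o);
+ *     weakPtr = other;         // => objc_storeWeak(&weakPtr, other);
+ *     // scope exit            // => objc_destroyWeak(&weakPtr);
+ *
+ * so the thread-safety caveat above concerns racing writes to the same
+ * variable, not the referenced object being deallocated concurrently.)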
+ */ +id +objc_initWeak(id *location, id newObj) +{ + if (!newObj) { + *location = nil; + return nil; + } + + return storeWeak + (location, (objc_object*)newObj); +} + +id +objc_initWeakOrNil(id *location, id newObj) +{ + if (!newObj) { + *location = nil; + return nil; + } + + return storeWeak + (location, (objc_object*)newObj); +} + + +/** + * Destroys the relationship between a weak pointer + * and the object it is referencing in the internal weak + * table. If the weak pointer is not referencing anything, + * there is no need to edit the weak table. + * + * This function IS NOT thread-safe with respect to concurrent + * modifications to the weak variable. (Concurrent weak clear is safe.) + * + * @param location The weak pointer address. + */ +void +objc_destroyWeak(id *location) +{ + (void)storeWeak + (location, nil); +} + + +/* + Once upon a time we eagerly cleared *location if we saw the object + was deallocating. This confuses code like NSPointerFunctions which + tries to pre-flight the raw storage and assumes if the storage is + zero then the weak system is done interfering. That is false: the + weak system is still going to check and clear the storage later. + This can cause objc_weak_error complaints and crashes. + So we now don't touch the storage until deallocation completes. +*/ + +id +objc_loadWeakRetained(id *location) +{ + id obj; + id result; + Class cls; + + SideTable *table; + + retry: + // fixme std::atomic this load + obj = *location; + if (!obj) return nil; + if (obj->isTaggedPointer()) return obj; + + table = &SideTables()[obj]; + + table->lock(); + if (*location != obj) { + table->unlock(); + goto retry; + } + + result = obj; + + cls = obj->ISA(); + if (! cls->hasCustomRR()) { + // Fast case. We know +initialize is complete because + // default-RR can never be set before then. + assert(cls->isInitialized()); + if (! obj->rootTryRetain()) { + result = nil; + } + } + else { + // Slow case. We must check for +initialize and call it outside + // the lock if necessary in order to avoid deadlocks. + if (cls->isInitialized() || _thisThreadIsInitializingClass(cls)) { + BOOL (*tryRetain)(id, SEL) = (BOOL(*)(id, SEL)) + class_getMethodImplementation(cls, SEL_retainWeakReference); + if ((IMP)tryRetain == _objc_msgForward) { + result = nil; + } + else if (! (*tryRetain)(obj, SEL_retainWeakReference)) { + result = nil; + } + } + else { + table->unlock(); + _class_initialize(cls); + goto retry; + } + } + + table->unlock(); + return result; +} + +/** + * This loads the object referenced by a weak pointer and returns it, after + * retaining and autoreleasing the object to ensure that it stays alive + * long enough for the caller to use it. This function would be used + * anywhere a __weak variable is used in an expression. + * + * @param location The weak pointer address + * + * @return The object pointed to by \e location, or \c nil if \e location is \c nil. + */ +id +objc_loadWeak(id *location) +{ + if (!*location) return nil; + return objc_autorelease(objc_loadWeakRetained(location)); +} + + +/** + * This function copies a weak pointer from one location to another, + * when the destination doesn't already contain a weak pointer. It + * would be used for code like: + * + * __weak id src = ...; + * __weak id dst = src; + * + * This function IS NOT thread-safe with respect to concurrent + * modifications to the destination variable. (Concurrent weak clear is safe.) + * + * @param dst The destination variable. + * @param src The source variable. 
+ */ +void +objc_copyWeak(id *dst, id *src) +{ + id obj = objc_loadWeakRetained(src); + objc_initWeak(dst, obj); + objc_release(obj); +} + +/** + * Move a weak pointer from one location to another. + * Before the move, the destination must be uninitialized. + * After the move, the source is nil. + * + * This function IS NOT thread-safe with respect to concurrent + * modifications to either weak variable. (Concurrent weak clear is safe.) + * + */ +void +objc_moveWeak(id *dst, id *src) +{ + objc_copyWeak(dst, src); + objc_destroyWeak(src); + *src = nil; +} + + +/*********************************************************************** + Autorelease pool implementation + + A thread's autorelease pool is a stack of pointers. + Each pointer is either an object to release, or POOL_BOUNDARY which is + an autorelease pool boundary. + A pool token is a pointer to the POOL_BOUNDARY for that pool. When + the pool is popped, every object hotter than the sentinel is released. + The stack is divided into a doubly-linked list of pages. Pages are added + and deleted as necessary. + Thread-local storage points to the hot page, where newly autoreleased + objects are stored. +**********************************************************************/ + +// Set this to 1 to mprotect() autorelease pool contents +#define PROTECT_AUTORELEASEPOOL 0 + +// Set this to 1 to validate the entire autorelease pool header all the time +// (i.e. use check() instead of fastcheck() everywhere) +#define CHECK_AUTORELEASEPOOL (DEBUG) + +BREAKPOINT_FUNCTION(void objc_autoreleaseNoPool(id obj)); +BREAKPOINT_FUNCTION(void objc_autoreleasePoolInvalid(const void *token)); + +namespace { + +struct magic_t { + static const uint32_t M0 = 0xA1A1A1A1; +# define M1 "AUTORELEASE!" + static const size_t M1_len = 12; + uint32_t m[4]; + + magic_t() { + assert(M1_len == strlen(M1)); + assert(M1_len == 3 * sizeof(m[1])); + + m[0] = M0; + strncpy((char *)&m[1], M1, M1_len); + } + + ~magic_t() { + m[0] = m[1] = m[2] = m[3] = 0; + } + + bool check() const { + return (m[0] == M0 && 0 == strncmp((char *)&m[1], M1, M1_len)); + } + + bool fastcheck() const { +#if CHECK_AUTORELEASEPOOL + return check(); +#else + return (m[0] == M0); +#endif + } + +# undef M1 +}; + + +class AutoreleasePoolPage +{ + // EMPTY_POOL_PLACEHOLDER is stored in TLS when exactly one pool is + // pushed and it has never contained any objects. This saves memory + // when the top level (i.e. libdispatch) pushes and pops pools but + // never uses them. 
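+    // (Editorial note, not part of the original source: the placeholder is
+    // a sentinel stored directly in the thread's TLS slot rather than a
+    // real page; autoreleaseNoPage() below swaps it for an actual
+    // AutoreleasePoolPage the first time something is autoreleased.  An
+    // @autoreleasepool block compiles down to the matching pair
+    //     void *token = objc_autoreleasePoolPush();
+    //     ...
+    //     objc_autoreleasePoolPop(token);
+    // defined near the end of this file.)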
+# define EMPTY_POOL_PLACEHOLDER ((id*)1) + +# define POOL_BOUNDARY nil + static pthread_key_t const key = AUTORELEASE_POOL_KEY; + static uint8_t const SCRIBBLE = 0xA3; // 0xA3A3A3A3 after releasing + static size_t const SIZE = +#if PROTECT_AUTORELEASEPOOL + PAGE_MAX_SIZE; // must be multiple of vm page size +#else + PAGE_MAX_SIZE; // size and alignment, power of 2 +#endif + static size_t const COUNT = SIZE / sizeof(id); + + magic_t const magic; + id *next; + pthread_t const thread; + AutoreleasePoolPage * const parent; + AutoreleasePoolPage *child; + uint32_t const depth; + uint32_t hiwat; + + // SIZE-sizeof(*this) bytes of contents follow + + static void * operator new(size_t size) { + return malloc_zone_memalign(malloc_default_zone(), SIZE, SIZE); + } + static void operator delete(void * p) { + return free(p); + } + + inline void protect() { +#if PROTECT_AUTORELEASEPOOL + mprotect(this, SIZE, PROT_READ); + check(); +#endif + } + + inline void unprotect() { +#if PROTECT_AUTORELEASEPOOL + check(); + mprotect(this, SIZE, PROT_READ | PROT_WRITE); +#endif + } + + AutoreleasePoolPage(AutoreleasePoolPage *newParent) + : magic(), next(begin()), thread(pthread_self()), + parent(newParent), child(nil), + depth(parent ? 1+parent->depth : 0), + hiwat(parent ? parent->hiwat : 0) + { + if (parent) { + parent->check(); + assert(!parent->child); + parent->unprotect(); + parent->child = this; + parent->protect(); + } + protect(); + } + + ~AutoreleasePoolPage() + { + check(); + unprotect(); + assert(empty()); + + // Not recursive: we don't want to blow out the stack + // if a thread accumulates a stupendous amount of garbage + assert(!child); + } + + + void busted(bool die = true) + { + magic_t right; + (die ? _objc_fatal : _objc_inform) + ("autorelease pool page %p corrupted\n" + " magic 0x%08x 0x%08x 0x%08x 0x%08x\n" + " should be 0x%08x 0x%08x 0x%08x 0x%08x\n" + " pthread %p\n" + " should be %p\n", + this, + magic.m[0], magic.m[1], magic.m[2], magic.m[3], + right.m[0], right.m[1], right.m[2], right.m[3], + this->thread, pthread_self()); + } + + void check(bool die = true) + { + if (!magic.check() || !pthread_equal(thread, pthread_self())) { + busted(die); + } + } + + void fastcheck(bool die = true) + { +#if CHECK_AUTORELEASEPOOL + check(die); +#else + if (! 
magic.fastcheck()) { + busted(die); + } +#endif + } + + + id * begin() { + return (id *) ((uint8_t *)this+sizeof(*this)); + } + + id * end() { + return (id *) ((uint8_t *)this+SIZE); + } + + bool empty() { + return next == begin(); + } + + bool full() { + return next == end(); + } + + bool lessThanHalfFull() { + return (next - begin() < (end() - begin()) / 2); + } + + id *add(id obj) + { + assert(!full()); + unprotect(); + id *ret = next; // faster than `return next-1` because of aliasing + *next++ = obj; + protect(); + return ret; + } + + void releaseAll() + { + releaseUntil(begin()); + } + + void releaseUntil(id *stop) + { + // Not recursive: we don't want to blow out the stack + // if a thread accumulates a stupendous amount of garbage + + while (this->next != stop) { + // Restart from hotPage() every time, in case -release + // autoreleased more objects + AutoreleasePoolPage *page = hotPage(); + + // fixme I think this `while` can be `if`, but I can't prove it + while (page->empty()) { + page = page->parent; + setHotPage(page); + } + + page->unprotect(); + id obj = *--page->next; + memset((void*)page->next, SCRIBBLE, sizeof(*page->next)); + page->protect(); + + if (obj != POOL_BOUNDARY) { + objc_release(obj); + } + } + + setHotPage(this); + +#if DEBUG + // we expect any children to be completely empty + for (AutoreleasePoolPage *page = child; page; page = page->child) { + assert(page->empty()); + } +#endif + } + + void kill() + { + // Not recursive: we don't want to blow out the stack + // if a thread accumulates a stupendous amount of garbage + AutoreleasePoolPage *page = this; + while (page->child) page = page->child; + + AutoreleasePoolPage *deathptr; + do { + deathptr = page; + page = page->parent; + if (page) { + page->unprotect(); + page->child = nil; + page->protect(); + } + delete deathptr; + } while (deathptr != this); + } + + static void tls_dealloc(void *p) + { + if (p == (void*)EMPTY_POOL_PLACEHOLDER) { + // No objects or pool pages to clean up here. 
+ return; + } + + // reinstate TLS value while we work + setHotPage((AutoreleasePoolPage *)p); + + if (AutoreleasePoolPage *page = coldPage()) { + if (!page->empty()) pop(page->begin()); // pop all of the pools + if (DebugMissingPools || DebugPoolAllocation) { + // pop() killed the pages already + } else { + page->kill(); // free all of the pages + } + } + + // clear TLS value so TLS destruction doesn't loop + setHotPage(nil); + } + + static AutoreleasePoolPage *pageForPointer(const void *p) + { + return pageForPointer((uintptr_t)p); + } + + static AutoreleasePoolPage *pageForPointer(uintptr_t p) + { + AutoreleasePoolPage *result; + uintptr_t offset = p % SIZE; + + assert(offset >= sizeof(AutoreleasePoolPage)); + + result = (AutoreleasePoolPage *)(p - offset); + result->fastcheck(); + + return result; + } + + + static inline bool haveEmptyPoolPlaceholder() + { + id *tls = (id *)tls_get_direct(key); + return (tls == EMPTY_POOL_PLACEHOLDER); + } + + static inline id* setEmptyPoolPlaceholder() + { + assert(tls_get_direct(key) == nil); + tls_set_direct(key, (void *)EMPTY_POOL_PLACEHOLDER); + return EMPTY_POOL_PLACEHOLDER; + } + + static inline AutoreleasePoolPage *hotPage() + { + AutoreleasePoolPage *result = (AutoreleasePoolPage *) + tls_get_direct(key); + if ((id *)result == EMPTY_POOL_PLACEHOLDER) return nil; + if (result) result->fastcheck(); + return result; + } + + static inline void setHotPage(AutoreleasePoolPage *page) + { + if (page) page->fastcheck(); + tls_set_direct(key, (void *)page); + } + + static inline AutoreleasePoolPage *coldPage() + { + AutoreleasePoolPage *result = hotPage(); + if (result) { + while (result->parent) { + result = result->parent; + result->fastcheck(); + } + } + return result; + } + + + static inline id *autoreleaseFast(id obj) + { + AutoreleasePoolPage *page = hotPage(); + if (page && !page->full()) { + return page->add(obj); + } else if (page) { + return autoreleaseFullPage(obj, page); + } else { + return autoreleaseNoPage(obj); + } + } + + static __attribute__((noinline)) + id *autoreleaseFullPage(id obj, AutoreleasePoolPage *page) + { + // The hot page is full. + // Step to the next non-full page, adding a new page if necessary. + // Then add the object to that page. + assert(page == hotPage()); + assert(page->full() || DebugPoolAllocation); + + do { + if (page->child) page = page->child; + else page = new AutoreleasePoolPage(page); + } while (page->full()); + + setHotPage(page); + return page->add(obj); + } + + static __attribute__((noinline)) + id *autoreleaseNoPage(id obj) + { + // "No page" could mean no pool has been pushed + // or an empty placeholder pool has been pushed and has no contents yet + assert(!hotPage()); + + bool pushExtraBoundary = false; + if (haveEmptyPoolPlaceholder()) { + // We are pushing a second pool over the empty placeholder pool + // or pushing the first object into the empty placeholder pool. + // Before doing that, push a pool boundary on behalf of the pool + // that is currently represented by the empty placeholder. + pushExtraBoundary = true; + } + else if (obj != POOL_BOUNDARY && DebugMissingPools) { + // We are pushing an object with no pool in place, + // and no-pool debugging was requested by environment. 
+ _objc_inform("MISSING POOLS: (%p) Object %p of class %s " + "autoreleased with no pool in place - " + "just leaking - break on " + "objc_autoreleaseNoPool() to debug", + pthread_self(), (void*)obj, object_getClassName(obj)); + objc_autoreleaseNoPool(obj); + return nil; + } + else if (obj == POOL_BOUNDARY && !DebugPoolAllocation) { + // We are pushing a pool with no pool in place, + // and alloc-per-pool debugging was not requested. + // Install and return the empty pool placeholder. + return setEmptyPoolPlaceholder(); + } + + // We are pushing an object or a non-placeholder'd pool. + + // Install the first page. + AutoreleasePoolPage *page = new AutoreleasePoolPage(nil); + setHotPage(page); + + // Push a boundary on behalf of the previously-placeholder'd pool. + if (pushExtraBoundary) { + page->add(POOL_BOUNDARY); + } + + // Push the requested object or pool. + return page->add(obj); + } + + + static __attribute__((noinline)) + id *autoreleaseNewPage(id obj) + { + AutoreleasePoolPage *page = hotPage(); + if (page) return autoreleaseFullPage(obj, page); + else return autoreleaseNoPage(obj); + } + +public: + static inline id autorelease(id obj) + { + assert(obj); + assert(!obj->isTaggedPointer()); + id *dest __unused = autoreleaseFast(obj); + assert(!dest || dest == EMPTY_POOL_PLACEHOLDER || *dest == obj); + return obj; + } + + + static inline void *push() + { + id *dest; + if (DebugPoolAllocation) { + // Each autorelease pool starts on a new pool page. + dest = autoreleaseNewPage(POOL_BOUNDARY); + } else { + dest = autoreleaseFast(POOL_BOUNDARY); + } + assert(dest == EMPTY_POOL_PLACEHOLDER || *dest == POOL_BOUNDARY); + return dest; + } + + static void badPop(void *token) + { + // Error. For bincompat purposes this is not + // fatal in executables built with old SDKs. + + if (DebugPoolAllocation || sdkIsAtLeast(10_12, 10_0, 10_0, 3_0)) { + // OBJC_DEBUG_POOL_ALLOCATION or new SDK. Bad pop is fatal. + _objc_fatal + ("Invalid or prematurely-freed autorelease pool %p.", token); + } + + // Old SDK. Bad pop is warned once. + static bool complained = false; + if (!complained) { + complained = true; + _objc_inform_now_and_on_crash + ("Invalid or prematurely-freed autorelease pool %p. " + "Set a breakpoint on objc_autoreleasePoolInvalid to debug. " + "Proceeding anyway because the app is old " + "(SDK version " SDK_FORMAT "). Memory errors are likely.", + token, FORMAT_SDK(sdkVersion())); + } + objc_autoreleasePoolInvalid(token); + } + + static inline void pop(void *token) + { + AutoreleasePoolPage *page; + id *stop; + + if (token == (void*)EMPTY_POOL_PLACEHOLDER) { + // Popping the top-level placeholder pool. + if (hotPage()) { + // Pool was used. Pop its contents normally. + // Pool pages remain allocated for re-use as usual. + pop(coldPage()->begin()); + } else { + // Pool was never used. Clear the placeholder. + setHotPage(nil); + } + return; + } + + page = pageForPointer(token); + stop = (id *)token; + if (*stop != POOL_BOUNDARY) { + if (stop == page->begin() && !page->parent) { + // Start of coldest page may correctly not be POOL_BOUNDARY: + // 1. top-level pool is popped, leaving the cold page in place + // 2. an object is autoreleased with no pool + } else { + // Error. For bincompat purposes this is not + // fatal in executables built with old SDKs. 
+ return badPop(token); + } + } + + if (PrintPoolHiwat) printHiwat(); + + page->releaseUntil(stop); + + // memory: delete empty children + if (DebugPoolAllocation && page->empty()) { + // special case: delete everything during page-per-pool debugging + AutoreleasePoolPage *parent = page->parent; + page->kill(); + setHotPage(parent); + } else if (DebugMissingPools && page->empty() && !page->parent) { + // special case: delete everything for pop(top) + // when debugging missing autorelease pools + page->kill(); + setHotPage(nil); + } + else if (page->child) { + // hysteresis: keep one empty child if page is more than half full + if (page->lessThanHalfFull()) { + page->child->kill(); + } + else if (page->child->child) { + page->child->child->kill(); + } + } + } + + static void init() + { + int r __unused = pthread_key_init_np(AutoreleasePoolPage::key, + AutoreleasePoolPage::tls_dealloc); + assert(r == 0); + } + + void print() + { + _objc_inform("[%p] ................ PAGE %s %s %s", this, + full() ? "(full)" : "", + this == hotPage() ? "(hot)" : "", + this == coldPage() ? "(cold)" : ""); + check(false); + for (id *p = begin(); p < next; p++) { + if (*p == POOL_BOUNDARY) { + _objc_inform("[%p] ################ POOL %p", p, p); + } else { + _objc_inform("[%p] %#16lx %s", + p, (unsigned long)*p, object_getClassName(*p)); + } + } + } + + static void printAll() + { + _objc_inform("##############"); + _objc_inform("AUTORELEASE POOLS for thread %p", pthread_self()); + + AutoreleasePoolPage *page; + ptrdiff_t objects = 0; + for (page = coldPage(); page; page = page->child) { + objects += page->next - page->begin(); + } + _objc_inform("%llu releases pending.", (unsigned long long)objects); + + if (haveEmptyPoolPlaceholder()) { + _objc_inform("[%p] ................ PAGE (placeholder)", + EMPTY_POOL_PLACEHOLDER); + _objc_inform("[%p] ################ POOL (placeholder)", + EMPTY_POOL_PLACEHOLDER); + } + else { + for (page = coldPage(); page; page = page->child) { + page->print(); + } + } + + _objc_inform("##############"); + } + + static void printHiwat() + { + // Check and propagate high water mark + // Ignore high water marks under 256 to suppress noise. + AutoreleasePoolPage *p = hotPage(); + uint32_t mark = p->depth*COUNT + (uint32_t)(p->next - p->begin()); + if (mark > p->hiwat && mark > 256) { + for( ; p; p = p->parent) { + p->unprotect(); + p->hiwat = mark; + p->protect(); + } + + _objc_inform("POOL HIGHWATER: new high water mark of %u " + "pending releases for thread %p:", + mark, pthread_self()); + + void *stack[128]; + int count = backtrace(stack, sizeof(stack)/sizeof(stack[0])); + char **sym = backtrace_symbols(stack, count); + for (int i = 0; i < count; i++) { + _objc_inform("POOL HIGHWATER: %s", sym[i]); + } + free(sym); + } + } + +#undef POOL_BOUNDARY +}; + +// anonymous namespace +}; + + +/*********************************************************************** +* Slow paths for inline control +**********************************************************************/ + +#if SUPPORT_NONPOINTER_ISA + +NEVER_INLINE id +objc_object::rootRetain_overflow(bool tryRetain) +{ + return rootRetain(tryRetain, true); +} + + +NEVER_INLINE bool +objc_object::rootRelease_underflow(bool performDealloc) +{ + return rootRelease(performDealloc, true); +} + + +// Slow path of clearDeallocating() +// for objects with nonpointer isa +// that were ever weakly referenced +// or whose retain count ever overflowed to the side table. 
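+// (Editorial note, not part of the original source: with nonpointer isa
+// the externally visible retain count is assembled from two places,
+// roughly
+//     retainCount == 1 + isa.extra_rc
+//                      + (side-table word >> SIDE_TABLE_RC_SHIFT)
+// which is why deallocation below has to purge both the weak table and
+// the refcount table for such objects.)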
+NEVER_INLINE void +objc_object::clearDeallocating_slow() +{ + assert(isa.nonpointer && (isa.weakly_referenced || isa.has_sidetable_rc)); + + SideTable& table = SideTables()[this]; + table.lock(); + if (isa.weakly_referenced) { + weak_clear_no_lock(&table.weak_table, (id)this); + } + if (isa.has_sidetable_rc) { + table.refcnts.erase(this); + } + table.unlock(); +} + +#endif + +__attribute__((noinline,used)) +id +objc_object::rootAutorelease2() +{ + assert(!isTaggedPointer()); + return AutoreleasePoolPage::autorelease((id)this); +} + + +BREAKPOINT_FUNCTION( + void objc_overrelease_during_dealloc_error(void) +); + + +NEVER_INLINE +bool +objc_object::overrelease_error() +{ + _objc_inform_now_and_on_crash("%s object %p overreleased while already deallocating; break on objc_overrelease_during_dealloc_error to debug", object_getClassName((id)this), this); + objc_overrelease_during_dealloc_error(); + return false; // allow rootRelease() to tail-call this +} + + +/*********************************************************************** +* Retain count operations for side table. +**********************************************************************/ + + +#if DEBUG +// Used to assert that an object is not present in the side table. +bool +objc_object::sidetable_present() +{ + bool result = false; + SideTable& table = SideTables()[this]; + + table.lock(); + + RefcountMap::iterator it = table.refcnts.find(this); + if (it != table.refcnts.end()) result = true; + + if (weak_is_registered_no_lock(&table.weak_table, (id)this)) result = true; + + table.unlock(); + + return result; +} +#endif + +#if SUPPORT_NONPOINTER_ISA + +void +objc_object::sidetable_lock() +{ + SideTable& table = SideTables()[this]; + table.lock(); +} + +void +objc_object::sidetable_unlock() +{ + SideTable& table = SideTables()[this]; + table.unlock(); +} + + +// Move the entire retain count to the side table, +// as well as isDeallocating and weaklyReferenced. +void +objc_object::sidetable_moveExtraRC_nolock(size_t extra_rc, + bool isDeallocating, + bool weaklyReferenced) +{ + assert(!isa.nonpointer); // should already be changed to raw pointer + SideTable& table = SideTables()[this]; + + size_t& refcntStorage = table.refcnts[this]; + size_t oldRefcnt = refcntStorage; + // not deallocating - that was in the isa + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + uintptr_t carry; + size_t refcnt = addc(oldRefcnt, extra_rc << SIDE_TABLE_RC_SHIFT, 0, &carry); + if (carry) refcnt = SIDE_TABLE_RC_PINNED; + if (isDeallocating) refcnt |= SIDE_TABLE_DEALLOCATING; + if (weaklyReferenced) refcnt |= SIDE_TABLE_WEAKLY_REFERENCED; + + refcntStorage = refcnt; +} + + +// Move some retain counts to the side table from the isa field. +// Returns true if the object is now pinned. 
+bool +objc_object::sidetable_addExtraRC_nolock(size_t delta_rc) +{ + assert(isa.nonpointer); + SideTable& table = SideTables()[this]; + + size_t& refcntStorage = table.refcnts[this]; + size_t oldRefcnt = refcntStorage; + // isa-side bits should not be set here + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + if (oldRefcnt & SIDE_TABLE_RC_PINNED) return true; + + uintptr_t carry; + size_t newRefcnt = + addc(oldRefcnt, delta_rc << SIDE_TABLE_RC_SHIFT, 0, &carry); + if (carry) { + refcntStorage = + SIDE_TABLE_RC_PINNED | (oldRefcnt & SIDE_TABLE_FLAG_MASK); + return true; + } + else { + refcntStorage = newRefcnt; + return false; + } +} + + +// Move some retain counts from the side table to the isa field. +// Returns the actual count subtracted, which may be less than the request. +size_t +objc_object::sidetable_subExtraRC_nolock(size_t delta_rc) +{ + assert(isa.nonpointer); + SideTable& table = SideTables()[this]; + + RefcountMap::iterator it = table.refcnts.find(this); + if (it == table.refcnts.end() || it->second == 0) { + // Side table retain count is zero. Can't borrow. + return 0; + } + size_t oldRefcnt = it->second; + + // isa-side bits should not be set here + assert((oldRefcnt & SIDE_TABLE_DEALLOCATING) == 0); + assert((oldRefcnt & SIDE_TABLE_WEAKLY_REFERENCED) == 0); + + size_t newRefcnt = oldRefcnt - (delta_rc << SIDE_TABLE_RC_SHIFT); + assert(oldRefcnt > newRefcnt); // shouldn't underflow + it->second = newRefcnt; + return delta_rc; +} + + +size_t +objc_object::sidetable_getExtraRC_nolock() +{ + assert(isa.nonpointer); + SideTable& table = SideTables()[this]; + RefcountMap::iterator it = table.refcnts.find(this); + if (it == table.refcnts.end()) return 0; + else return it->second >> SIDE_TABLE_RC_SHIFT; +} + + +// SUPPORT_NONPOINTER_ISA +#endif + + +id +objc_object::sidetable_retain() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.nonpointer); +#endif + SideTable& table = SideTables()[this]; + + table.lock(); + size_t& refcntStorage = table.refcnts[this]; + if (! (refcntStorage & SIDE_TABLE_RC_PINNED)) { + refcntStorage += SIDE_TABLE_RC_ONE; + } + table.unlock(); + + return (id)this; +} + + +bool +objc_object::sidetable_tryRetain() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.nonpointer); +#endif + SideTable& table = SideTables()[this]; + + // NO SPINLOCK HERE + // _objc_rootTryRetain() is called exclusively by _objc_loadWeak(), + // which already acquired the lock on our behalf. + + // fixme can't do this efficiently with os_lock_handoff_s + // if (table.slock == 0) { + // _objc_fatal("Do not call -_tryRetain."); + // } + + bool result = true; + RefcountMap::iterator it = table.refcnts.find(this); + if (it == table.refcnts.end()) { + table.refcnts[this] = SIDE_TABLE_RC_ONE; + } else if (it->second & SIDE_TABLE_DEALLOCATING) { + result = false; + } else if (! 
(it->second & SIDE_TABLE_RC_PINNED)) { + it->second += SIDE_TABLE_RC_ONE; + } + + return result; +} + + +uintptr_t +objc_object::sidetable_retainCount() +{ + SideTable& table = SideTables()[this]; + + size_t refcnt_result = 1; + + table.lock(); + RefcountMap::iterator it = table.refcnts.find(this); + if (it != table.refcnts.end()) { + // this is valid for SIDE_TABLE_RC_PINNED too + refcnt_result += it->second >> SIDE_TABLE_RC_SHIFT; + } + table.unlock(); + return refcnt_result; +} + + +bool +objc_object::sidetable_isDeallocating() +{ + SideTable& table = SideTables()[this]; + + // NO SPINLOCK HERE + // _objc_rootIsDeallocating() is called exclusively by _objc_storeWeak(), + // which already acquired the lock on our behalf. + + + // fixme can't do this efficiently with os_lock_handoff_s + // if (table.slock == 0) { + // _objc_fatal("Do not call -_isDeallocating."); + // } + + RefcountMap::iterator it = table.refcnts.find(this); + return (it != table.refcnts.end()) && (it->second & SIDE_TABLE_DEALLOCATING); +} + + +bool +objc_object::sidetable_isWeaklyReferenced() +{ + bool result = false; + + SideTable& table = SideTables()[this]; + table.lock(); + + RefcountMap::iterator it = table.refcnts.find(this); + if (it != table.refcnts.end()) { + result = it->second & SIDE_TABLE_WEAKLY_REFERENCED; + } + + table.unlock(); + + return result; +} + + +void +objc_object::sidetable_setWeaklyReferenced_nolock() +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.nonpointer); +#endif + + SideTable& table = SideTables()[this]; + + table.refcnts[this] |= SIDE_TABLE_WEAKLY_REFERENCED; +} + + +// rdar://20206767 +// return uintptr_t instead of bool so that the various raw-isa +// -release paths all return zero in eax +uintptr_t +objc_object::sidetable_release(bool performDealloc) +{ +#if SUPPORT_NONPOINTER_ISA + assert(!isa.nonpointer); +#endif + SideTable& table = SideTables()[this]; + + bool do_dealloc = false; + + table.lock(); + RefcountMap::iterator it = table.refcnts.find(this); + if (it == table.refcnts.end()) { + do_dealloc = true; + table.refcnts[this] = SIDE_TABLE_DEALLOCATING; + } else if (it->second < SIDE_TABLE_DEALLOCATING) { + // SIDE_TABLE_WEAKLY_REFERENCED may be set. Don't change it. + do_dealloc = true; + it->second |= SIDE_TABLE_DEALLOCATING; + } else if (! (it->second & SIDE_TABLE_RC_PINNED)) { + it->second -= SIDE_TABLE_RC_ONE; + } + table.unlock(); + if (do_dealloc && performDealloc) { + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc); + } + return do_dealloc; +} + + +void +objc_object::sidetable_clearDeallocating() +{ + SideTable& table = SideTables()[this]; + + // clear any weak table items + // clear extra retain count and deallocating bit + // (fixme warn or abort if extra retain count == 0 ?) 
+ table.lock(); + RefcountMap::iterator it = table.refcnts.find(this); + if (it != table.refcnts.end()) { + if (it->second & SIDE_TABLE_WEAKLY_REFERENCED) { + weak_clear_no_lock(&table.weak_table, (id)this); + } + table.refcnts.erase(it); + } + table.unlock(); +} + + +/*********************************************************************** +* Optimized retain/release/autorelease entrypoints +**********************************************************************/ + + +#if __OBJC2__ + +__attribute__((aligned(16))) +id +objc_retain(id obj) +{ + if (!obj) return obj; + if (obj->isTaggedPointer()) return obj; + return obj->retain(); +} + + +__attribute__((aligned(16))) +void +objc_release(id obj) +{ + if (!obj) return; + if (obj->isTaggedPointer()) return; + return obj->release(); +} + + +__attribute__((aligned(16))) +id +objc_autorelease(id obj) +{ + if (!obj) return obj; + if (obj->isTaggedPointer()) return obj; + return obj->autorelease(); +} + + +// OBJC2 +#else +// not OBJC2 + + +id objc_retain(id obj) { return [obj retain]; } +void objc_release(id obj) { [obj release]; } +id objc_autorelease(id obj) { return [obj autorelease]; } + + +#endif + + +/*********************************************************************** +* Basic operations for root class implementations a.k.a. _objc_root*() +**********************************************************************/ + +bool +_objc_rootTryRetain(id obj) +{ + assert(obj); + + return obj->rootTryRetain(); +} + +bool +_objc_rootIsDeallocating(id obj) +{ + assert(obj); + + return obj->rootIsDeallocating(); +} + + +void +objc_clear_deallocating(id obj) +{ + assert(obj); + + if (obj->isTaggedPointer()) return; + obj->clearDeallocating(); +} + + +bool +_objc_rootReleaseWasZero(id obj) +{ + assert(obj); + + return obj->rootReleaseShouldDealloc(); +} + + +id +_objc_rootAutorelease(id obj) +{ + assert(obj); + return obj->rootAutorelease(); +} + +uintptr_t +_objc_rootRetainCount(id obj) +{ + assert(obj); + + return obj->rootRetainCount(); +} + + +id +_objc_rootRetain(id obj) +{ + assert(obj); + + return obj->rootRetain(); +} + +void +_objc_rootRelease(id obj) +{ + assert(obj); + + obj->rootRelease(); +} + + +id +_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone) +{ + id obj; + +#if __OBJC2__ + // allocWithZone under __OBJC2__ ignores the zone parameter + (void)zone; + obj = class_createInstance(cls, 0); +#else + if (!zone) { + obj = class_createInstance(cls, 0); + } + else { + obj = class_createInstanceFromZone(cls, 0, zone); + } +#endif + + if (slowpath(!obj)) obj = callBadAllocHandler(cls); + return obj; +} + + +// Call [cls alloc] or [cls allocWithZone:nil], with appropriate +// shortcutting optimizations. +static ALWAYS_INLINE id +callAlloc(Class cls, bool checkNil, bool allocWithZone=false) +{ + if (slowpath(checkNil && !cls)) return nil; + +#if __OBJC2__ + if (fastpath(!cls->ISA()->hasCustomAWZ())) { + // No alloc/allocWithZone implementation. Go straight to the allocator. + // fixme store hasCustomAWZ in the non-meta class and + // add it to canAllocFast's summary + if (fastpath(cls->canAllocFast())) { + // No ctors, raw isa, etc. Go straight to the metal. + bool dtor = cls->hasCxxDtor(); + id obj = (id)calloc(1, cls->bits.fastInstanceSize()); + if (slowpath(!obj)) return callBadAllocHandler(cls); + obj->initInstanceIsa(cls, dtor); + return obj; + } + else { + // Has ctor or raw isa or something. Use the slower path. 
+ id obj = class_createInstance(cls, 0); + if (slowpath(!obj)) return callBadAllocHandler(cls); + return obj; + } + } +#endif + + // No shortcuts available. + if (allocWithZone) return [cls allocWithZone:nil]; + return [cls alloc]; +} + + +// Base class implementation of +alloc. cls is not nil. +// Calls [cls allocWithZone:nil]. +id +_objc_rootAlloc(Class cls) +{ + return callAlloc(cls, false/*checkNil*/, true/*allocWithZone*/); +} + +// Calls [cls alloc]. +id +objc_alloc(Class cls) +{ + return callAlloc(cls, true/*checkNil*/, false/*allocWithZone*/); +} + +// Calls [cls allocWithZone:nil]. +id +objc_allocWithZone(Class cls) +{ + return callAlloc(cls, true/*checkNil*/, true/*allocWithZone*/); +} + + +void +_objc_rootDealloc(id obj) +{ + assert(obj); + + obj->rootDealloc(); +} + +void +_objc_rootFinalize(id obj __unused) +{ + assert(obj); + _objc_fatal("_objc_rootFinalize called with garbage collection off"); +} + + +id +_objc_rootInit(id obj) +{ + // In practice, it will be hard to rely on this function. + // Many classes do not properly chain -init calls. + return obj; +} + + +malloc_zone_t * +_objc_rootZone(id obj) +{ + (void)obj; +#if __OBJC2__ + // allocWithZone under __OBJC2__ ignores the zone parameter + return malloc_default_zone(); +#else + malloc_zone_t *rval = malloc_zone_from_ptr(obj); + return rval ? rval : malloc_default_zone(); +#endif +} + +uintptr_t +_objc_rootHash(id obj) +{ + return (uintptr_t)obj; +} + +void * +objc_autoreleasePoolPush(void) +{ + return AutoreleasePoolPage::push(); +} + +void +objc_autoreleasePoolPop(void *ctxt) +{ + AutoreleasePoolPage::pop(ctxt); +} + + +void * +_objc_autoreleasePoolPush(void) +{ + return objc_autoreleasePoolPush(); +} + +void +_objc_autoreleasePoolPop(void *ctxt) +{ + objc_autoreleasePoolPop(ctxt); +} + +void +_objc_autoreleasePoolPrint(void) +{ + AutoreleasePoolPage::printAll(); +} + + +// Same as objc_release but suitable for tail-calling +// if you need the value back and don't want to push a frame before this point. +__attribute__((noinline)) +static id +objc_releaseAndReturn(id obj) +{ + objc_release(obj); + return obj; +} + +// Same as objc_retainAutorelease but suitable for tail-calling +// if you don't want to push a frame before this point. +__attribute__((noinline)) +static id +objc_retainAutoreleaseAndReturn(id obj) +{ + return objc_retainAutorelease(obj); +} + + +// Prepare a value at +1 for return through a +0 autoreleasing convention. +id +objc_autoreleaseReturnValue(id obj) +{ + if (prepareOptimizedReturn(ReturnAtPlus1)) return obj; + + return objc_autorelease(obj); +} + +// Prepare a value at +0 for return through a +0 autoreleasing convention. +id +objc_retainAutoreleaseReturnValue(id obj) +{ + if (prepareOptimizedReturn(ReturnAtPlus0)) return obj; + + // not objc_autoreleaseReturnValue(objc_retain(obj)) + // because we don't need another optimization attempt + return objc_retainAutoreleaseAndReturn(obj); +} + +// Accept a value returned through a +0 autoreleasing convention for use at +1. +id +objc_retainAutoreleasedReturnValue(id obj) +{ + if (acceptOptimizedReturn() == ReturnAtPlus1) return obj; + + return objc_retain(obj); +} + +// Accept a value returned through a +0 autoreleasing convention for use at +0. 
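+// (Editorial sketch, not part of the original source: ARC pairs these
+// entry points across a call boundary, roughly
+//     // callee, returning a +1 value through the +0 convention
+//     return objc_autoreleaseReturnValue(obj);
+//     // caller, immediately taking ownership of the result
+//     id tmp = objc_retainAutoreleasedReturnValue(makeObject());
+// where makeObject() is a stand-in callee.  When the prepare/accept
+// handshake succeeds, the autorelease and the retain both become no-ops
+// instead of round-tripping through the autorelease pool.)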
+id +objc_unsafeClaimAutoreleasedReturnValue(id obj) +{ + if (acceptOptimizedReturn() == ReturnAtPlus0) return obj; + + return objc_releaseAndReturn(obj); +} + +id +objc_retainAutorelease(id obj) +{ + return objc_autorelease(objc_retain(obj)); +} + +void +_objc_deallocOnMainThreadHelper(void *context) +{ + id obj = (id)context; + [obj dealloc]; +} + +// convert objc_objectptr_t to id, callee must take ownership. +id objc_retainedObject(objc_objectptr_t pointer) { return (id)pointer; } + +// convert objc_objectptr_t to id, without ownership transfer. +id objc_unretainedObject(objc_objectptr_t pointer) { return (id)pointer; } + +// convert id to objc_objectptr_t, no ownership transfer. +objc_objectptr_t objc_unretainedPointer(id object) { return object; } + + +void arr_init(void) +{ + AutoreleasePoolPage::init(); + SideTableInit(); +} + + +#if SUPPORT_TAGGED_POINTERS + +// Placeholder for old debuggers. When they inspect an +// extended tagged pointer object they will see this isa. + +@interface __NSUnrecognizedTaggedPointer : NSObject +@end + +@implementation __NSUnrecognizedTaggedPointer ++(void) load { } +-(id) retain { return self; } +-(oneway void) release { } +-(id) autorelease { return self; } +@end + +#endif + + +@implementation NSObject + ++ (void)load { +} + ++ (void)initialize { +} + ++ (id)self { + return (id)self; +} + +- (id)self { + return self; +} + ++ (Class)class { + return self; +} + +- (Class)class { + return object_getClass(self); +} + ++ (Class)superclass { + return self->superclass; +} + +- (Class)superclass { + return [self class]->superclass; +} + ++ (BOOL)isMemberOfClass:(Class)cls { + return object_getClass((id)self) == cls; +} + +- (BOOL)isMemberOfClass:(Class)cls { + return [self class] == cls; +} + ++ (BOOL)isKindOfClass:(Class)cls { + for (Class tcls = object_getClass((id)self); tcls; tcls = tcls->superclass) { + if (tcls == cls) return YES; + } + return NO; +} + +- (BOOL)isKindOfClass:(Class)cls { + for (Class tcls = [self class]; tcls; tcls = tcls->superclass) { + if (tcls == cls) return YES; + } + return NO; +} + ++ (BOOL)isSubclassOfClass:(Class)cls { + for (Class tcls = self; tcls; tcls = tcls->superclass) { + if (tcls == cls) return YES; + } + return NO; +} + ++ (BOOL)isAncestorOfObject:(NSObject *)obj { + for (Class tcls = [obj class]; tcls; tcls = tcls->superclass) { + if (tcls == self) return YES; + } + return NO; +} + ++ (BOOL)instancesRespondToSelector:(SEL)sel { + if (!sel) return NO; + return class_respondsToSelector(self, sel); +} + ++ (BOOL)respondsToSelector:(SEL)sel { + if (!sel) return NO; + return class_respondsToSelector_inst(object_getClass(self), sel, self); +} + +- (BOOL)respondsToSelector:(SEL)sel { + if (!sel) return NO; + return class_respondsToSelector_inst([self class], sel, self); +} + ++ (BOOL)conformsToProtocol:(Protocol *)protocol { + if (!protocol) return NO; + for (Class tcls = self; tcls; tcls = tcls->superclass) { + if (class_conformsToProtocol(tcls, protocol)) return YES; + } + return NO; +} + +- (BOOL)conformsToProtocol:(Protocol *)protocol { + if (!protocol) return NO; + for (Class tcls = [self class]; tcls; tcls = tcls->superclass) { + if (class_conformsToProtocol(tcls, protocol)) return YES; + } + return NO; +} + ++ (NSUInteger)hash { + return _objc_rootHash(self); +} + +- (NSUInteger)hash { + return _objc_rootHash(self); +} + ++ (BOOL)isEqual:(id)obj { + return obj == (id)self; +} + +- (BOOL)isEqual:(id)obj { + return obj == self; +} + + ++ (BOOL)isFault { + return NO; +} + +- (BOOL)isFault { + return NO; +} + ++ 
(BOOL)isProxy { + return NO; +} + +- (BOOL)isProxy { + return NO; +} + + ++ (IMP)instanceMethodForSelector:(SEL)sel { + if (!sel) [self doesNotRecognizeSelector:sel]; + return class_getMethodImplementation(self, sel); +} + ++ (IMP)methodForSelector:(SEL)sel { + if (!sel) [self doesNotRecognizeSelector:sel]; + return object_getMethodImplementation((id)self, sel); +} + +- (IMP)methodForSelector:(SEL)sel { + if (!sel) [self doesNotRecognizeSelector:sel]; + return object_getMethodImplementation(self, sel); +} + ++ (BOOL)resolveClassMethod:(SEL)sel { + return NO; +} + ++ (BOOL)resolveInstanceMethod:(SEL)sel { + return NO; +} + +// Replaced by CF (throws an NSException) ++ (void)doesNotRecognizeSelector:(SEL)sel { + _objc_fatal("+[%s %s]: unrecognized selector sent to instance %p", + class_getName(self), sel_getName(sel), self); +} + +// Replaced by CF (throws an NSException) +- (void)doesNotRecognizeSelector:(SEL)sel { + _objc_fatal("-[%s %s]: unrecognized selector sent to instance %p", + object_getClassName(self), sel_getName(sel), self); +} + + ++ (id)performSelector:(SEL)sel { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL))objc_msgSend)((id)self, sel); +} + ++ (id)performSelector:(SEL)sel withObject:(id)obj { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL, id))objc_msgSend)((id)self, sel, obj); +} + ++ (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL, id, id))objc_msgSend)((id)self, sel, obj1, obj2); +} + +- (id)performSelector:(SEL)sel { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL))objc_msgSend)(self, sel); +} + +- (id)performSelector:(SEL)sel withObject:(id)obj { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL, id))objc_msgSend)(self, sel, obj); +} + +- (id)performSelector:(SEL)sel withObject:(id)obj1 withObject:(id)obj2 { + if (!sel) [self doesNotRecognizeSelector:sel]; + return ((id(*)(id, SEL, id, id))objc_msgSend)(self, sel, obj1, obj2); +} + + +// Replaced by CF (returns an NSMethodSignature) ++ (NSMethodSignature *)instanceMethodSignatureForSelector:(SEL)sel { + _objc_fatal("+[NSObject instanceMethodSignatureForSelector:] " + "not available without CoreFoundation"); +} + +// Replaced by CF (returns an NSMethodSignature) ++ (NSMethodSignature *)methodSignatureForSelector:(SEL)sel { + _objc_fatal("+[NSObject methodSignatureForSelector:] " + "not available without CoreFoundation"); +} + +// Replaced by CF (returns an NSMethodSignature) +- (NSMethodSignature *)methodSignatureForSelector:(SEL)sel { + _objc_fatal("-[NSObject methodSignatureForSelector:] " + "not available without CoreFoundation"); +} + ++ (void)forwardInvocation:(NSInvocation *)invocation { + [self doesNotRecognizeSelector:(invocation ? [invocation selector] : 0)]; +} + +- (void)forwardInvocation:(NSInvocation *)invocation { + [self doesNotRecognizeSelector:(invocation ? 
[invocation selector] : 0)]; +} + ++ (id)forwardingTargetForSelector:(SEL)sel { + return nil; +} + +- (id)forwardingTargetForSelector:(SEL)sel { + return nil; +} + + +// Replaced by CF (returns an NSString) ++ (NSString *)description { + return nil; +} + +// Replaced by CF (returns an NSString) +- (NSString *)description { + return nil; +} + ++ (NSString *)debugDescription { + return [self description]; +} + +- (NSString *)debugDescription { + return [self description]; +} + + ++ (id)new { + return [callAlloc(self, false/*checkNil*/) init]; +} + ++ (id)retain { + return (id)self; +} + +// Replaced by ObjectAlloc +- (id)retain { + return ((id)self)->rootRetain(); +} + + ++ (BOOL)_tryRetain { + return YES; +} + +// Replaced by ObjectAlloc +- (BOOL)_tryRetain { + return ((id)self)->rootTryRetain(); +} + ++ (BOOL)_isDeallocating { + return NO; +} + +- (BOOL)_isDeallocating { + return ((id)self)->rootIsDeallocating(); +} + ++ (BOOL)allowsWeakReference { + return YES; +} + ++ (BOOL)retainWeakReference { + return YES; +} + +- (BOOL)allowsWeakReference { + return ! [self _isDeallocating]; +} + +- (BOOL)retainWeakReference { + return [self _tryRetain]; +} + ++ (oneway void)release { +} + +// Replaced by ObjectAlloc +- (oneway void)release { + ((id)self)->rootRelease(); +} + ++ (id)autorelease { + return (id)self; +} + +// Replaced by ObjectAlloc +- (id)autorelease { + return ((id)self)->rootAutorelease(); +} + ++ (NSUInteger)retainCount { + return ULONG_MAX; +} + +- (NSUInteger)retainCount { + return ((id)self)->rootRetainCount(); +} + ++ (id)alloc { + return _objc_rootAlloc(self); +} + +// Replaced by ObjectAlloc ++ (id)allocWithZone:(struct _NSZone *)zone { + return _objc_rootAllocWithZone(self, (malloc_zone_t *)zone); +} + +// Replaced by CF (throws an NSException) ++ (id)init { + return (id)self; +} + +- (id)init { + return _objc_rootInit(self); +} + +// Replaced by CF (throws an NSException) ++ (void)dealloc { +} + + +// Replaced by NSZombies +- (void)dealloc { + _objc_rootDealloc(self); +} + +// Previously used by GC. Now a placeholder for binary compatibility. +- (void) finalize { +} + ++ (struct _NSZone *)zone { + return (struct _NSZone *)_objc_rootZone(self); +} + +- (struct _NSZone *)zone { + return (struct _NSZone *)_objc_rootZone(self); +} + ++ (id)copy { + return (id)self; +} + ++ (id)copyWithZone:(struct _NSZone *)zone { + return (id)self; +} + +- (id)copy { + return [(id)self copyWithZone:nil]; +} + ++ (id)mutableCopy { + return (id)self; +} + ++ (id)mutableCopyWithZone:(struct _NSZone *)zone { + return (id)self; +} + +- (id)mutableCopy { + return [(id)self mutableCopyWithZone:nil]; +} + +@end + + diff --git a/runtime/Object.h b/runtime/Object.h new file mode 100644 index 0000000..5c857ac --- /dev/null +++ b/runtime/Object.h @@ -0,0 +1,167 @@ +/* + * Copyright (c) 1999-2003, 2005-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Object.h + Copyright 1988-1996 NeXT Software, Inc. + + DEFINED AS: A common class + HEADER FILES: + +*/ + +#ifndef _OBJC_OBJECT_H_ +#define _OBJC_OBJECT_H_ + +#include +#include + +#if __OBJC__ && !__OBJC2__ + +__OSX_AVAILABLE(10.0) +__IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +OBJC_ROOT_CLASS +@interface Object +{ + Class isa; /* A pointer to the instance's class structure */ +} + +/* Initializing classes and instances */ + ++ (id)initialize; +- (id)init; + +/* Creating, copying, and freeing instances */ + ++ (id)new; ++ (id)free; +- (id)free; ++ (id)alloc; +- (id)copy; ++ (id)allocFromZone:(void *)zone; +- (id)copyFromZone:(void *)zone; +- (void *)zone; + +/* Identifying classes */ + ++ (id)class; ++ (id)superclass; ++ (const char *) name; +- (id)class; +- (id)superclass; +- (const char *) name; + +/* Identifying and comparing instances */ + +- (id)self; +- (unsigned int) hash; +- (BOOL) isEqual:anObject; + +/* Testing inheritance relationships */ + +- (BOOL) isKindOf: aClassObject; +- (BOOL) isMemberOf: aClassObject; +- (BOOL) isKindOfClassNamed: (const char *)aClassName; +- (BOOL) isMemberOfClassNamed: (const char *)aClassName; + +/* Testing class functionality */ + ++ (BOOL) instancesRespondTo:(SEL)aSelector; +- (BOOL) respondsTo:(SEL)aSelector; + +/* Testing protocol conformance */ + +- (BOOL) conformsTo: (Protocol *)aProtocolObject; ++ (BOOL) conformsTo: (Protocol *)aProtocolObject; + +/* Obtaining method descriptors from protocols */ + +- (struct objc_method_description *) descriptionForMethod:(SEL)aSel; ++ (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSel; + +/* Obtaining method handles */ + +- (IMP) methodFor:(SEL)aSelector; ++ (IMP) instanceMethodFor:(SEL)aSelector; + +/* Sending messages determined at run time */ + +- (id)perform:(SEL)aSelector; +- (id)perform:(SEL)aSelector with:anObject; +- (id)perform:(SEL)aSelector with:object1 with:object2; + +/* Posing */ + ++ (id)poseAs: aClassObject; + +/* Enforcing intentions */ + +- (id)subclassResponsibility:(SEL)aSelector; +- (id)notImplemented:(SEL)aSelector; + +/* Error handling */ + +- (id)doesNotRecognize:(SEL)aSelector; +- (id)error:(const char *)aString, ...; + +/* Debugging */ + +- (void) printForDebugger:(void *)stream; + +/* Archiving */ + +- (id)awake; +- (id)write:(void *)stream; +- (id)read:(void *)stream; ++ (int) version; ++ (id)setVersion: (int) aVersion; + +/* Forwarding */ + +- (id)forward: (SEL)sel : (marg_list)args; +- (id)performv: (SEL)sel : (marg_list)args; + +@end + +/* Abstract Protocol for Archiving */ + +@interface Object (Archiving) + +- (id)startArchiving: (void *)stream; +- (id)finishUnarchiving; + +@end + +/* Abstract Protocol for Dynamic Loading */ + +@interface Object (DynamicLoading) + +//+ finishLoading:(headerType *)header; +struct mach_header; ++ (id)finishLoading:(struct mach_header *)header; ++ (id)startUnloading; + +@end + +#endif + +#endif /* _OBJC_OBJECT_H_ */ diff --git a/runtime/Object.mm b/runtime/Object.mm new file mode 100644 index 
0000000..3ec14be --- /dev/null +++ b/runtime/Object.mm @@ -0,0 +1,558 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Object.m + Copyright 1988-1996 NeXT Software, Inc. +*/ + +#include "objc-private.h" + +#undef id +#undef Class + +typedef struct objc_class *Class; +typedef struct objc_object *id; + +#if __OBJC2__ + +__OSX_AVAILABLE(10.0) +__IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +OBJC_ROOT_CLASS +@interface Object { + Class isa; +} +@end + +@implementation Object + ++ (id)initialize +{ + return self; +} + ++ (id)class +{ + return self; +} + +-(id) retain +{ + return _objc_rootRetain(self); +} + +-(void) release +{ + _objc_rootRelease(self); +} + +-(id) autorelease +{ + return _objc_rootAutorelease(self); +} + ++(id) retain +{ + return self; +} + ++(void) release +{ +} + ++(id) autorelease +{ + return self; +} + + +@end + + +// __OBJC2__ +#else +// not __OBJC2__ + +#include +#include +#include +#include + +#include "Object.h" +#include "Protocol.h" +#include "objc-runtime.h" + + +// Error Messages +static const char + _errShouldHaveImp[] = "should have implemented the '%s' method.", + _errShouldNotImp[] = "should NOT have implemented the '%s' method.", + _errLeftUndone[] = "method '%s' not implemented", + _errBadSel[] = "method %s given invalid selector %s", + _errDoesntRecognize[] = "does not recognize selector %c%s"; + + +@implementation Object + + ++ (id)initialize +{ + return self; +} + +- (id)awake +{ + return self; +} + ++ (id)poseAs: aFactory +{ + return class_poseAs(self, aFactory); +} + ++ (id)new +{ + id newObject = (*_alloc)((Class)self, 0); + Class metaClass = self->ISA(); + if (class_getVersion(metaClass) > 1) + return [newObject init]; + else + return newObject; +} + ++ (id)alloc +{ + return (*_zoneAlloc)((Class)self, 0, malloc_default_zone()); +} + ++ (id)allocFromZone:(void *) z +{ + return (*_zoneAlloc)((Class)self, 0, z); +} + +- (id)init +{ + return self; +} + +- (const char *)name +{ + return class_getName(isa); +} + ++ (const char *)name +{ + return class_getName((Class)self); +} + +- (unsigned)hash +{ + return (unsigned)(((uintptr_t)self) >> 2); +} + +- (BOOL)isEqual:anObject +{ + return anObject == self; +} + +- (id)free +{ + return (*_dealloc)(self); +} + ++ (id)free +{ + return nil; +} + +- (id)self +{ + return self; +} + + +-(id)class +{ + return (id)isa; +} + ++ (id)class +{ + return self; +} + +- (void *)zone +{ + void *z = malloc_zone_from_ptr(self); + return z ? 
z : malloc_default_zone(); +} + ++ (id)superclass +{ + return self->superclass; +} + +- (id)superclass +{ + return isa->superclass; +} + ++ (int) version +{ + return class_getVersion((Class)self); +} + ++ (id)setVersion: (int) aVersion +{ + class_setVersion((Class)self, aVersion); + return self; +} + +- (BOOL)isKindOf:aClass +{ + Class cls; + for (cls = isa; cls; cls = cls->superclass) + if (cls == (Class)aClass) + return YES; + return NO; +} + +- (BOOL)isMemberOf:aClass +{ + return isa == (Class)aClass; +} + +- (BOOL)isKindOfClassNamed:(const char *)aClassName +{ + Class cls; + for (cls = isa; cls; cls = cls->superclass) + if (strcmp(aClassName, class_getName(cls)) == 0) + return YES; + return NO; +} + +- (BOOL)isMemberOfClassNamed:(const char *)aClassName +{ + return strcmp(aClassName, class_getName(isa)) == 0; +} + ++ (BOOL)instancesRespondTo:(SEL)aSelector +{ + return class_respondsToMethod((Class)self, aSelector); +} + +- (BOOL)respondsTo:(SEL)aSelector +{ + return class_respondsToMethod(isa, aSelector); +} + +- (id)copy +{ + return [self copyFromZone: [self zone]]; +} + +- (id)copyFromZone:(void *)z +{ + return (*_zoneCopy)(self, 0, z); +} + +- (IMP)methodFor:(SEL)aSelector +{ + return class_lookupMethod(isa, aSelector); +} + ++ (IMP)instanceMethodFor:(SEL)aSelector +{ + return class_lookupMethod(self, aSelector); +} + +- (id)perform:(SEL)aSelector +{ + if (aSelector) + return ((id(*)(id, SEL))objc_msgSend)(self, aSelector); + else + return [self error:_errBadSel, sel_getName(_cmd), aSelector]; +} + +- (id)perform:(SEL)aSelector with:anObject +{ + if (aSelector) + return ((id(*)(id, SEL, id))objc_msgSend)(self, aSelector, anObject); + else + return [self error:_errBadSel, sel_getName(_cmd), aSelector]; +} + +- (id)perform:(SEL)aSelector with:obj1 with:obj2 +{ + if (aSelector) + return ((id(*)(id, SEL, id, id))objc_msgSend)(self, aSelector, obj1, obj2); + else + return [self error:_errBadSel, sel_getName(_cmd), aSelector]; +} + +- (id)subclassResponsibility:(SEL)aSelector +{ + return [self error:_errShouldHaveImp, sel_getName(aSelector)]; +} + +- (id)notImplemented:(SEL)aSelector +{ + return [self error:_errLeftUndone, sel_getName(aSelector)]; +} + +- (id)doesNotRecognize:(SEL)aMessage +{ + return [self error:_errDoesntRecognize, + class_isMetaClass(isa) ? '+' : '-', sel_getName(aMessage)]; +} + +- (id)error:(const char *)aCStr, ... +{ + va_list ap; + va_start(ap,aCStr); + (*_error)(self, aCStr, ap); + _objc_error (self, aCStr, ap); /* In case (*_error)() returns. */ + va_end(ap); + return nil; +} + +- (void) printForDebugger:(void *)stream +{ +} + +- (id)write:(void *) stream +{ + return self; +} + +- (id)read:(void *) stream +{ + return self; +} + +- (id)forward: (SEL) sel : (marg_list) args +{ + return [self doesNotRecognize: sel]; +} + +/* this method is not part of the published API */ + +- (unsigned)methodArgSize:(SEL)sel +{ + Method method = class_getInstanceMethod((Class)isa, sel); + if (! method) return 0; + return method_getSizeOfArguments(method); +} + +- (id)performv: (SEL) sel : (marg_list) args +{ + unsigned size; + + // Messages to nil object always return nil + if (! self) return nil; + + // Calculate size of the marg_list from the method's + // signature. This looks for the method in self + // and its superclasses. + size = [self methodArgSize: sel]; + + // If neither self nor its superclasses implement + // it, forward the message because self might know + // someone who does. This is a "chained" forward... + if (! 
size) return [self forward: sel: args]; + + // Message self with the specified selector and arguments + return objc_msgSendv (self, sel, size, args); +} + +/* Testing protocol conformance */ + +- (BOOL) conformsTo: (Protocol *)aProtocolObj +{ + return [(id)isa conformsTo:aProtocolObj]; +} + ++ (BOOL) conformsTo: (Protocol *)aProtocolObj +{ + Class cls; + for (cls = self; cls; cls = cls->superclass) + { + if (class_conformsToProtocol(cls, aProtocolObj)) return YES; + } + return NO; +} + + +/* Looking up information for a method */ + +- (struct objc_method_description *) descriptionForMethod:(SEL)aSelector +{ + Class cls; + struct objc_method_description *m; + + /* Look in the protocols first. */ + for (cls = isa; cls; cls = cls->superclass) + { + if (cls->ISA()->version >= 3) + { + struct objc_protocol_list *protocols = + (struct objc_protocol_list *)cls->protocols; + + while (protocols) + { + int i; + + for (i = 0; i < protocols->count; i++) + { + Protocol *p = protocols->list[i]; + + if (class_isMetaClass(cls)) + m = [p descriptionForClassMethod:aSelector]; + else + m = [p descriptionForInstanceMethod:aSelector]; + + if (m) { + return m; + } + } + + if (cls->ISA()->version <= 4) + break; + + protocols = protocols->next; + } + } + } + + /* Then try the class implementations. */ + for (cls = isa; cls; cls = cls->superclass) { + void *iterator = 0; + int i; + struct objc_method_list *mlist; + while ( (mlist = class_nextMethodList( cls, &iterator )) ) { + for (i = 0; i < mlist->method_count; i++) + if (mlist->method_list[i].method_name == aSelector) { + m = (struct objc_method_description *)&mlist->method_list[i]; + return m; + } + } + } + return 0; +} + ++ (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSelector +{ + Class cls; + + /* Look in the protocols first. */ + for (cls = self; cls; cls = cls->superclass) + { + if (cls->ISA()->version >= 3) + { + struct objc_protocol_list *protocols = + (struct objc_protocol_list *)cls->protocols; + + while (protocols) + { + int i; + + for (i = 0; i < protocols->count; i++) + { + Protocol *p = protocols->list[i]; + struct objc_method_description *m; + + if ((m = [p descriptionForInstanceMethod:aSelector])) + return m; + } + + if (cls->ISA()->version <= 4) + break; + + protocols = protocols->next; + } + } + } + + /* Then try the class implementations. */ + for (cls = self; cls; cls = cls->superclass) { + void *iterator = 0; + int i; + struct objc_method_list *mlist; + while ( (mlist = class_nextMethodList( cls, &iterator )) ) { + for (i = 0; i < mlist->method_count; i++) + if (mlist->method_list[i].method_name == aSelector) { + struct objc_method_description *m; + m = (struct objc_method_description *)&mlist->method_list[i]; + return m; + } + } + } + return 0; +} + + +/* Obsolete methods (for binary compatibility only). 
*/ + ++ (id)superClass +{ + return [self superclass]; +} + +- (id)superClass +{ + return [self superclass]; +} + +- (BOOL)isKindOfGivenName:(const char *)aClassName +{ + return [self isKindOfClassNamed: aClassName]; +} + +- (BOOL)isMemberOfGivenName:(const char *)aClassName +{ + return [self isMemberOfClassNamed: aClassName]; +} + +- (struct objc_method_description *) methodDescFor:(SEL)aSelector +{ + return [self descriptionForMethod: aSelector]; +} + ++ (struct objc_method_description *) instanceMethodDescFor:(SEL)aSelector +{ + return [self descriptionForInstanceMethod: aSelector]; +} + +- (id)findClass:(const char *)aClassName +{ + return objc_lookUpClass(aClassName); +} + +- (id)shouldNotImplement:(SEL)aSelector +{ + return [self error:_errShouldNotImp, sel_getName(aSelector)]; +} + + +@end + +#endif diff --git a/runtime/OldClasses.subproj/List.h b/runtime/OldClasses.subproj/List.h new file mode 100644 index 0000000..d1f7ff6 --- /dev/null +++ b/runtime/OldClasses.subproj/List.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 1999-2002, 2005-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + List.h + Copyright 1988-1996 NeXT Software, Inc. 
+ + DEFINED AS: A common class + HEADER FILES: objc/List.h + +*/ + +#ifndef _OBJC_LIST_H_ +#define _OBJC_LIST_H_ + +#if __OBJC__ && !__OBJC2__ && !__cplusplus && !__has_feature(objc_arc) + +#include +#include + +DEPRECATED_ATTRIBUTE +@interface List : Object +{ +@public + id *dataPtr DEPRECATED_ATTRIBUTE; /* data of the List object */ + unsigned numElements DEPRECATED_ATTRIBUTE; /* Actual number of elements */ + unsigned maxElements DEPRECATED_ATTRIBUTE; /* Total allocated elements */ +} + +/* Creating, freeing */ + +- (id)free DEPRECATED_ATTRIBUTE; +- (id)freeObjects DEPRECATED_ATTRIBUTE; +- (id)copyFromZone:(void *)z DEPRECATED_ATTRIBUTE; + +/* Initializing */ + +- (id)init DEPRECATED_ATTRIBUTE; +- (id)initCount:(unsigned)numSlots DEPRECATED_ATTRIBUTE; + +/* Comparing two lists */ + +- (BOOL)isEqual: anObject DEPRECATED_ATTRIBUTE; + +/* Managing the storage capacity */ + +- (unsigned)capacity DEPRECATED_ATTRIBUTE; +- (id)setAvailableCapacity:(unsigned)numSlots DEPRECATED_ATTRIBUTE; + +/* Manipulating objects by index */ + +- (unsigned)count DEPRECATED_ATTRIBUTE; +- (id)objectAt:(unsigned)index DEPRECATED_ATTRIBUTE; +- (id)lastObject DEPRECATED_ATTRIBUTE; +- (id)addObject:anObject DEPRECATED_ATTRIBUTE; +- (id)insertObject:anObject at:(unsigned)index DEPRECATED_ATTRIBUTE; +- (id)removeObjectAt:(unsigned)index DEPRECATED_ATTRIBUTE; +- (id)removeLastObject DEPRECATED_ATTRIBUTE; +- (id)replaceObjectAt:(unsigned)index with:newObject DEPRECATED_ATTRIBUTE; +- (id)appendList: (List *)otherList DEPRECATED_ATTRIBUTE; + +/* Manipulating objects by id */ + +- (unsigned)indexOf:anObject DEPRECATED_ATTRIBUTE; +- (id)addObjectIfAbsent:anObject DEPRECATED_ATTRIBUTE; +- (id)removeObject:anObject DEPRECATED_ATTRIBUTE; +- (id)replaceObject:anObject with:newObject DEPRECATED_ATTRIBUTE; + +/* Emptying the list */ + +- (id)empty DEPRECATED_ATTRIBUTE; + +/* Sending messages to elements of the list */ + +- (id)makeObjectsPerform:(SEL)aSelector DEPRECATED_ATTRIBUTE; +- (id)makeObjectsPerform:(SEL)aSelector with:anObject DEPRECATED_ATTRIBUTE; + +/* + * The following new... methods are now obsolete. They remain in this + * interface file for backward compatibility only. Use Object's alloc method + * and the init... methods defined in this class instead. + */ + ++ (id)new DEPRECATED_ATTRIBUTE; ++ (id)newCount:(unsigned)numSlots DEPRECATED_ATTRIBUTE; + +@end + +typedef struct { + @defs(List); +} NXListId DEPRECATED_ATTRIBUTE; + +#define NX_ADDRESS(x) (((NXListId *)(x))->dataPtr) + +#define NX_NOT_IN_LIST 0xffffffff + +#endif + +#endif /* _OBJC_LIST_H_ */ diff --git a/runtime/OldClasses.subproj/List.m b/runtime/OldClasses.subproj/List.m new file mode 100644 index 0000000..a46849c --- /dev/null +++ b/runtime/OldClasses.subproj/List.m @@ -0,0 +1,294 @@ +/* + * Copyright (c) 1999-2001, 2005-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + List.m + Copyright 1988-1996 NeXT Software, Inc. + Written by: Bryan Yamamoto + Responsibility: Bertrand Serlet +*/ + +#ifndef __OBJC2__ + +#include +#include +#include + +#include + +#define DATASIZE(count) ((count) * sizeof(id)) + +@implementation List + ++ (id)initialize +{ + [self setVersion: 1]; + return self; +} + +- (id)initCount:(unsigned)numSlots +{ + maxElements = numSlots; + if (maxElements) + dataPtr = (id *)malloc(DATASIZE(maxElements)); + return self; +} + ++ (id)newCount:(unsigned)numSlots +{ + return [[self alloc] initCount:numSlots]; +} + ++ (id)new +{ + return [self newCount:0]; +} + +- (id)init +{ + return [self initCount:0]; +} + +- (id)free +{ + free(dataPtr); + return [super free]; +} + +- (id)freeObjects +{ + id element; + while ((element = [self removeLastObject])) + [element free]; + return self; +} + +- (id)copyFromZone:(void *)z +{ + List *new = [[[self class] alloc] initCount: numElements]; + new->numElements = numElements; + bcopy ((const char*)dataPtr, (char*)new->dataPtr, DATASIZE(numElements)); + return new; +} + +- (BOOL) isEqual: anObject +{ + List *other; + if (! [anObject isKindOf: [self class]]) return NO; + other = (List *) anObject; + return (numElements == other->numElements) + && (bcmp ((const char*)dataPtr, (const char*)other->dataPtr, DATASIZE(numElements)) == 0); +} + +- (unsigned)capacity +{ + return maxElements; +} + +- (unsigned)count +{ + return numElements; +} + +- (id)objectAt:(unsigned)index +{ + if (index >= numElements) + return nil; + return dataPtr[index]; +} + +- (unsigned)indexOf:anObject +{ + register id *this = dataPtr; + register id *last = this + numElements; + while (this < last) { + if (*this == anObject) + return this - dataPtr; + this++; + } + return NX_NOT_IN_LIST; +} + +- (id)lastObject +{ + if (! numElements) + return nil; + return dataPtr[numElements - 1]; +} + +- (id)setAvailableCapacity:(unsigned)numSlots +{ + volatile id *tempDataPtr; + if (numSlots < numElements) return nil; + tempDataPtr = (id *) realloc (dataPtr, DATASIZE(numSlots)); + dataPtr = (id *)tempDataPtr; + maxElements = numSlots; + return self; +} + +- (id)insertObject:anObject at:(unsigned)index +{ + register id *this, *last, *prev; + if (! anObject) return nil; + if (index > numElements) + return nil; + if ((numElements + 1) > maxElements) { + volatile id *tempDataPtr; + /* we double the capacity, also a good size for malloc */ + maxElements += maxElements + 1; + tempDataPtr = (id *) realloc (dataPtr, DATASIZE(maxElements)); + dataPtr = (id*)tempDataPtr; + } + this = dataPtr + numElements; + prev = this - 1; + last = dataPtr + index; + while (this > last) + *this-- = *prev--; + *last = anObject; + numElements++; + return self; +} + +- (id)addObject:anObject +{ + return [self insertObject:anObject at:numElements]; + +} + + +- (id)addObjectIfAbsent:anObject +{ + register id *this, *last; + if (! 
anObject) return nil; + this = dataPtr; + last = dataPtr + numElements; + while (this < last) { + if (*this == anObject) + return self; + this++; + } + return [self insertObject:anObject at:numElements]; + +} + + +- (id)removeObjectAt:(unsigned)index +{ + register id *this, *last, *next; + id retval; + if (index >= numElements) + return nil; + this = dataPtr + index; + last = dataPtr + numElements; + next = this + 1; + retval = *this; + while (next < last) + *this++ = *next++; + numElements--; + return retval; +} + +- (id)removeObject:anObject +{ + register id *this, *last; + this = dataPtr; + last = dataPtr + numElements; + while (this < last) { + if (*this == anObject) + return [self removeObjectAt:this - dataPtr]; + this++; + } + return nil; +} + +- (id)removeLastObject +{ + if (! numElements) + return nil; + return [self removeObjectAt: numElements - 1]; +} + +- (id)empty +{ + numElements = 0; + return self; +} + +- (id)replaceObject:anObject with:newObject +{ + register id *this, *last; + if (! newObject) + return nil; + this = dataPtr; + last = dataPtr + numElements; + while (this < last) { + if (*this == anObject) { + *this = newObject; + return anObject; + } + this++; + } + return nil; +} + +- (id)replaceObjectAt:(unsigned)index with:newObject +{ + register id *this; + id retval; + if (! newObject) + return nil; + if (index >= numElements) + return nil; + this = dataPtr + index; + retval = *this; + *this = newObject; + return retval; +} + +- (id)makeObjectsPerform:(SEL)aSelector +{ + unsigned count = numElements; + while (count--) + [dataPtr[count] perform: aSelector]; + return self; +} + +- (id)makeObjectsPerform:(SEL)aSelector with:anObject +{ + unsigned count = numElements; + while (count--) + [dataPtr[count] perform: aSelector with: anObject]; + return self; +} + +-(id)appendList: (List *)otherList +{ + unsigned i, count; + + for (i = 0, count = [otherList count]; i < count; i++) + [self addObject: [otherList objectAt: i]]; + return self; +} + +@end + +#endif diff --git a/runtime/Protocol.h b/runtime/Protocol.h new file mode 100644 index 0000000..1f2a7b5 --- /dev/null +++ b/runtime/Protocol.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 1999-2003, 2006-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Protocol.h + Copyright 1991-1996 NeXT Software, Inc. +*/ + +#ifndef _OBJC_PROTOCOL_H_ +#define _OBJC_PROTOCOL_H_ + +#if !__OBJC__ + +// typedef Protocol is here: +#include + + +#elif __OBJC2__ + +#include + +// All methods of class Protocol are unavailable. +// Use the functions in objc/runtime.h instead. 
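// A minimal sketch of that replacement API (the protocol name "MyProtocol"
// and its required instance method -doWork are hypothetical):
//
//     Protocol *p = objc_getProtocol("MyProtocol");
//     struct objc_method_description desc =
//         protocol_getMethodDescription(p, sel_registerName("doWork"),
//                                       YES /*required*/, YES /*instance*/);
//     if (desc.name) {
//         // desc.types holds the @encode() string for the method's signature
//     }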
+ +OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) +@interface Protocol : NSObject +@end + + +#else + +#include + +OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) +@interface Protocol : Object +{ +@private + char *protocol_name OBJC2_UNAVAILABLE; + struct objc_protocol_list *protocol_list OBJC2_UNAVAILABLE; + struct objc_method_description_list *instance_methods OBJC2_UNAVAILABLE; + struct objc_method_description_list *class_methods OBJC2_UNAVAILABLE; +} + +/* Obtaining attributes intrinsic to the protocol */ + +- (const char *)name OBJC2_UNAVAILABLE; + +/* Testing protocol conformance */ + +- (BOOL) conformsTo: (Protocol *)aProtocolObject OBJC2_UNAVAILABLE; + +/* Looking up information specific to a protocol */ + +- (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSel + __OSX_DEPRECATED(10.0, 10.5, "use protocol_getMethodDescription instead") + __IOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead") + __TVOS_DEPRECATED(9.0, 9.0, "use protocol_getMethodDescription instead") + __WATCHOS_DEPRECATED(1.0, 1.0, "use protocol_getMethodDescription instead"); +- (struct objc_method_description *) descriptionForClassMethod:(SEL)aSel + __OSX_DEPRECATED(10.0, 10.5, "use protocol_getMethodDescription instead") + __IOS_DEPRECATED(2.0, 2.0, "use protocol_getMethodDescription instead") + __TVOS_DEPRECATED(9.0, 9.0, "use protocol_getMethodDescription instead") + __WATCHOS_DEPRECATED(1.0, 1.0, "use protocol_getMethodDescription instead"); + +@end + +#endif + +#endif /* _OBJC_PROTOCOL_H_ */ diff --git a/runtime/Protocol.mm b/runtime/Protocol.mm new file mode 100644 index 0000000..2a2b3da --- /dev/null +++ b/runtime/Protocol.mm @@ -0,0 +1,128 @@ +/* + * Copyright (c) 1999-2001, 2005-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + Protocol.h + Copyright 1991-1996 NeXT Software, Inc. +*/ + +#include "objc-private.h" + +#undef id +#undef Class + +#include +#include +#include +#include + +#include "Protocol.h" +#include "NSObject.h" + +// __IncompleteProtocol is used as the return type of objc_allocateProtocol(). + +// Old ABI uses NSObject as the superclass even though Protocol uses Object +// because the R/R implementation for class Protocol is added at runtime +// by CF, so __IncompleteProtocol would be left without an R/R implementation +// otherwise, which would break ARC. 
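// A minimal usage sketch of that objc_allocateProtocol() path (the protocol
// name and selector below are hypothetical):
//
//     Protocol *proto = objc_allocateProtocol("MyRuntimeProtocol");
//     protocol_addMethodDescription(proto, sel_registerName("doWork"), "v@:",
//                                   YES /*required*/, YES /*instance*/);
//     objc_registerProtocol(proto);  // only after this is it a normal Protocol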
+ +@interface __IncompleteProtocol : NSObject @end +@implementation __IncompleteProtocol +#if __OBJC2__ +// fixme hack - make __IncompleteProtocol a non-lazy class ++ (void) load { } +#endif +@end + + +@implementation Protocol + +#if __OBJC2__ +// fixme hack - make Protocol a non-lazy class ++ (void) load { } +#endif + + +- (BOOL) conformsTo: (Protocol *)aProtocolObj +{ + return protocol_conformsToProtocol(self, aProtocolObj); +} + +- (struct objc_method_description *) descriptionForInstanceMethod:(SEL)aSel +{ +#if !__OBJC2__ + return lookup_protocol_method((struct old_protocol *)self, aSel, + YES/*required*/, YES/*instance*/, + YES/*recursive*/); +#else + return method_getDescription(protocol_getMethod((struct protocol_t *)self, + aSel, YES, YES, YES)); +#endif +} + +- (struct objc_method_description *) descriptionForClassMethod:(SEL)aSel +{ +#if !__OBJC2__ + return lookup_protocol_method((struct old_protocol *)self, aSel, + YES/*required*/, NO/*instance*/, + YES/*recursive*/); +#else + return method_getDescription(protocol_getMethod((struct protocol_t *)self, + aSel, YES, NO, YES)); +#endif +} + +- (const char *)name +{ + return protocol_getName(self); +} + +- (BOOL)isEqual:other +{ +#if __OBJC2__ + // check isKindOf: + Class cls; + Class protoClass = objc_getClass("Protocol"); + for (cls = object_getClass(other); cls; cls = cls->superclass) { + if (cls == protoClass) break; + } + if (!cls) return NO; + // check equality + return protocol_isEqual(self, other); +#else + return [other isKindOf:[Protocol class]] && [self conformsTo: other] && [other conformsTo: self]; +#endif +} + +#if __OBJC2__ +- (NSUInteger)hash +{ + return 23; +} +#else +- (unsigned)hash +{ + return 23; +} +#endif + +@end diff --git a/runtime/a1a2-blocktramps-arm.s b/runtime/a1a2-blocktramps-arm.s new file mode 100644 index 0000000..9e54078 --- /dev/null +++ b/runtime/a1a2-blocktramps-arm.s @@ -0,0 +1,148 @@ +#if __arm__ + +#include +#include + +.syntax unified + +.text + + .private_extern __a1a2_tramphead + .private_extern __a1a2_firsttramp + .private_extern __a1a2_trampend + +// Trampoline machinery assumes the trampolines are Thumb function pointers +#if !__thumb2__ +# error sorry +#endif + +.thumb +.thumb_func __a1a2_tramphead +.thumb_func __a1a2_firsttramp +.thumb_func __a1a2_trampend + +.align PAGE_MAX_SHIFT +__a1a2_tramphead: + /* + r0 == self + r12 == pc of trampoline's first instruction + PC bias + lr == original return address + */ + + mov r1, r0 // _cmd = self + + // Trampoline's data is one page before the trampoline text. + // Also correct PC bias of 4 bytes. 
+ sub r12, #PAGE_MAX_SIZE + ldr r0, [r12, #-4] // self = block object + ldr pc, [r0, #12] // tail call block->invoke + // not reached + + // Align trampolines to 8 bytes +.align 3 + +.macro TrampolineEntry + mov r12, pc + b __a1a2_tramphead +.align 3 +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 +.endmacro + +.private_extern __a1a2_firsttramp +__a1a2_firsttramp: + // 2048-2 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + // TrampolineEntry + // TrampolineEntry + +.private_extern __a1a2_trampend +__a1a2_trampend: + +#endif diff --git a/runtime/a1a2-blocktramps-arm64.s b/runtime/a1a2-blocktramps-arm64.s new file mode 100644 index 0000000..139df99 --- /dev/null +++ b/runtime/a1a2-blocktramps-arm64.s @@ -0,0 +1,134 @@ +#if __arm64__ + +#include + +.text + + .private_extern __a1a2_tramphead + .private_extern __a1a2_firsttramp + .private_extern __a1a2_trampend + +.align PAGE_MAX_SHIFT +__a1a2_tramphead: +L_a1a2_tramphead: + /* + x0 == self + x17 == address of called trampoline's data (1 page before its code) + lr == original return address + */ + + mov x1, x0 // _cmd = self + ldr x0, [x17] // self = block object + ldr x16, [x0, #16] // tail call block->invoke + br x16 + + // pad up to TrampolineBlockPagePair header size + nop + nop + +.macro TrampolineEntry + // load address of trampoline data (one page before this instruction) + adr x17, -PAGE_MAX_SIZE + b L_a1a2_tramphead +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 +.endmacro + +.align 3 +.private_extern 
__a1a2_firsttramp +__a1a2_firsttramp: + // 2048-3 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + // TrampolineEntry + // TrampolineEntry + // TrampolineEntry + +.private_extern __a1a2_trampend +__a1a2_trampend: + +#endif diff --git a/runtime/a1a2-blocktramps-i386.s b/runtime/a1a2-blocktramps-i386.s new file mode 100755 index 0000000..e4579c0 --- /dev/null +++ b/runtime/a1a2-blocktramps-i386.s @@ -0,0 +1,566 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifdef __i386__ + +#include + +.text + .private_extern __a1a2_tramphead + .private_extern __a1a2_firsttramp + .private_extern __a1a2_nexttramp + .private_extern __a1a2_trampend + +.align PAGE_SHIFT +__a1a2_tramphead: + popl %eax + andl $0xFFFFFFF8, %eax + subl $ PAGE_SIZE, %eax + movl 4(%esp), %ecx // self -> ecx + movl %ecx, 8(%esp) // ecx -> _cmd + movl (%eax), %ecx // blockPtr -> ecx + movl %ecx, 4(%esp) // ecx -> self + jmp *12(%ecx) // tail to block->invoke + +.macro TrampolineEntry + call __a1a2_tramphead + nop + nop + nop +.endmacro + +.align 5 +__a1a2_firsttramp: + TrampolineEntry +__a1a2_nexttramp: // used to calculate size of each trampoline + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + +__a1a2_trampend: + +#endif diff --git a/runtime/a1a2-blocktramps-x86_64.s b/runtime/a1a2-blocktramps-x86_64.s new file mode 100755 index 0000000..696eb59 --- /dev/null +++ b/runtime/a1a2-blocktramps-x86_64.s @@ -0,0 +1,564 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifdef __x86_64__ + +#include + + .text + .private_extern __a1a2_tramphead + .private_extern __a1a2_firsttramp + .private_extern __a1a2_nexttramp + .private_extern __a1a2_trampend + +.align PAGE_SHIFT +__a1a2_tramphead: + popq %r10 + andq $0xFFFFFFFFFFFFFFF8, %r10 + subq $ PAGE_SIZE, %r10 + movq %rdi, %rsi // arg1 -> arg2 + movq (%r10), %rdi // block -> arg1 + jmp *16(%rdi) + +.macro TrampolineEntry + callq __a1a2_tramphead + nop + nop + nop +.endmacro + +.align 5 +__a1a2_firsttramp: + TrampolineEntry +__a1a2_nexttramp: + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + +__a1a2_trampend: + +#endif diff --git a/runtime/a2a3-blocktramps-arm.s b/runtime/a2a3-blocktramps-arm.s new file mode 100644 index 0000000..ac0ce72 --- /dev/null +++ b/runtime/a2a3-blocktramps-arm.s @@ -0,0 +1,148 @@ +#if __arm__ + +#include +#include + +.syntax unified + +.text + + .private_extern __a2a3_tramphead + .private_extern __a2a3_firsttramp + .private_extern __a2a3_trampend + +// Trampoline machinery assumes the trampolines are Thumb function pointers +#if 
!__thumb2__ +# error sorry +#endif + +.thumb +.thumb_func __a2a3_tramphead +.thumb_func __a2a3_firsttramp +.thumb_func __a2a3_trampend + +.align PAGE_MAX_SHIFT +__a2a3_tramphead: + /* + r1 == self + r12 == pc of trampoline's first instruction + PC bias + lr == original return address + */ + + mov r2, r1 // _cmd = self + + // Trampoline's data is one page before the trampoline text. + // Also correct PC bias of 4 bytes. + sub r12, #PAGE_MAX_SIZE + ldr r1, [r12, #-4] // self = block object + ldr pc, [r1, #12] // tail call block->invoke + // not reached + + // Align trampolines to 8 bytes +.align 3 + +.macro TrampolineEntry + mov r12, pc + b __a2a3_tramphead +.align 3 +.endmacro + +.macro TrampolineEntryX16 + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry +.endmacro + +.macro TrampolineEntryX256 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 +.endmacro + +.private_extern __a2a3_firsttramp +__a2a3_firsttramp: + // 2048-2 trampolines to fill 16K page + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX256 + TrampolineEntryX256 + TrampolineEntryX256 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntryX16 + TrampolineEntryX16 + TrampolineEntryX16 + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + + TrampolineEntry + TrampolineEntry + // TrampolineEntry + // TrampolineEntry + +.private_extern __a2a3_trampend +__a2a3_trampend: + +#endif diff --git a/runtime/a2a3-blocktramps-i386.s b/runtime/a2a3-blocktramps-i386.s new file mode 100755 index 0000000..d9932f6 --- /dev/null +++ b/runtime/a2a3-blocktramps-i386.s @@ -0,0 +1,566 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
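The trampoline heads above all rely on the same trick: every entry is a tiny fixed-size thunk (an 8-byte call-plus-nops sequence on x86, an 8-byte-aligned mov/branch pair on Thumb) that jumps to a shared head, and the head recovers the entry's own address (from the popped return address, or from r12 on ARM), rounds it down to the entry boundary, and steps back exactly one page. The allocator is expected to have stored the block pointer at that spot on the preceding data page; the head then rewrites the argument slots so that _cmd's slot receives self and self's old slot receives the block pointer, and tail-calls block->invoke. A minimal C sketch of that address arithmetic follows; the 4 KB page size, the fake_block layout, and the function name are assumptions for illustration, not the runtime's actual code.

#include <stdint.h>

#define TRAMP_PAGE_SIZE  4096   /* assumed; the assembly uses PAGE_SIZE / PAGE_MAX_SIZE */
#define TRAMP_ENTRY_SIZE 8      /* callq (5 bytes) + 3 nops, or two Thumb instructions */

struct fake_block {             /* stand-in for the blocks runtime's block layout */
    void *isa;
    int   flags, reserved;
    void (*invoke)(void *block, ...);   /* offset 16 on LP64, 12 on i386/ARM32 */
};

/* What "popq %r10; andq $-8,%r10; subq $PAGE_SIZE,%r10; movq (%r10),..." computes:
 * round the return address down to the entry start, then read the block pointer
 * from the same offset on the data page that sits one page before the code page. */
static struct fake_block *block_for_trampoline(uintptr_t popped_return_address)
{
    uintptr_t entry = popped_return_address & ~(uintptr_t)(TRAMP_ENTRY_SIZE - 1);
    uintptr_t slot  = entry - TRAMP_PAGE_SIZE;
    return *(struct fake_block **)slot;
}

In the a1a2 flavor the head then shifts arg1 into arg2 (self replaces _cmd) and installs the block as arg1, so the block body is entered as invoke(block, self, remaining method arguments).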
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifdef __i386__ + +#include + +.text + .private_extern __a2a3_tramphead + .private_extern __a2a3_firsttramp + .private_extern __a2a3_nexttramp + .private_extern __a2a3_trampend + +.align PAGE_SHIFT +__a2a3_tramphead: + popl %eax + andl $0xFFFFFFF8, %eax + subl $ PAGE_SIZE, %eax + movl 8(%esp), %ecx // self -> ecx + movl %ecx, 12(%esp) // ecx -> _cmd + movl (%eax), %ecx // blockPtr -> ecx + movl %ecx, 8(%esp) // ecx -> self + jmp *12(%ecx) // tail to block->invoke + +.macro TrampolineEntry + call __a2a3_tramphead + nop + nop + nop +.endmacro + +.align 5 +__a2a3_firsttramp: + TrampolineEntry +__a2a3_nexttramp: + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + +__a2a3_trampend: + +#endif diff --git a/runtime/a2a3-blocktramps-x86_64.s b/runtime/a2a3-blocktramps-x86_64.s new file mode 100755 index 0000000..4904ac4 --- /dev/null +++ b/runtime/a2a3-blocktramps-x86_64.s @@ -0,0 +1,565 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. 
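The a2a3 flavor just shown differs from a1a2 only in which slots it rewrites: it serves methods that return a structure, so slot 1 already holds the hidden return-value address and must be left alone; self therefore sits in slot 2 and is moved into slot 3 (overwriting _cmd), and the block pointer takes slot 2. Both flavors exist so the runtime can hand out an ordinary IMP whose implementation is a block, which is what imp_implementationWithBlock returns. A hedged usage sketch with the public runtime API follows; the class, selector, and function names here are illustrative only.

#import <objc/runtime.h>
#import <Foundation/Foundation.h>

static void addGreeter(Class cls)
{
    // The block behind an IMP receives the receiver where a method receives self,
    // followed by the method's arguments; there is no _cmd parameter.
    IMP imp = imp_implementationWithBlock(^(id me, NSString *name) {
        NSLog(@"%@ greets %@", me, name);
    });

    // "v@:@": returns void, takes self, _cmd, and one object argument.
    class_addMethod(cls, sel_registerName("greet:"), imp, "v@:@");
}

When such an IMP is no longer needed, imp_removeBlock(imp) releases the trampoline slot and the copied block.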
+ * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifdef __x86_64__ + +#include + + .text + .private_extern __a2a3_tramphead + .private_extern __a2a3_firsttramp + .private_extern __a2a3_nexttramp + .private_extern __a2a3_trampend + +.align PAGE_SHIFT +__a2a3_tramphead: + popq %r10 + andq $0xFFFFFFFFFFFFFFF8, %r10 + subq $ PAGE_SIZE, %r10 + // %rdi -- first arg -- is address of return value's space. Don't mess with it. + movq %rsi, %rdx // arg2 -> arg3 + movq (%r10), %rsi // block -> arg2 + jmp *16(%rsi) + +.macro TrampolineEntry + callq __a2a3_tramphead + nop + nop + nop +.endmacro + +.align 5 +__a2a3_firsttramp: + TrampolineEntry +__a2a3_nexttramp: + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + 
TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + TrampolineEntry + +__a2a3_trampend: + +#endif diff --git a/runtime/hashtable.h b/runtime/hashtable.h new file mode 100644 index 0000000..7f62780 --- /dev/null +++ b/runtime/hashtable.h @@ -0,0 +1,2 @@ +#include + diff --git a/runtime/hashtable2.h b/runtime/hashtable2.h new file mode 100644 index 0000000..197a17f --- /dev/null +++ b/runtime/hashtable2.h @@ -0,0 +1,229 @@ +/* + * Copyright (c) 1999-2006 
Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + hashtable2.h + Scalable hash table. + Copyright 1989-1996 NeXT Software, Inc. +*/ + +#ifndef _OBJC_LITTLE_HASHTABLE_H_ +#define _OBJC_LITTLE_HASHTABLE_H_ + +#ifndef _OBJC_PRIVATE_H_ +# define OBJC_HASH_AVAILABILITY \ + __OSX_DEPRECATED(10.0, 10.1, "NXHashTable is deprecated") \ + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +#else +# define OBJC_HASH_AVAILABILITY +#endif + +#include +#include +#include + +__BEGIN_DECLS + +/************************************************************************* + * Hash tables of arbitrary data + *************************************************************************/ + +/* This module allows hashing of arbitrary data. Such data must be pointers or integers, and client is responsible for allocating/deallocating this data. A deallocation call-back is provided. +The objective C class HashTable is preferred when dealing with (key, values) associations because it is easier to use in that situation. +As well-behaved scalable data structures, hash tables double in size when they start becoming full, thus guaranteeing both average constant time access and linear size. */ + +typedef struct { + uintptr_t (*hash)(const void *info, const void *data); + int (*isEqual)(const void *info, const void *data1, const void *data2); + void (*free)(const void *info, void *data); + int style; /* reserved for future expansion; currently 0 */ + } NXHashTablePrototype; + +/* the info argument allows a certain generality, such as freeing according to some owner information */ +/* invariants assumed by the implementation: + 1 - data1 = data2 => hash(data1) = hash(data2) + when data varies over time, hash(data) must remain invariant + e.g. 
if data hashes over a string key, the string must not be changed + 2- isEqual (data1, data2) => data1= data2 + */ + +typedef struct { + const NXHashTablePrototype *prototype OBJC_HASH_AVAILABILITY; + unsigned count OBJC_HASH_AVAILABILITY; + unsigned nbBuckets OBJC_HASH_AVAILABILITY; + void *buckets OBJC_HASH_AVAILABILITY; + const void *info OBJC_HASH_AVAILABILITY; + } NXHashTable OBJC_HASH_AVAILABILITY; + /* private data structure; may change */ + +OBJC_EXPORT NXHashTable *NXCreateHashTableFromZone (NXHashTablePrototype prototype, unsigned capacity, const void *info, void *z) OBJC_HASH_AVAILABILITY; +OBJC_EXPORT NXHashTable *NXCreateHashTable (NXHashTablePrototype prototype, unsigned capacity, const void *info) OBJC_HASH_AVAILABILITY; + /* if hash is 0, pointer hash is assumed */ + /* if isEqual is 0, pointer equality is assumed */ + /* if free is 0, elements are not freed */ + /* capacity is only a hint; 0 creates a small table */ + /* info allows call backs to be very general */ + +OBJC_EXPORT void NXFreeHashTable (NXHashTable *table) OBJC_HASH_AVAILABILITY; + /* calls free for each data, and recovers table */ + +OBJC_EXPORT void NXEmptyHashTable (NXHashTable *table) OBJC_HASH_AVAILABILITY; + /* does not deallocate table nor data; keeps current capacity */ + +OBJC_EXPORT void NXResetHashTable (NXHashTable *table) OBJC_HASH_AVAILABILITY; + /* frees each entry; keeps current capacity */ + +OBJC_EXPORT BOOL NXCompareHashTables (NXHashTable *table1, NXHashTable *table2) OBJC_HASH_AVAILABILITY; + /* Returns YES if the two sets are equal (each member of table1 in table2, and table have same size) */ + +OBJC_EXPORT NXHashTable *NXCopyHashTable (NXHashTable *table) OBJC_HASH_AVAILABILITY; + /* makes a fresh table, copying data pointers, not data itself. */ + +OBJC_EXPORT unsigned NXCountHashTable (NXHashTable *table) OBJC_HASH_AVAILABILITY; + /* current number of data in table */ + +OBJC_EXPORT int NXHashMember (NXHashTable *table, const void *data) OBJC_HASH_AVAILABILITY; + /* returns non-0 iff data is present in table. + Example of use when the hashed data is a struct containing the key, + and when the callee only has a key: + MyStruct pseudo; + pseudo.key = myKey; + return NXHashMember (myTable, &pseudo) + */ + +OBJC_EXPORT void *NXHashGet (NXHashTable *table, const void *data) OBJC_HASH_AVAILABILITY; + /* return original table data or NULL. + Example of use when the hashed data is a struct containing the key, + and when the callee only has a key: + MyStruct pseudo; + MyStruct *original; + pseudo.key = myKey; + original = NXHashGet (myTable, &pseudo) + */ + +OBJC_EXPORT void *NXHashInsert (NXHashTable *table, const void *data) OBJC_HASH_AVAILABILITY; + /* previous data or NULL is returned. */ + +OBJC_EXPORT void *NXHashInsertIfAbsent (NXHashTable *table, const void *data) OBJC_HASH_AVAILABILITY; + /* If data already in table, returns the one in table + else adds argument to table and returns argument. */ + +OBJC_EXPORT void *NXHashRemove (NXHashTable *table, const void *data) OBJC_HASH_AVAILABILITY; + /* previous data or NULL is returned */ + +/* Iteration over all elements of a table consists in setting up an iteration state and then to progress until all entries have been visited. 
An example of use for counting elements in a table is: + unsigned count = 0; + MyData *data; + NXHashState state = NXInitHashState(table); + while (NXNextHashState(table, &state, &data)) { + count++; + } +*/ + +typedef struct {int i; int j;} NXHashState OBJC_HASH_AVAILABILITY; + /* callers should not rely on actual contents of the struct */ + +OBJC_EXPORT NXHashState NXInitHashState(NXHashTable *table) OBJC_HASH_AVAILABILITY; + +OBJC_EXPORT int NXNextHashState(NXHashTable *table, NXHashState *state, void **data) OBJC_HASH_AVAILABILITY; + /* returns 0 when all elements have been visited */ + +/************************************************************************* + * Conveniences for writing hash, isEqual and free functions + * and common prototypes + *************************************************************************/ + +OBJC_EXPORT uintptr_t NXPtrHash(const void *info, const void *data) OBJC_HASH_AVAILABILITY; + /* scrambles the address bits; info unused */ +OBJC_EXPORT uintptr_t NXStrHash(const void *info, const void *data) OBJC_HASH_AVAILABILITY; + /* string hashing; info unused */ +OBJC_EXPORT int NXPtrIsEqual(const void *info, const void *data1, const void *data2) OBJC_HASH_AVAILABILITY; + /* pointer comparison; info unused */ +OBJC_EXPORT int NXStrIsEqual(const void *info, const void *data1, const void *data2) OBJC_HASH_AVAILABILITY; + /* string comparison; NULL ok; info unused */ +OBJC_EXPORT void NXNoEffectFree(const void *info, void *data) OBJC_HASH_AVAILABILITY; + /* no effect; info unused */ +OBJC_EXPORT void NXReallyFree(const void *info, void *data) OBJC_HASH_AVAILABILITY; + /* frees it; info unused */ + +/* The two following prototypes are useful for manipulating set of pointers or set of strings; For them free is defined as NXNoEffectFree */ +OBJC_EXPORT const NXHashTablePrototype NXPtrPrototype OBJC_HASH_AVAILABILITY; + /* prototype when data is a pointer (void *) */ +OBJC_EXPORT const NXHashTablePrototype NXStrPrototype OBJC_HASH_AVAILABILITY; + /* prototype when data is a string (char *) */ + +/* following prototypes help describe mappings where the key is the first element of a struct and is either a pointer or a string. +For example NXStrStructKeyPrototype can be used to hash pointers to Example, where Example is: + typedef struct { + char *key; + int data1; + ... + } Example + +For the following prototypes, free is defined as NXReallyFree. + */ +OBJC_EXPORT const NXHashTablePrototype NXPtrStructKeyPrototype OBJC_HASH_AVAILABILITY; +OBJC_EXPORT const NXHashTablePrototype NXStrStructKeyPrototype OBJC_HASH_AVAILABILITY; + + +#if !__OBJC2__ && !TARGET_OS_WIN32 + +/************************************************************************* + * Unique strings and buffers + *************************************************************************/ + +/* Unique strings allows C users to enjoy the benefits of Lisp's atoms: +A unique string is a string that is allocated once for all (never de-allocated) and that has only one representant (thus allowing comparison with == instead of strcmp). A unique string should never be modified (and in fact some memory protection is done to ensure that). In order to more explicitly insist on the fact that the string has been uniqued, a synonym of (const char *) has been added, NXAtom. 
*/ + +typedef const char *NXAtom OBJC_HASH_AVAILABILITY; + +OBJC_EXPORT NXAtom NXUniqueString(const char *buffer) OBJC_HASH_AVAILABILITY; + /* assumes that buffer is \0 terminated, and returns + a previously created string or a new string that is a copy of buffer. + If NULL is passed returns NULL. + Returned string should never be modified. To ensure this invariant, + allocations are made in a special read only zone. */ + +OBJC_EXPORT NXAtom NXUniqueStringWithLength(const char *buffer, int length) OBJC_HASH_AVAILABILITY; + /* assumes that buffer is a non NULL buffer of at least + length characters. Returns a previously created string or + a new string that is a copy of buffer. + If buffer contains \0, string will be truncated. + As for NXUniqueString, returned string should never be modified. */ + +OBJC_EXPORT NXAtom NXUniqueStringNoCopy(const char *string) OBJC_HASH_AVAILABILITY; + /* If there is already a unique string equal to string, returns the original. + Otherwise, string is entered in the table, without making a copy. Argument should then never be modified. */ + +OBJC_EXPORT char *NXCopyStringBuffer(const char *buffer) OBJC_HASH_AVAILABILITY; + /* given a buffer, allocates a new string copy of buffer. + Buffer should be \0 terminated; returned string is \0 terminated. */ + +OBJC_EXPORT char *NXCopyStringBufferFromZone(const char *buffer, void *z) OBJC_HASH_AVAILABILITY; + /* given a buffer, allocates a new string copy of buffer. + Buffer should be \0 terminated; returned string is \0 terminated. */ + +#endif + +__END_DECLS + +#endif /* _OBJC_LITTLE_HASHTABLE_H_ */ diff --git a/runtime/hashtable2.mm b/runtime/hashtable2.mm new file mode 100644 index 0000000..238f381 --- /dev/null +++ b/runtime/hashtable2.mm @@ -0,0 +1,646 @@ +/* + * Copyright (c) 1999-2008 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + hashtable2.m + Copyright 1989-1996 NeXT Software, Inc. 
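Everything needed to use the table API declared above is now in view; the following is a small, hedged usage sketch built only from those declarations (NXCreateHashTable with NXStrPrototype, NXHashInsert, NXHashMember, NXCountHashTable, the NXHashState iteration, NXFreeHashTable). The include path is an assumption about where the header installs; NXStrPrototype hashes and compares the stored pointers as C strings and its free callback is NXNoEffectFree, so the caller keeps ownership of the strings.

#include <objc/hashtable2.h>   /* assumed install location of hashtable2.h */
#include <stdio.h>

static void hash_demo(void)
{
    /* capacity 0 is only a hint ("0 creates a small table"); info is unused here */
    NXHashTable *table = NXCreateHashTable(NXStrPrototype, 0, NULL);

    NXHashInsert(table, "alpha");
    NXHashInsert(table, "beta");
    NXHashInsert(table, "alpha");      /* equal string: replaces, count stays 2 */

    printf("count = %u\n", NXCountHashTable(table));        /* prints 2 */
    printf("beta member? %d\n", NXHashMember(table, "beta"));

    /* Iteration, following the header's own NXHashState example. */
    void *data;
    NXHashState state = NXInitHashState(table);
    while (NXNextHashState(table, &state, &data))
        printf("  %s\n", (const char *)data);

    NXFreeHashTable(table);   /* NXNoEffectFree: the string literals are untouched */
}

On pre-Objective-C-2 targets, the NXUniqueString family declared at the bottom of the header would let such keys be uniqued once and then compared with == rather than strcmp.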
+ Created by Bertrand Serlet, Feb 89 + */ + +#include "objc-private.h" +#include "hashtable2.h" + +/* In order to improve efficiency, buckets contain a pointer to an array or directly the data when the array size is 1 */ +typedef union { + const void *one; + const void **many; + } oneOrMany; + /* an optimization consists of storing directly data when count = 1 */ + +typedef struct { + unsigned count; + oneOrMany elements; + } HashBucket; + /* private data structure; may change */ + +/************************************************************************* + * + * Macros and utilities + * + *************************************************************************/ + +#define PTRSIZE sizeof(void *) + +#if !SUPPORT_ZONES +# define DEFAULT_ZONE NULL +# define ZONE_FROM_PTR(p) NULL +# define ALLOCTABLE(z) ((NXHashTable *) malloc (sizeof (NXHashTable))) +# define ALLOCBUCKETS(z,nb)((HashBucket *) calloc (nb, sizeof (HashBucket))) +/* Return interior pointer so a table of classes doesn't look like objects */ +# define ALLOCPAIRS(z,nb) (1+(const void **) calloc (nb+1, sizeof (void *))) +# define FREEPAIRS(p) (free((void*)(-1+p))) +#else +# define DEFAULT_ZONE malloc_default_zone() +# define ZONE_FROM_PTR(p) malloc_zone_from_ptr(p) +# define ALLOCTABLE(z) ((NXHashTable *) malloc_zone_malloc ((malloc_zone_t *)z,sizeof (NXHashTable))) +# define ALLOCBUCKETS(z,nb)((HashBucket *) malloc_zone_calloc ((malloc_zone_t *)z, nb, sizeof (HashBucket))) +/* Return interior pointer so a table of classes doesn't look like objects */ +# define ALLOCPAIRS(z,nb) (1+(const void **) malloc_zone_calloc ((malloc_zone_t *)z, nb+1, sizeof (void *))) +# define FREEPAIRS(p) (free((void*)(-1+p))) +#endif + +#if !SUPPORT_MOD + /* nbBuckets must be a power of 2 */ +# define BUCKETOF(table, data) (((HashBucket *)table->buckets)+((*table->prototype->hash)(table->info, data) & (table->nbBuckets-1))) +# define GOOD_CAPACITY(c) (c <= 1 ? 
1 : 1 << (log2u (c-1)+1)) +# define MORE_CAPACITY(b) (b*2) +#else + /* iff necessary this modulo can be optimized since the nbBuckets is of the form 2**n-1 */ +# define BUCKETOF(table, data) (((HashBucket *)table->buckets)+((*table->prototype->hash)(table->info, data) % table->nbBuckets)) +# define GOOD_CAPACITY(c) (exp2m1u (log2u (c)+1)) +# define MORE_CAPACITY(b) (b*2+1) +#endif + +#define ISEQUAL(table, data1, data2) ((data1 == data2) || (*table->prototype->isEqual)(table->info, data1, data2)) + /* beware of double evaluation */ + +/************************************************************************* + * + * Global data and bootstrap + * + *************************************************************************/ + +static int isEqualPrototype (const void *info, const void *data1, const void *data2) { + NXHashTablePrototype *proto1 = (NXHashTablePrototype *) data1; + NXHashTablePrototype *proto2 = (NXHashTablePrototype *) data2; + + return (proto1->hash == proto2->hash) && (proto1->isEqual == proto2->isEqual) && (proto1->free == proto2->free) && (proto1->style == proto2->style); + }; + +static uintptr_t hashPrototype (const void *info, const void *data) { + NXHashTablePrototype *proto = (NXHashTablePrototype *) data; + + return NXPtrHash(info, (void*)proto->hash) ^ NXPtrHash(info, (void*)proto->isEqual) ^ NXPtrHash(info, (void*)proto->free) ^ (uintptr_t) proto->style; + }; + +void NXNoEffectFree (const void *info, void *data) {}; + +static NXHashTablePrototype protoPrototype = { + hashPrototype, isEqualPrototype, NXNoEffectFree, 0 + }; + +static NXHashTable *prototypes = NULL; + /* table of all prototypes */ + +static void bootstrap (void) { + free(malloc(8)); + prototypes = ALLOCTABLE (DEFAULT_ZONE); + prototypes->prototype = &protoPrototype; + prototypes->count = 1; + prototypes->nbBuckets = 1; /* has to be 1 so that the right bucket is 0 */ + prototypes->buckets = ALLOCBUCKETS(DEFAULT_ZONE, 1); + prototypes->info = NULL; + ((HashBucket *) prototypes->buckets)[0].count = 1; + ((HashBucket *) prototypes->buckets)[0].elements.one = &protoPrototype; + }; + +int NXPtrIsEqual (const void *info, const void *data1, const void *data2) { + return data1 == data2; + }; + +/************************************************************************* + * + * On z'y va + * + *************************************************************************/ + +NXHashTable *NXCreateHashTable (NXHashTablePrototype prototype, unsigned capacity, const void *info) { + return NXCreateHashTableFromZone(prototype, capacity, info, DEFAULT_ZONE); +} + +NXHashTable *NXCreateHashTableFromZone (NXHashTablePrototype prototype, unsigned capacity, const void *info, void *z) { + NXHashTable *table; + NXHashTablePrototype *proto; + + table = ALLOCTABLE(z); + if (! prototypes) bootstrap (); + if (! prototype.hash) prototype.hash = NXPtrHash; + if (! prototype.isEqual) prototype.isEqual = NXPtrIsEqual; + if (! prototype.free) prototype.free = NXNoEffectFree; + if (prototype.style) { + _objc_inform ("*** NXCreateHashTable: invalid style\n"); + return NULL; + }; + proto = (NXHashTablePrototype *)NXHashGet (prototypes, &prototype); + if (! proto) { + proto + = (NXHashTablePrototype *) malloc(sizeof (NXHashTablePrototype)); + bcopy ((const char*)&prototype, (char*)proto, sizeof (NXHashTablePrototype)); + (void) NXHashInsert (prototypes, proto); + proto = (NXHashTablePrototype *)NXHashGet (prototypes, &prototype); + if (! 
proto) { + _objc_inform ("*** NXCreateHashTable: bug\n"); + return NULL; + }; + }; + table->prototype = proto; table->count = 0; table->info = info; + table->nbBuckets = GOOD_CAPACITY(capacity); + table->buckets = ALLOCBUCKETS(z, table->nbBuckets); + return table; + } + +static void freeBucketPairs (void (*freeProc)(const void *info, void *data), HashBucket bucket, const void *info) { + unsigned j = bucket.count; + const void **pairs; + + if (j == 1) { + (*freeProc) (info, (void *) bucket.elements.one); + return; + }; + pairs = bucket.elements.many; + while (j--) { + (*freeProc) (info, (void *) *pairs); + pairs ++; + }; + FREEPAIRS (bucket.elements.many); + }; + +static void freeBuckets (NXHashTable *table, int freeObjects) { + unsigned i = table->nbBuckets; + HashBucket *buckets = (HashBucket *) table->buckets; + + while (i--) { + if (buckets->count) { + freeBucketPairs ((freeObjects) ? table->prototype->free : NXNoEffectFree, *buckets, table->info); + buckets->count = 0; + buckets->elements.one = NULL; + }; + buckets++; + }; + }; + +void NXFreeHashTable (NXHashTable *table) { + freeBuckets (table, YES); + free (table->buckets); + free (table); + }; + +void NXEmptyHashTable (NXHashTable *table) { + freeBuckets (table, NO); + table->count = 0; + } + +void NXResetHashTable (NXHashTable *table) { + freeBuckets (table, YES); + table->count = 0; +} + +BOOL NXCompareHashTables (NXHashTable *table1, NXHashTable *table2) { + if (table1 == table2) return YES; + if (NXCountHashTable (table1) != NXCountHashTable (table2)) return NO; + else { + void *data; + NXHashState state = NXInitHashState (table1); + while (NXNextHashState (table1, &state, &data)) { + if (! NXHashMember (table2, data)) return NO; + } + return YES; + } +} + +NXHashTable *NXCopyHashTable (NXHashTable *table) { + NXHashTable *newt; + NXHashState state = NXInitHashState (table); + void *data; + __unused void *z = ZONE_FROM_PTR(table); + + newt = ALLOCTABLE(z); + newt->prototype = table->prototype; newt->count = 0; + newt->info = table->info; + newt->nbBuckets = table->nbBuckets; + newt->buckets = ALLOCBUCKETS(z, newt->nbBuckets); + while (NXNextHashState (table, &state, &data)) + NXHashInsert (newt, data); + return newt; + } + +unsigned NXCountHashTable (NXHashTable *table) { + return table->count; + } + +int NXHashMember (NXHashTable *table, const void *data) { + HashBucket *bucket = BUCKETOF(table, data); + unsigned j = bucket->count; + const void **pairs; + + if (! j) return 0; + if (j == 1) { + return ISEQUAL(table, data, bucket->elements.one); + }; + pairs = bucket->elements.many; + while (j--) { + /* we don't cache isEqual because lists are short */ + if (ISEQUAL(table, data, *pairs)) return 1; + pairs ++; + }; + return 0; + } + +void *NXHashGet (NXHashTable *table, const void *data) { + HashBucket *bucket = BUCKETOF(table, data); + unsigned j = bucket->count; + const void **pairs; + + if (! j) return NULL; + if (j == 1) { + return ISEQUAL(table, data, bucket->elements.one) + ? 
(void *) bucket->elements.one : NULL; + }; + pairs = bucket->elements.many; + while (j--) { + /* we don't cache isEqual because lists are short */ + if (ISEQUAL(table, data, *pairs)) return (void *) *pairs; + pairs ++; + }; + return NULL; + } + +unsigned _NXHashCapacity (NXHashTable *table) { + return table->nbBuckets; + } + +void _NXHashRehashToCapacity (NXHashTable *table, unsigned newCapacity) { + /* Rehash: we create a pseudo table pointing really to the old guys, + extend self, copy the old pairs, and free the pseudo table */ + NXHashTable *old; + NXHashState state; + void *aux; + __unused void *z = ZONE_FROM_PTR(table); + + old = ALLOCTABLE(z); + old->prototype = table->prototype; old->count = table->count; + old->nbBuckets = table->nbBuckets; old->buckets = table->buckets; + table->nbBuckets = newCapacity; + table->count = 0; table->buckets = ALLOCBUCKETS(z, table->nbBuckets); + state = NXInitHashState (old); + while (NXNextHashState (old, &state, &aux)) + (void) NXHashInsert (table, aux); + freeBuckets (old, NO); + if (old->count != table->count) + _objc_inform("*** hashtable: count differs after rehashing; probably indicates a broken invariant: there are x and y such as isEqual(x, y) is TRUE but hash(x) != hash (y)\n"); + free (old->buckets); + free (old); + } + +static void _NXHashRehash (NXHashTable *table) { + _NXHashRehashToCapacity (table, MORE_CAPACITY(table->nbBuckets)); + } + +void *NXHashInsert (NXHashTable *table, const void *data) { + HashBucket *bucket = BUCKETOF(table, data); + unsigned j = bucket->count; + const void **pairs; + const void **newt; + __unused void *z = ZONE_FROM_PTR(table); + + if (! j) { + bucket->count++; bucket->elements.one = data; + table->count++; + return NULL; + }; + if (j == 1) { + if (ISEQUAL(table, data, bucket->elements.one)) { + const void *old = bucket->elements.one; + bucket->elements.one = data; + return (void *) old; + }; + newt = ALLOCPAIRS(z, 2); + newt[1] = bucket->elements.one; + *newt = data; + bucket->count++; bucket->elements.many = newt; + table->count++; + if (table->count > table->nbBuckets) _NXHashRehash (table); + return NULL; + }; + pairs = bucket->elements.many; + while (j--) { + /* we don't cache isEqual because lists are short */ + if (ISEQUAL(table, data, *pairs)) { + const void *old = *pairs; + *pairs = data; + return (void *) old; + }; + pairs ++; + }; + /* we enlarge this bucket; and put new data in front */ + newt = ALLOCPAIRS(z, bucket->count+1); + if (bucket->count) bcopy ((const char*)bucket->elements.many, (char*)(newt+1), bucket->count * PTRSIZE); + *newt = data; + FREEPAIRS (bucket->elements.many); + bucket->count++; bucket->elements.many = newt; + table->count++; + if (table->count > table->nbBuckets) _NXHashRehash (table); + return NULL; + } + +void *NXHashInsertIfAbsent (NXHashTable *table, const void *data) { + HashBucket *bucket = BUCKETOF(table, data); + unsigned j = bucket->count; + const void **pairs; + const void **newt; + __unused void *z = ZONE_FROM_PTR(table); + + if (! 
j) { + bucket->count++; bucket->elements.one = data; + table->count++; + return (void *) data; + }; + if (j == 1) { + if (ISEQUAL(table, data, bucket->elements.one)) + return (void *) bucket->elements.one; + newt = ALLOCPAIRS(z, 2); + newt[1] = bucket->elements.one; + *newt = data; + bucket->count++; bucket->elements.many = newt; + table->count++; + if (table->count > table->nbBuckets) _NXHashRehash (table); + return (void *) data; + }; + pairs = bucket->elements.many; + while (j--) { + /* we don't cache isEqual because lists are short */ + if (ISEQUAL(table, data, *pairs)) + return (void *) *pairs; + pairs ++; + }; + /* we enlarge this bucket; and put new data in front */ + newt = ALLOCPAIRS(z, bucket->count+1); + if (bucket->count) bcopy ((const char*)bucket->elements.many, (char*)(newt+1), bucket->count * PTRSIZE); + *newt = data; + FREEPAIRS (bucket->elements.many); + bucket->count++; bucket->elements.many = newt; + table->count++; + if (table->count > table->nbBuckets) _NXHashRehash (table); + return (void *) data; + } + +void *NXHashRemove (NXHashTable *table, const void *data) { + HashBucket *bucket = BUCKETOF(table, data); + unsigned j = bucket->count; + const void **pairs; + const void **newt; + __unused void *z = ZONE_FROM_PTR(table); + + if (! j) return NULL; + if (j == 1) { + if (! ISEQUAL(table, data, bucket->elements.one)) return NULL; + data = bucket->elements.one; + table->count--; bucket->count--; bucket->elements.one = NULL; + return (void *) data; + }; + pairs = bucket->elements.many; + if (j == 2) { + if (ISEQUAL(table, data, pairs[0])) { + bucket->elements.one = pairs[1]; data = pairs[0]; + } + else if (ISEQUAL(table, data, pairs[1])) { + bucket->elements.one = pairs[0]; data = pairs[1]; + } + else return NULL; + FREEPAIRS (pairs); + table->count--; bucket->count--; + return (void *) data; + }; + while (j--) { + if (ISEQUAL(table, data, *pairs)) { + data = *pairs; + /* we shrink this bucket */ + newt = (bucket->count-1) + ? ALLOCPAIRS(z, bucket->count-1) : NULL; + if (bucket->count-1 != j) + bcopy ((const char*)bucket->elements.many, (char*)newt, PTRSIZE*(bucket->count-j-1)); + if (j) + bcopy ((const char*)(bucket->elements.many + bucket->count-j), (char*)(newt+bucket->count-j-1), PTRSIZE*j); + FREEPAIRS (bucket->elements.many); + table->count--; bucket->count--; bucket->elements.many = newt; + return (void *) data; + }; + pairs ++; + }; + return NULL; + } + +NXHashState NXInitHashState (NXHashTable *table) { + NXHashState state; + + state.i = table->nbBuckets; + state.j = 0; + return state; + }; + +int NXNextHashState (NXHashTable *table, NXHashState *state, void **data) { + HashBucket *buckets = (HashBucket *) table->buckets; + + while (state->j == 0) { + if (state->i == 0) return NO; + state->i--; state->j = buckets[state->i].count; + } + state->j--; + buckets += state->i; + *data = (void *) ((buckets->count == 1) + ? 
buckets->elements.one : buckets->elements.many[state->j]); + return YES; + }; + +/************************************************************************* + * + * Conveniences + * + *************************************************************************/ + +uintptr_t NXPtrHash (const void *info, const void *data) { + return (((uintptr_t) data) >> 16) ^ ((uintptr_t) data); + }; + +uintptr_t NXStrHash (const void *info, const void *data) { + uintptr_t hash = 0; + unsigned char *s = (unsigned char *) data; + /* unsigned to avoid a sign-extend */ + /* unroll the loop */ + if (s) for (; ; ) { + if (*s == '\0') break; + hash ^= (uintptr_t) *s++; + if (*s == '\0') break; + hash ^= (uintptr_t) *s++ << 8; + if (*s == '\0') break; + hash ^= (uintptr_t) *s++ << 16; + if (*s == '\0') break; + hash ^= (uintptr_t) *s++ << 24; + } + return hash; + }; + +int NXStrIsEqual (const void *info, const void *data1, const void *data2) { + if (data1 == data2) return YES; + if (! data1) return ! strlen ((char *) data2); + if (! data2) return ! strlen ((char *) data1); + if (((char *) data1)[0] != ((char *) data2)[0]) return NO; + return (strcmp ((char *) data1, (char *) data2)) ? NO : YES; + }; + +void NXReallyFree (const void *info, void *data) { + free (data); + }; + +/* All the following functions are really private, made non-static only for the benefit of shlibs */ +static uintptr_t hashPtrStructKey (const void *info, const void *data) { + return NXPtrHash(info, *((void **) data)); + }; + +static int isEqualPtrStructKey (const void *info, const void *data1, const void *data2) { + return NXPtrIsEqual (info, *((void **) data1), *((void **) data2)); + }; + +static uintptr_t hashStrStructKey (const void *info, const void *data) { + return NXStrHash(info, *((char **) data)); + }; + +static int isEqualStrStructKey (const void *info, const void *data1, const void *data2) { + return NXStrIsEqual (info, *((char **) data1), *((char **) data2)); + }; + +const NXHashTablePrototype NXPtrPrototype = { + NXPtrHash, NXPtrIsEqual, NXNoEffectFree, 0 + }; + +const NXHashTablePrototype NXStrPrototype = { + NXStrHash, NXStrIsEqual, NXNoEffectFree, 0 + }; + +const NXHashTablePrototype NXPtrStructKeyPrototype = { + hashPtrStructKey, isEqualPtrStructKey, NXReallyFree, 0 + }; + +const NXHashTablePrototype NXStrStructKeyPrototype = { + hashStrStructKey, isEqualStrStructKey, NXReallyFree, 0 + }; + +/************************************************************************* + * + * Unique strings + * + *************************************************************************/ + +#if !__OBJC2__ && !TARGET_OS_WIN32 + +/* the implementation could be made faster at the expense of memory if the size of the strings were kept around */ +static NXHashTable *uniqueStrings = NULL; + +/* this is based on most apps using a few K of strings, and an average string size of 15 using sqrt(2*dataAlloced*perChunkOverhead) */ +#define CHUNK_SIZE 360 + +static int accessUniqueString = 0; + +static char *z = NULL; +static size_t zSize = 0; +static mutex_t uniquerLock; + +static const char *CopyIntoReadOnly (const char *str) { + size_t len = strlen (str) + 1; + char *result; + + if (len > CHUNK_SIZE/2) { /* dont let big strings waste space */ + result = (char *)malloc (len); + bcopy (str, result, len); + return result; + } + + mutex_locker_t lock(uniquerLock); + if (zSize < len) { + zSize = CHUNK_SIZE *((len + CHUNK_SIZE - 1) / CHUNK_SIZE); + /* not enough room, we try to allocate. 
If no room left, too bad */ + z = (char *)malloc (zSize); + }; + + result = z; + bcopy (str, result, len); + z += len; + zSize -= len; + return result; + }; + +NXAtom NXUniqueString (const char *buffer) { + const char *previous; + + if (! buffer) return buffer; + accessUniqueString++; + if (! uniqueStrings) + uniqueStrings = NXCreateHashTable (NXStrPrototype, 0, NULL); + previous = (const char *) NXHashGet (uniqueStrings, buffer); + if (previous) return previous; + previous = CopyIntoReadOnly (buffer); + if (NXHashInsert (uniqueStrings, previous)) { + _objc_inform ("*** NXUniqueString: invariant broken\n"); + return NULL; + }; + return previous; + }; + +NXAtom NXUniqueStringNoCopy (const char *string) { + accessUniqueString++; + if (! uniqueStrings) + uniqueStrings = NXCreateHashTable (NXStrPrototype, 0, NULL); + return (const char *) NXHashInsertIfAbsent (uniqueStrings, string); + }; + +#define BUF_SIZE 256 + +NXAtom NXUniqueStringWithLength (const char *buffer, int length) { + NXAtom atom; + char *nullTermStr; + char stackBuf[BUF_SIZE]; + + if (length+1 > BUF_SIZE) + nullTermStr = (char *)malloc (length+1); + else + nullTermStr = stackBuf; + bcopy (buffer, nullTermStr, length); + nullTermStr[length] = '\0'; + atom = NXUniqueString (nullTermStr); + if (length+1 > BUF_SIZE) + free (nullTermStr); + return atom; + }; + +char *NXCopyStringBufferFromZone (const char *str, void *zone) { +#if !SUPPORT_ZONES + return strdup(str); +#else + return strcpy ((char *) malloc_zone_malloc((malloc_zone_t *)zone, strlen (str) + 1), str); +#endif + }; + +char *NXCopyStringBuffer (const char *str) { + return strdup(str); + }; + +#endif diff --git a/runtime/llvm-AlignOf.h b/runtime/llvm-AlignOf.h new file mode 100644 index 0000000..cad2422 --- /dev/null +++ b/runtime/llvm-AlignOf.h @@ -0,0 +1,171 @@ +//===--- AlignOf.h - Portable calculation of type alignment -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the AlignOf function that computes alignments for +// arbitrary types. +// +//===----------------------------------------------------------------------===// + +// Taken from llvmCore-3425.0.31. + +#ifndef LLVM_SUPPORT_ALIGNOF_H +#define LLVM_SUPPORT_ALIGNOF_H + +#include + +namespace objc { + +template +struct AlignmentCalcImpl { + char x; + T t; +private: + AlignmentCalcImpl() {} // Never instantiate. +}; + +/// AlignOf - A templated class that contains an enum value representing +/// the alignment of the template argument. For example, +/// AlignOf::Alignment represents the alignment of type "int". The +/// alignment calculated is the minimum alignment, and not necessarily +/// the "desired" alignment returned by GCC's __alignof__ (for example). Note +/// that because the alignment is an enum value, it can be used as a +/// compile-time constant (e.g., for template instantiation). +template +struct AlignOf { + enum { Alignment = + static_cast(sizeof(AlignmentCalcImpl) - sizeof(T)) }; + + enum { Alignment_GreaterEqual_2Bytes = Alignment >= 2 ? 1 : 0 }; + enum { Alignment_GreaterEqual_4Bytes = Alignment >= 4 ? 1 : 0 }; + enum { Alignment_GreaterEqual_8Bytes = Alignment >= 8 ? 1 : 0 }; + enum { Alignment_GreaterEqual_16Bytes = Alignment >= 16 ? 1 : 0 }; + + enum { Alignment_LessEqual_2Bytes = Alignment <= 2 ? 
1 : 0 }; + enum { Alignment_LessEqual_4Bytes = Alignment <= 4 ? 1 : 0 }; + enum { Alignment_LessEqual_8Bytes = Alignment <= 8 ? 1 : 0 }; + enum { Alignment_LessEqual_16Bytes = Alignment <= 16 ? 1 : 0 }; + +}; + +/// alignOf - A templated function that returns the minimum alignment of +/// of a type. This provides no extra functionality beyond the AlignOf +/// class besides some cosmetic cleanliness. Example usage: +/// alignOf() returns the alignment of an int. +template +inline unsigned alignOf() { return AlignOf::Alignment; } + + +/// \brief Helper for building an aligned character array type. +/// +/// This template is used to explicitly build up a collection of aligned +/// character types. We have to build these up using a macro and explicit +/// specialization to cope with old versions of MSVC and GCC where only an +/// integer literal can be used to specify an alignment constraint. Once built +/// up here, we can then begin to indirect between these using normal C++ +/// template parameters. +template struct AlignedCharArrayImpl; + +// MSVC requires special handling here. +#ifndef _MSC_VER + +#if __has_feature(cxx_alignas) +#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template <> struct AlignedCharArrayImpl { \ + char aligned alignas(x); \ + } +#elif defined(__GNUC__) +#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template <> struct AlignedCharArrayImpl { \ + char aligned __attribute__((aligned(x))); \ + } +#else +# error No supported align as directive. +#endif + +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192); + +#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT + +#else // _MSC_VER + +// We provide special variations of this template for the most common +// alignments because __declspec(align(...)) doesn't actually work when it is +// a member of a by-value function argument in MSVC, even if the alignment +// request is something reasonably like 8-byte or 16-byte. +template <> struct AlignedCharArrayImpl<1> { char aligned; }; +template <> struct AlignedCharArrayImpl<2> { short aligned; }; +template <> struct AlignedCharArrayImpl<4> { int aligned; }; +template <> struct AlignedCharArrayImpl<8> { double aligned; }; + +#define LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(x) \ + template <> struct AlignedCharArrayImpl { \ + __declspec(align(x)) char aligned; \ + } +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(16); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(32); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(64); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(128); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(512); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(1024); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(2048); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(4096); +LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT(8192); +// Any larger and MSVC complains. 
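// ---- Editorial sketch, not part of the objc4-706 patch ----
// How the AlignOf<T> trick above works: AlignmentCalcImpl<T> lays out a char
// followed by a T, so the compiler pads the char up to T's minimum alignment,
// and sizeof(AlignmentCalcImpl<T>) - sizeof(T) recovers that alignment. On a
// C++11 compiler the same number is available directly as alignof(T); the
// checks below (which themselves assume C++11) only confirm the two agree for
// a couple of common types on typical ABIs.
namespace objc_alignof_demo {
    template <typename T> struct Calc { char x; T t; };
    static_assert(sizeof(Calc<double>) - sizeof(double) == alignof(double),
                  "AlignOf trick should match alignof(double)");
    static_assert(sizeof(Calc<void *>) - sizeof(void *) == alignof(void *),
                  "AlignOf trick should match alignof(void *)");
}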
+#undef LLVM_ALIGNEDCHARARRAY_TEMPLATE_ALIGNMENT + +#endif // _MSC_VER + +/// \brief This union template exposes a suitably aligned and sized character +/// array member which can hold elements of any of up to four types. +/// +/// These types may be arrays, structs, or any other types. The goal is to +/// produce a union type containing a character array which, when used, forms +/// storage suitable to placement new any of these types over. Support for more +/// than four types can be added at the cost of more boiler plate. +template +union AlignedCharArrayUnion { +private: + class AlignerImpl { + T1 t1; T2 t2; T3 t3; T4 t4; + + AlignerImpl(); // Never defined or instantiated. + }; + union SizerImpl { + char arr1[sizeof(T1)], arr2[sizeof(T2)], arr3[sizeof(T3)], arr4[sizeof(T4)]; + }; + +public: + /// \brief The character array buffer for use by clients. + /// + /// No other member of this union should be referenced. The exist purely to + /// constrain the layout of this character array. + char buffer[sizeof(SizerImpl)]; + +private: + // Tests seem to indicate that both Clang and GCC will properly register the + // alignment of a struct containing an aligned member, and this alignment + // should carry over to the character array in the union. + AlignedCharArrayImpl::Alignment> nonce_member; +}; + +} // end namespace objc +#endif diff --git a/runtime/llvm-DenseMap.h b/runtime/llvm-DenseMap.h new file mode 100644 index 0000000..6fe1382 --- /dev/null +++ b/runtime/llvm-DenseMap.h @@ -0,0 +1,1097 @@ +//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the DenseMap class. +// +//===----------------------------------------------------------------------===// + +// Taken from llvmCore-3425.0.31. + +#ifndef LLVM_ADT_DENSEMAP_H +#define LLVM_ADT_DENSEMAP_H + +#include "llvm-type_traits.h" +#include "llvm-MathExtras.h" +#include "llvm-AlignOf.h" +#include "llvm-DenseMapInfo.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "objc-private.h" + +// From llvm/Support/Compiler.h +#define LLVM_USE_RVALUE_REFERENCES 1 +#define llvm_move(value) (::std::move(value)) + +#define MIN_BUCKETS 4 +#define MIN_COMPACT 1024 + + +namespace objc { + +template, + bool IsConst = false> +class DenseMapIterator; + +// ZeroValuesArePurgeable=true is used by the refcount table. +// A key/value pair with value==0 is not required to be stored +// in the refcount table; it could correctly be erased instead. +// For performance, we do keep zero values in the table when the +// true refcount decreases to 1: this makes any future retain faster. +// For memory size, we allow rehashes and table insertions to +// remove a zero value as if it were a tombstone. + +template +class DenseMapBase { +protected: + typedef std::pair BucketT; + +public: + typedef KeyT key_type; + typedef ValueT mapped_type; + typedef BucketT value_type; + + typedef DenseMapIterator iterator; + typedef DenseMapIterator const_iterator; + inline iterator begin() { + // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets(). + return empty() ? 
end() : iterator(getBuckets(), getBucketsEnd()); + } + inline iterator end() { + return iterator(getBucketsEnd(), getBucketsEnd(), true); + } + inline const_iterator begin() const { + return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd()); + } + inline const_iterator end() const { + return const_iterator(getBucketsEnd(), getBucketsEnd(), true); + } + + bool empty() const { return getNumEntries() == 0; } + unsigned size() const { return getNumEntries(); } + + /// Grow the densemap so that it has at least Size buckets. Does not shrink + void resize(size_t Size) { + if (Size > getNumBuckets()) + grow(Size); + } + + void clear() { + if (getNumEntries() == 0 && getNumTombstones() == 0) return; + + // If the capacity of the array is huge, and the # elements used is small, + // shrink the array. + if (getNumEntries() * 4 < getNumBuckets() && + getNumBuckets() > MIN_BUCKETS) { + shrink_and_clear(); + return; + } + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->first, EmptyKey)) { + if (!KeyInfoT::isEqual(P->first, TombstoneKey)) { + P->second.~ValueT(); + decrementNumEntries(); + } + P->first = EmptyKey; + } + } + assert(getNumEntries() == 0 && "Node count imbalance!"); + setNumTombstones(0); + } + + /// count - Return true if the specified key is in the map. + bool count(const KeyT &Val) const { + const BucketT *TheBucket; + return LookupBucketFor(Val, TheBucket); + } + + iterator find(const KeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return iterator(TheBucket, getBucketsEnd(), true); + return end(); + } + const_iterator find(const KeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// Alternate version of find() which allows a different, and possibly + /// less expensive, key type. + /// The DenseMapInfo is responsible for supplying methods + /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key + /// type used. + template + iterator find_as(const LookupKeyT &Val) { + BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return iterator(TheBucket, getBucketsEnd(), true); + return end(); + } + template + const_iterator find_as(const LookupKeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return const_iterator(TheBucket, getBucketsEnd(), true); + return end(); + } + + /// lookup - Return the entry for the specified key, or a default + /// constructed value if no such entry exists. + ValueT lookup(const KeyT &Val) const { + const BucketT *TheBucket; + if (LookupBucketFor(Val, TheBucket)) + return TheBucket->second; + return ValueT(); + } + + // Inserts key,value pair into the map if the key isn't already in the map. + // If the key is already in the map, it returns false and doesn't update the + // value. + std::pair insert(const std::pair &KV) { + BucketT *TheBucket; + if (LookupBucketFor(KV.first, TheBucket)) + return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), + false); // Already in map. + + // Otherwise, insert the new element. + TheBucket = InsertIntoBucket(KV.first, KV.second, TheBucket); + return std::make_pair(iterator(TheBucket, getBucketsEnd(), true), true); + } + + /// insert - Range insertion of pairs. + template + void insert(InputIt I, InputIt E) { + for (; I != E; ++I) + insert(*I); + } + + // Clear if empty. 
+ // Shrink if at least 15/16 empty and larger than MIN_COMPACT. + void compact() { + if (getNumEntries() == 0) { + shrink_and_clear(); + } + else if (getNumBuckets() / 16 > getNumEntries() && + getNumBuckets() > MIN_COMPACT) + { + grow(getNumEntries() * 2); + } + } + + bool erase(const KeyT &Val) { + BucketT *TheBucket; + if (!LookupBucketFor(Val, TheBucket)) + return false; // not in map. + + TheBucket->second.~ValueT(); + TheBucket->first = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + compact(); + return true; + } + void erase(iterator I) { + BucketT *TheBucket = &*I; + TheBucket->second.~ValueT(); + TheBucket->first = getTombstoneKey(); + decrementNumEntries(); + incrementNumTombstones(); + compact(); + } + + value_type& FindAndConstruct(const KeyT &Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(Key, ValueT(), TheBucket); + } + + ValueT &operator[](const KeyT &Key) { + return FindAndConstruct(Key).second; + } + +#if LLVM_USE_RVALUE_REFERENCES + value_type& FindAndConstruct(KeyT &&Key) { + BucketT *TheBucket; + if (LookupBucketFor(Key, TheBucket)) + return *TheBucket; + + return *InsertIntoBucket(Key, ValueT(), TheBucket); + } + + ValueT &operator[](KeyT &&Key) { + return FindAndConstruct(Key).second; + } +#endif + + /// isPointerIntoBucketsArray - Return true if the specified pointer points + /// somewhere into the DenseMap's array of buckets (i.e. either to a key or + /// value in the DenseMap). + bool isPointerIntoBucketsArray(const void *Ptr) const { + return Ptr >= getBuckets() && Ptr < getBucketsEnd(); + } + + /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets + /// array. In conjunction with the previous method, this can be used to + /// determine whether an insertion caused the DenseMap to reallocate. + const void *getPointerIntoBucketsArray() const { return getBuckets(); } + +protected: + DenseMapBase() {} + + void destroyAll() { + if (getNumBuckets() == 0) // Nothing to do. + return; + + const KeyT EmptyKey = getEmptyKey(), TombstoneKey = getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = getBucketsEnd(); P != E; ++P) { + if (!KeyInfoT::isEqual(P->first, EmptyKey) && + !KeyInfoT::isEqual(P->first, TombstoneKey)) + P->second.~ValueT(); + P->first.~KeyT(); + } + +#ifndef NDEBUG + memset((void*)getBuckets(), 0x5a, sizeof(BucketT)*getNumBuckets()); +#endif + } + + void initEmpty() { + setNumEntries(0); + setNumTombstones(0); + + assert((getNumBuckets() & (getNumBuckets()-1)) == 0 && + "# initial buckets must be a power of two!"); + const KeyT EmptyKey = getEmptyKey(); + for (BucketT *B = getBuckets(), *E = getBucketsEnd(); B != E; ++B) + new (&B->first) KeyT(EmptyKey); + } + + void moveFromOldBuckets(BucketT *OldBucketsBegin, BucketT *OldBucketsEnd) { + initEmpty(); + + // Insert all the old elements. + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + for (BucketT *B = OldBucketsBegin, *E = OldBucketsEnd; B != E; ++B) { + if (!KeyInfoT::isEqual(B->first, EmptyKey) && + !KeyInfoT::isEqual(B->first, TombstoneKey) && + !(ZeroValuesArePurgeable && B->second == 0)) { + // Insert the key/value into the new table. + BucketT *DestBucket; + bool FoundVal = LookupBucketFor(B->first, DestBucket); + (void)FoundVal; // silence warning. 
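// ---- Editorial sketch, not part of the objc4-706 patch ----
// Why erase() above writes the tombstone key instead of restoring the empty
// key: with open addressing, a lookup stops probing at the first *empty*
// slot, so emptying a slot in the middle of a probe chain would hide every
// key inserted past it. A tombstone keeps the chain walkable while letting a
// later insert reuse the slot. Toy illustration with linear probing (the
// real table probes quadratically and uses sentinel keys rather than a
// separate state array):
#include <array>
namespace objc_tombstone_demo {
    enum Slot { Empty, Full, Tombstone };
    struct ToyTable {
        std::array<Slot, 8> state{};        // value-initialized: all Empty
        std::array<unsigned, 8> key{};
        bool find(unsigned k) const {
            for (unsigned i = k & 7, n = 0; n < 8; i = (i + 1) & 7, ++n) {
                if (state[i] == Empty) return false;             // chain ends
                if (state[i] == Full && key[i] == k) return true;
                // Tombstone (or a different key): keep probing.
            }
            return false;
        }
    };
}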
+ assert(!FoundVal && "Key already in new map?"); + DestBucket->first = llvm_move(B->first); + new (&DestBucket->second) ValueT(llvm_move(B->second)); + incrementNumEntries(); + + // Free the value. + B->second.~ValueT(); + } + B->first.~KeyT(); + } + +#ifndef NDEBUG + if (OldBucketsBegin != OldBucketsEnd) + memset((void*)OldBucketsBegin, 0x5a, + sizeof(BucketT) * (OldBucketsEnd - OldBucketsBegin)); +#endif + } + + template + void copyFrom(const DenseMapBase& other) { + assert(getNumBuckets() == other.getNumBuckets()); + + setNumEntries(other.getNumEntries()); + setNumTombstones(other.getNumTombstones()); + + if (isPodLike::value && isPodLike::value) + memcpy(getBuckets(), other.getBuckets(), + getNumBuckets() * sizeof(BucketT)); + else + for (size_t i = 0; i < getNumBuckets(); ++i) { + new (&getBuckets()[i].first) KeyT(other.getBuckets()[i].first); + if (!KeyInfoT::isEqual(getBuckets()[i].first, getEmptyKey()) && + !KeyInfoT::isEqual(getBuckets()[i].first, getTombstoneKey())) + new (&getBuckets()[i].second) ValueT(other.getBuckets()[i].second); + } + } + + void swap(DenseMapBase& RHS) { + std::swap(getNumEntries(), RHS.getNumEntries()); + std::swap(getNumTombstones(), RHS.getNumTombstones()); + } + + static unsigned getHashValue(const KeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + template + static unsigned getHashValue(const LookupKeyT &Val) { + return KeyInfoT::getHashValue(Val); + } + static const KeyT getEmptyKey() { + return KeyInfoT::getEmptyKey(); + } + static const KeyT getTombstoneKey() { + return KeyInfoT::getTombstoneKey(); + } + +private: + unsigned getNumEntries() const { + return static_cast(this)->getNumEntries(); + } + void setNumEntries(unsigned Num) { + static_cast(this)->setNumEntries(Num); + } + void incrementNumEntries() { + setNumEntries(getNumEntries() + 1); + } + void decrementNumEntries() { + setNumEntries(getNumEntries() - 1); + } + unsigned getNumTombstones() const { + return static_cast(this)->getNumTombstones(); + } + void setNumTombstones(unsigned Num) { + static_cast(this)->setNumTombstones(Num); + } + void incrementNumTombstones() { + setNumTombstones(getNumTombstones() + 1); + } + void decrementNumTombstones() { + setNumTombstones(getNumTombstones() - 1); + } + const BucketT *getBuckets() const { + return static_cast(this)->getBuckets(); + } + BucketT *getBuckets() { + return static_cast(this)->getBuckets(); + } + unsigned getNumBuckets() const { + return static_cast(this)->getNumBuckets(); + } + BucketT *getBucketsEnd() { + return getBuckets() + getNumBuckets(); + } + const BucketT *getBucketsEnd() const { + return getBuckets() + getNumBuckets(); + } + + void grow(unsigned AtLeast) { + static_cast(this)->grow(AtLeast); + } + + void shrink_and_clear() { + static_cast(this)->shrink_and_clear(); + } + + + BucketT *InsertIntoBucket(const KeyT &Key, const ValueT &Value, + BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->first = Key; + new (&TheBucket->second) ValueT(Value); + return TheBucket; + } + +#if LLVM_USE_RVALUE_REFERENCES + BucketT *InsertIntoBucket(const KeyT &Key, ValueT &&Value, + BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->first = Key; + new (&TheBucket->second) ValueT(std::move(Value)); + return TheBucket; + } + + BucketT *InsertIntoBucket(KeyT &&Key, ValueT &&Value, BucketT *TheBucket) { + TheBucket = InsertIntoBucketImpl(Key, TheBucket); + + TheBucket->first = std::move(Key); + new (&TheBucket->second) ValueT(std::move(Value)); + return TheBucket; + } 
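// ---- Editorial sketch, not part of the objc4-706 patch ----
// The accessors above (getNumEntries(), getBuckets(), grow(), ...) show the
// pattern DenseMapBase is built on: the base class owns no storage and
// reaches the derived class's fields through the curiously recurring
// template pattern (CRTP), so DenseMap and SmallDenseMap can plug in heap
// buckets or an inline buffer without any virtual calls. Stripped-down
// version of the idea, with made-up names:
namespace objc_crtp_demo {
    template <typename DerivedT>
    struct TableBase {
        unsigned size() const {
            // The derived type is known at compile time; this call inlines.
            return static_cast<const DerivedT *>(this)->numEntries();
        }
    };
    struct HeapTable : TableBase<HeapTable> {
        unsigned entries = 0;
        unsigned numEntries() const { return entries; }
    };
    // Usage: HeapTable t; t.size() == 0.
}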
+#endif + + BucketT *InsertIntoBucketImpl(const KeyT &Key, BucketT *TheBucket) { + // If the load of the hash table is more than 3/4, grow the table. + // If fewer than 1/8 of the buckets are empty (meaning that many are + // filled with tombstones), rehash the table without growing. + // + // The later case is tricky. For example, if we had one empty bucket with + // tons of tombstones, failing lookups (e.g. for insertion) would have to + // probe almost the entire table until it found the empty bucket. If the + // table completely filled with tombstones, no lookup would ever succeed, + // causing infinite loops in lookup. + unsigned NewNumEntries = getNumEntries() + 1; + unsigned NumBuckets = getNumBuckets(); + if (NewNumEntries*4 >= NumBuckets*3) { + this->grow(NumBuckets * 2); + LookupBucketFor(Key, TheBucket); + NumBuckets = getNumBuckets(); + } + if (NumBuckets-(NewNumEntries+getNumTombstones()) <= NumBuckets/8) { + this->grow(NumBuckets); + LookupBucketFor(Key, TheBucket); + } + assert(TheBucket); + + // Only update the state after we've grown our bucket space appropriately + // so that when growing buckets we have self-consistent entry count. + // If we are writing over a tombstone or zero value, remember this. + if (KeyInfoT::isEqual(TheBucket->first, getEmptyKey())) { + // Replacing an empty bucket. + incrementNumEntries(); + } + else if (KeyInfoT::isEqual(TheBucket->first, getTombstoneKey())) { + // Replacing a tombstone. + incrementNumEntries(); + decrementNumTombstones(); + } + else if (ZeroValuesArePurgeable && TheBucket->second == 0) { + // Purging a zero. No accounting changes. + TheBucket->second.~ValueT(); + } else { + // Updating an existing entry. No accounting changes. + } + + return TheBucket; + } + + /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in + /// FoundBucket. If the bucket contains the key and a value, this returns + /// true, otherwise it returns a bucket with an empty marker or tombstone + /// or zero value and returns false. + template + bool LookupBucketFor(const LookupKeyT &Val, + const BucketT *&FoundBucket) const { + const BucketT *BucketsPtr = getBuckets(); + const unsigned NumBuckets = getNumBuckets(); + + if (NumBuckets == 0) { + FoundBucket = 0; + return false; + } + + // FoundTombstone - Keep track of whether we find a tombstone or zero value while probing. + const BucketT *FoundTombstone = 0; + const KeyT EmptyKey = getEmptyKey(); + const KeyT TombstoneKey = getTombstoneKey(); + assert(!KeyInfoT::isEqual(Val, EmptyKey) && + !KeyInfoT::isEqual(Val, TombstoneKey) && + "Empty/Tombstone value shouldn't be inserted into map!"); + + unsigned BucketNo = getHashValue(Val) & (NumBuckets-1); + unsigned ProbeAmt = 1; + while (1) { + const BucketT *ThisBucket = BucketsPtr + BucketNo; + // Found Val's bucket? If so, return it. + if (KeyInfoT::isEqual(Val, ThisBucket->first)) { + FoundBucket = ThisBucket; + return true; + } + + // If we found an empty bucket, the key doesn't exist in the set. + // Insert it and return the default value. + if (KeyInfoT::isEqual(ThisBucket->first, EmptyKey)) { + // If we've already seen a tombstone while probing, fill it in instead + // of the empty bucket we eventually probed to. + if (FoundTombstone) ThisBucket = FoundTombstone; + FoundBucket = FoundTombstone ? FoundTombstone : ThisBucket; + return false; + } + + // If this is a tombstone, remember it. If Val ends up not in the map, we + // prefer to return it than something that would require more probing. + // Ditto for zero values. 
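// ---- Editorial sketch, not part of the objc4-706 patch ----
// The two growth tests above, worked through for a 64-bucket table: the map
// grows when the incoming entry would push the load factor to 3/4 or more
// (4 * (n + 1) >= 3 * 64 first holds when n + 1 == 48), and it rehashes at
// the current size, purging tombstones, when the empty slots (neither live
// nor tombstoned) drop to 1/8 of the table or fewer, so probe chains always
// terminate quickly.
#include <cassert>
namespace objc_loadfactor_demo {
    // Same arithmetic as InsertIntoBucketImpl, expressed as predicates.
    inline bool wouldGrow(unsigned entries, unsigned buckets) {
        return (entries + 1) * 4 >= buckets * 3;
    }
    inline bool wouldRehashInPlace(unsigned entries, unsigned tombstones,
                                   unsigned buckets) {
        return buckets - (entries + 1 + tombstones) <= buckets / 8;
    }
    inline void check() {
        assert(!wouldGrow(46, 64));      // 47th entry: load stays under 3/4
        assert( wouldGrow(47, 64));      // 48th entry: 48/64 == 3/4, grow
        assert( wouldRehashInPlace(8, 48, 64));   // only 7 empty slots left
    }
}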
+ if (KeyInfoT::isEqual(ThisBucket->first, TombstoneKey) && !FoundTombstone) + FoundTombstone = ThisBucket; // Remember the first tombstone found. + if (ZeroValuesArePurgeable && + ThisBucket->second == 0 && !FoundTombstone) + FoundTombstone = ThisBucket; + + // Otherwise, it's a hash collision or a tombstone, continue quadratic + // probing. + if (ProbeAmt > NumBuckets) { + // No empty buckets in table. Die. + _objc_fatal("Hash table corrupted. This is probably a memory error " + "somewhere. (table at %p, buckets at %p (%zu bytes), " + "%u buckets, %u entries, %u tombstones, " + "data %p %p %p %p)", + this, BucketsPtr, malloc_size(BucketsPtr), + NumBuckets, getNumEntries(), getNumTombstones(), + ((void**)BucketsPtr)[0], ((void**)BucketsPtr)[1], + ((void**)BucketsPtr)[2], ((void**)BucketsPtr)[3]); + } + BucketNo += ProbeAmt++; + BucketNo&= (NumBuckets-1); + } + } + + template + bool LookupBucketFor(const LookupKeyT &Val, BucketT *&FoundBucket) { + const BucketT *ConstFoundBucket; + bool Result = const_cast(this) + ->LookupBucketFor(Val, ConstFoundBucket); + FoundBucket = const_cast(ConstFoundBucket); + return Result; + } + +public: + /// Return the approximate size (in bytes) of the actual map. + /// This is just the raw memory used by DenseMap. + /// If entries are pointers to objects, the size of the referenced objects + /// are not included. + size_t getMemorySize() const { + return getNumBuckets() * sizeof(BucketT); + } +}; + +template > +class DenseMap + : public DenseMapBase, + KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> { + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + typedef DenseMapBase BaseT; + typedef typename BaseT::BucketT BucketT; + friend class DenseMapBase; + + BucketT *Buckets; + unsigned NumEntries; + unsigned NumTombstones; + unsigned NumBuckets; + +public: + explicit DenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + DenseMap(const DenseMap &other) { + init(0); + copyFrom(other); + } + +#if LLVM_USE_RVALUE_REFERENCES + DenseMap(DenseMap &&other) { + init(0); + swap(other); + } +#endif + + template + DenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~DenseMap() { + this->destroyAll(); + operator delete(Buckets); + } + + void swap(DenseMap& RHS) { + std::swap(Buckets, RHS.Buckets); + std::swap(NumEntries, RHS.NumEntries); + std::swap(NumTombstones, RHS.NumTombstones); + std::swap(NumBuckets, RHS.NumBuckets); + } + + DenseMap& operator=(const DenseMap& other) { + copyFrom(other); + return *this; + } + +#if LLVM_USE_RVALUE_REFERENCES + DenseMap& operator=(DenseMap &&other) { + this->destroyAll(); + operator delete(Buckets); + init(0); + swap(other); + return *this; + } +#endif + + void copyFrom(const DenseMap& other) { + this->destroyAll(); + operator delete(Buckets); + if (allocateBuckets(other.NumBuckets)) { + this->BaseT::copyFrom(other); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void init(unsigned InitBuckets) { + if (allocateBuckets(InitBuckets)) { + this->BaseT::initEmpty(); + } else { + NumEntries = 0; + NumTombstones = 0; + } + } + + void grow(unsigned AtLeast) { + unsigned OldNumBuckets = NumBuckets; + BucketT *OldBuckets = Buckets; + + allocateBuckets(std::max(MIN_BUCKETS, NextPowerOf2(AtLeast))); + assert(Buckets); + if (!OldBuckets) { + this->BaseT::initEmpty(); + return; + } + + this->moveFromOldBuckets(OldBuckets, OldBuckets+OldNumBuckets); + + // Free the old table. 
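// ---- Editorial sketch, not part of the objc4-706 patch ----
// The probe loop above advances by 1, 2, 3, ... (BucketNo += ProbeAmt++), so
// it visits h, h+1, h+3, h+6, ...: offsets that are triangular numbers. For
// a power-of-two bucket count that sequence touches every slot before it
// repeats, which is why the "Hash table corrupted" path can only be reached
// when the table genuinely has no empty bucket. Small check:
#include <bitset>
namespace objc_probe_demo {
    inline bool probeCoversAllSlots(unsigned numBuckets /* power of two, <= 4096 */) {
        std::bitset<4096> seen;
        unsigned bucket = 0, probeAmt = 1;
        for (unsigned i = 0; i < numBuckets; ++i) {
            seen.set(bucket);
            bucket = (bucket + probeAmt++) & (numBuckets - 1);
        }
        return seen.count() == numBuckets;
    }
    // probeCoversAllSlots(16) and probeCoversAllSlots(1024) both hold; many
    // non-power-of-two sizes would fail, which is one reason bucket counts
    // are always kept at powers of two.
}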
+ operator delete(OldBuckets); + } + + void shrink_and_clear() { + unsigned OldNumEntries = NumEntries; + this->destroyAll(); + + // Reduce the number of buckets. + unsigned NewNumBuckets = 0; + if (OldNumEntries) + NewNumBuckets = std::max(MIN_BUCKETS, 1 << (Log2_32_Ceil(OldNumEntries) + 1)); + if (NewNumBuckets == NumBuckets) { + this->BaseT::initEmpty(); + return; + } + + operator delete(Buckets); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + void setNumEntries(unsigned Num) { + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + BucketT *getBuckets() const { + return Buckets; + } + + unsigned getNumBuckets() const { + return NumBuckets; + } + + bool allocateBuckets(unsigned Num) { + NumBuckets = Num; + if (NumBuckets == 0) { + Buckets = 0; + return false; + } + + Buckets = static_cast(operator new(sizeof(BucketT)*NumBuckets)); + return true; + } +}; + +template > +class SmallDenseMap + : public DenseMapBase, + KeyT, ValueT, KeyInfoT, ZeroValuesArePurgeable> { + // Lift some types from the dependent base class into this class for + // simplicity of referring to them. + typedef DenseMapBase BaseT; + typedef typename BaseT::BucketT BucketT; + friend class DenseMapBase; + + unsigned Small : 1; + unsigned NumEntries : 31; + unsigned NumTombstones; + + struct LargeRep { + BucketT *Buckets; + unsigned NumBuckets; + }; + + /// A "union" of an inline bucket array and the struct representing + /// a large bucket. This union will be discriminated by the 'Small' bit. + AlignedCharArrayUnion storage; + +public: + explicit SmallDenseMap(unsigned NumInitBuckets = 0) { + init(NumInitBuckets); + } + + SmallDenseMap(const SmallDenseMap &other) { + init(0); + copyFrom(other); + } + +#if LLVM_USE_RVALUE_REFERENCES + SmallDenseMap(SmallDenseMap &&other) { + init(0); + swap(other); + } +#endif + + template + SmallDenseMap(const InputIt &I, const InputIt &E) { + init(NextPowerOf2(std::distance(I, E))); + this->insert(I, E); + } + + ~SmallDenseMap() { + this->destroyAll(); + deallocateBuckets(); + } + + void swap(SmallDenseMap& RHS) { + unsigned TmpNumEntries = RHS.NumEntries; + RHS.NumEntries = NumEntries; + NumEntries = TmpNumEntries; + std::swap(NumTombstones, RHS.NumTombstones); + + const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + if (Small && RHS.Small) { + // If we're swapping inline bucket arrays, we have to cope with some of + // the tricky bits of DenseMap's storage system: the buckets are not + // fully initialized. Thus we swap every key, but we may have + // a one-directional move of the value. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *LHSB = &getInlineBuckets()[i], + *RHSB = &RHS.getInlineBuckets()[i]; + bool hasLHSValue = (!KeyInfoT::isEqual(LHSB->first, EmptyKey) && + !KeyInfoT::isEqual(LHSB->first, TombstoneKey)); + bool hasRHSValue = (!KeyInfoT::isEqual(RHSB->first, EmptyKey) && + !KeyInfoT::isEqual(RHSB->first, TombstoneKey)); + if (hasLHSValue && hasRHSValue) { + // Swap together if we can... + std::swap(*LHSB, *RHSB); + continue; + } + // Swap separately and handle any assymetry. 
+ std::swap(LHSB->first, RHSB->first); + if (hasLHSValue) { + new (&RHSB->second) ValueT(llvm_move(LHSB->second)); + LHSB->second.~ValueT(); + } else if (hasRHSValue) { + new (&LHSB->second) ValueT(llvm_move(RHSB->second)); + RHSB->second.~ValueT(); + } + } + return; + } + if (!Small && !RHS.Small) { + std::swap(getLargeRep()->Buckets, RHS.getLargeRep()->Buckets); + std::swap(getLargeRep()->NumBuckets, RHS.getLargeRep()->NumBuckets); + return; + } + + SmallDenseMap &SmallSide = Small ? *this : RHS; + SmallDenseMap &LargeSide = Small ? RHS : *this; + + // First stash the large side's rep and move the small side across. + LargeRep TmpRep = llvm_move(*LargeSide.getLargeRep()); + LargeSide.getLargeRep()->~LargeRep(); + LargeSide.Small = true; + // This is similar to the standard move-from-old-buckets, but the bucket + // count hasn't actually rotated in this case. So we have to carefully + // move construct the keys and values into their new locations, but there + // is no need to re-hash things. + for (unsigned i = 0, e = InlineBuckets; i != e; ++i) { + BucketT *NewB = &LargeSide.getInlineBuckets()[i], + *OldB = &SmallSide.getInlineBuckets()[i]; + new (&NewB->first) KeyT(llvm_move(OldB->first)); + OldB->first.~KeyT(); + if (!KeyInfoT::isEqual(NewB->first, EmptyKey) && + !KeyInfoT::isEqual(NewB->first, TombstoneKey)) { + new (&NewB->second) ValueT(llvm_move(OldB->second)); + OldB->second.~ValueT(); + } + } + + // The hard part of moving the small buckets across is done, just move + // the TmpRep into its new home. + SmallSide.Small = false; + new (SmallSide.getLargeRep()) LargeRep(llvm_move(TmpRep)); + } + + SmallDenseMap& operator=(const SmallDenseMap& other) { + copyFrom(other); + return *this; + } + +#if LLVM_USE_RVALUE_REFERENCES + SmallDenseMap& operator=(SmallDenseMap &&other) { + this->destroyAll(); + deallocateBuckets(); + init(0); + swap(other); + return *this; + } +#endif + + void copyFrom(const SmallDenseMap& other) { + this->destroyAll(); + deallocateBuckets(); + Small = true; + if (other.getNumBuckets() > InlineBuckets) { + Small = false; + allocateBuckets(other.getNumBuckets()); + } + this->BaseT::copyFrom(other); + } + + void init(unsigned InitBuckets) { + Small = true; + if (InitBuckets > InlineBuckets) { + Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets)); + } + this->BaseT::initEmpty(); + } + + void grow(unsigned AtLeast) { + if (AtLeast > InlineBuckets) + AtLeast = std::max(MIN_BUCKETS, NextPowerOf2(AtLeast)); + + if (Small) { + if (AtLeast <= InlineBuckets) + return; // Nothing to do. + + // First move the inline buckets into a temporary storage. + AlignedCharArrayUnion TmpStorage; + BucketT *TmpBegin = reinterpret_cast(TmpStorage.buffer); + BucketT *TmpEnd = TmpBegin; + + // Loop over the buckets, moving non-empty, non-tombstones into the + // temporary storage. Have the loop move the TmpEnd forward as it goes. + const KeyT EmptyKey = this->getEmptyKey(); + const KeyT TombstoneKey = this->getTombstoneKey(); + for (BucketT *P = getBuckets(), *E = P + InlineBuckets; P != E; ++P) { + if (!KeyInfoT::isEqual(P->first, EmptyKey) && + !KeyInfoT::isEqual(P->first, TombstoneKey)) { + assert(size_t(TmpEnd - TmpBegin) < InlineBuckets && + "Too many inline buckets!"); + new (&TmpEnd->first) KeyT(llvm_move(P->first)); + new (&TmpEnd->second) ValueT(llvm_move(P->second)); + ++TmpEnd; + P->second.~ValueT(); + } + P->first.~KeyT(); + } + + // Now make this map use the large rep, and move all the entries back + // into it. 
+ Small = false; + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + this->moveFromOldBuckets(TmpBegin, TmpEnd); + return; + } + + LargeRep OldRep = llvm_move(*getLargeRep()); + getLargeRep()->~LargeRep(); + if (AtLeast <= InlineBuckets) { + Small = true; + } else { + new (getLargeRep()) LargeRep(allocateBuckets(AtLeast)); + } + + this->moveFromOldBuckets(OldRep.Buckets, OldRep.Buckets+OldRep.NumBuckets); + + // Free the old table. + operator delete(OldRep.Buckets); + } + + void shrink_and_clear() { + unsigned OldSize = this->size(); + this->destroyAll(); + + // Reduce the number of buckets. + unsigned NewNumBuckets = 0; + if (OldSize) { + NewNumBuckets = 1 << (Log2_32_Ceil(OldSize) + 1); + if (NewNumBuckets > InlineBuckets && NewNumBuckets < MIN_BUCKETS) + NewNumBuckets = MIN_BUCKETS; + } + if ((Small && NewNumBuckets <= InlineBuckets) || + (!Small && NewNumBuckets == getLargeRep()->NumBuckets)) { + this->BaseT::initEmpty(); + return; + } + + deallocateBuckets(); + init(NewNumBuckets); + } + +private: + unsigned getNumEntries() const { + return NumEntries; + } + void setNumEntries(unsigned Num) { + assert(Num < INT_MAX && "Cannot support more than INT_MAX entries"); + NumEntries = Num; + } + + unsigned getNumTombstones() const { + return NumTombstones; + } + void setNumTombstones(unsigned Num) { + NumTombstones = Num; + } + + const BucketT *getInlineBuckets() const { + assert(Small); + // Note that this cast does not violate aliasing rules as we assert that + // the memory's dynamic type is the small, inline bucket buffer, and the + // 'storage.buffer' static type is 'char *'. + return reinterpret_cast(storage.buffer); + } + BucketT *getInlineBuckets() { + return const_cast( + const_cast(this)->getInlineBuckets()); + } + const LargeRep *getLargeRep() const { + assert(!Small); + // Note, same rule about aliasing as with getInlineBuckets. + return reinterpret_cast(storage.buffer); + } + LargeRep *getLargeRep() { + return const_cast( + const_cast(this)->getLargeRep()); + } + + const BucketT *getBuckets() const { + return Small ? getInlineBuckets() : getLargeRep()->Buckets; + } + BucketT *getBuckets() { + return const_cast( + const_cast(this)->getBuckets()); + } + unsigned getNumBuckets() const { + return Small ? InlineBuckets : getLargeRep()->NumBuckets; + } + + void deallocateBuckets() { + if (Small) + return; + + operator delete(getLargeRep()->Buckets); + getLargeRep()->~LargeRep(); + } + + LargeRep allocateBuckets(unsigned Num) { + assert(Num > InlineBuckets && "Must allocate more buckets than are inline"); + LargeRep Rep = { + static_cast(operator new(sizeof(BucketT) * Num)), Num +}; + return Rep; + } +}; + +template +class DenseMapIterator { + typedef std::pair Bucket; + typedef DenseMapIterator ConstIterator; + friend class DenseMapIterator; +public: + typedef ptrdiff_t difference_type; + typedef typename conditional::type value_type; + typedef value_type *pointer; + typedef value_type &reference; + typedef std::forward_iterator_tag iterator_category; +private: + pointer Ptr, End; +public: + DenseMapIterator() : Ptr(0), End(0) {} + + DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false) + : Ptr(Pos), End(E) { + if (!NoAdvance) AdvancePastEmptyBuckets(); + } + + // If IsConst is true this is a converting constructor from iterator to + // const_iterator and the default copy constructor is used. + // Otherwise this is a copy constructor for iterator. 
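// ---- Editorial sketch, not part of the objc4-706 patch ----
// How the inline-bucket design above is meant to be used, assuming the usual
// LLVM parameter order (key type, value type, inline bucket count): buckets
// up to that count live directly inside the map object, so a map that stays
// tiny never touches the heap, and grow() migrates the entries into the
// heap-allocated "large rep" once the load-factor rule forces a bigger
// table. The key/value types are arbitrary examples, and the include assumes
// the runtime's headers and build configuration are available.
#include "llvm-DenseMap.h"
namespace objc_smalldensemap_demo {
    inline void demo() {
        objc::SmallDenseMap<unsigned, unsigned, 4> counts;  // 4 inline buckets
        counts[5] = 1;                       // served from the inline buffer
        for (unsigned i = 0; i < 32; ++i)    // enough entries to go large
            counts[i] += 1;
    }
}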
+ DenseMapIterator(const DenseMapIterator& I) + : Ptr(I.Ptr), End(I.End) {} + + reference operator*() const { + return *Ptr; + } + pointer operator->() const { + return Ptr; + } + + bool operator==(const ConstIterator &RHS) const { + return Ptr == RHS.operator->(); + } + bool operator!=(const ConstIterator &RHS) const { + return Ptr != RHS.operator->(); + } + + inline DenseMapIterator& operator++() { // Preincrement + ++Ptr; + AdvancePastEmptyBuckets(); + return *this; + } + DenseMapIterator operator++(int) { // Postincrement + DenseMapIterator tmp = *this; ++*this; return tmp; + } + +private: + void AdvancePastEmptyBuckets() { + const KeyT Empty = KeyInfoT::getEmptyKey(); + const KeyT Tombstone = KeyInfoT::getTombstoneKey(); + + while (Ptr != End && + (KeyInfoT::isEqual(Ptr->first, Empty) || + KeyInfoT::isEqual(Ptr->first, Tombstone))) + ++Ptr; + } +}; + +} // end namespace objc + +#endif diff --git a/runtime/llvm-DenseMapInfo.h b/runtime/llvm-DenseMapInfo.h new file mode 100644 index 0000000..4b7869f --- /dev/null +++ b/runtime/llvm-DenseMapInfo.h @@ -0,0 +1,200 @@ +//===- llvm/ADT/DenseMapInfo.h - Type traits for DenseMap -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines DenseMapInfo traits for DenseMap. +// +//===----------------------------------------------------------------------===// + +// Taken from llvmCore-3425.0.31. + +#ifndef LLVM_ADT_DENSEMAPINFO_H +#define LLVM_ADT_DENSEMAPINFO_H + +#include "objc-private.h" +#include "llvm-type_traits.h" + +namespace objc { + +template +struct DenseMapInfo { + //static inline T getEmptyKey(); + //static inline T getTombstoneKey(); + //static unsigned getHashValue(const T &Val); + //static bool isEqual(const T &LHS, const T &RHS); +}; + +// Provide DenseMapInfo for all pointers. +template +struct DenseMapInfo { + static inline T* getEmptyKey() { + uintptr_t Val = static_cast(-1); + return reinterpret_cast(Val); + } + static inline T* getTombstoneKey() { + uintptr_t Val = static_cast(-2); + return reinterpret_cast(Val); + } + static unsigned getHashValue(const T *PtrVal) { + return ptr_hash((uintptr_t)PtrVal); + } + static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; } +}; + +// Provide DenseMapInfo for disguised pointers. +template +struct DenseMapInfo> { + static inline DisguisedPtr getEmptyKey() { + return DisguisedPtr((T*)(uintptr_t)-1); + } + static inline DisguisedPtr getTombstoneKey() { + return DisguisedPtr((T*)(uintptr_t)-2); + } + static unsigned getHashValue(const T *PtrVal) { + return ptr_hash((uintptr_t)PtrVal); + } + static bool isEqual(const DisguisedPtr &LHS, const DisguisedPtr &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for cstrings. +template<> struct DenseMapInfo { + static inline const char* getEmptyKey() { + return reinterpret_cast((intptr_t)-1); + } + static inline const char* getTombstoneKey() { + return reinterpret_cast((intptr_t)-2); + } + static unsigned getHashValue(const char* const &Val) { + return _objc_strhash(Val); + } + static bool isEqual(const char* const &LHS, const char* const &RHS) { + return 0 == strcmp(LHS, RHS); + } +}; + +// Provide DenseMapInfo for chars. 
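// ---- Editorial sketch, not part of the objc4-706 patch ----
// What a client supplies to use its own key type with DenseMap: the four
// members below are the entire protocol. The only hard requirements are that
// the empty and tombstone keys can never equal a real key, and that
// getHashValue() and isEqual() are consistent with each other. "SelRef" is a
// made-up example type; its hash mirrors NXPtrHash earlier in this patch.
#include <cstdint>
namespace objc {
    struct SelRef { const void *ptr; };

    template<> struct DenseMapInfo<SelRef> {
        static inline SelRef getEmptyKey()     { return SelRef{ (void *)(uintptr_t)-1 }; }
        static inline SelRef getTombstoneKey() { return SelRef{ (void *)(uintptr_t)-2 }; }
        static unsigned getHashValue(const SelRef &s) {
            return (unsigned)(((uintptr_t)s.ptr >> 16) ^ (uintptr_t)s.ptr);
        }
        static bool isEqual(const SelRef &a, const SelRef &b) {
            return a.ptr == b.ptr;
        }
    };
}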
+template<> struct DenseMapInfo { + static inline char getEmptyKey() { return ~0; } + static inline char getTombstoneKey() { return ~0 - 1; } + static unsigned getHashValue(const char& Val) { return Val * 37U; } + static bool isEqual(const char &LHS, const char &RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned ints. +template<> struct DenseMapInfo { + static inline unsigned getEmptyKey() { return ~0U; } + static inline unsigned getTombstoneKey() { return ~0U - 1; } + static unsigned getHashValue(const unsigned& Val) { return Val * 37U; } + static bool isEqual(const unsigned& LHS, const unsigned& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned longs. +template<> struct DenseMapInfo { + static inline unsigned long getEmptyKey() { return ~0UL; } + static inline unsigned long getTombstoneKey() { return ~0UL - 1L; } + static unsigned getHashValue(const unsigned long& Val) { + return (unsigned)(Val * 37UL); + } + static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for unsigned long longs. +template<> struct DenseMapInfo { + static inline unsigned long long getEmptyKey() { return ~0ULL; } + static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; } + static unsigned getHashValue(const unsigned long long& Val) { + return (unsigned)(Val * 37ULL); + } + static bool isEqual(const unsigned long long& LHS, + const unsigned long long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for ints. +template<> struct DenseMapInfo { + static inline int getEmptyKey() { return 0x7fffffff; } + static inline int getTombstoneKey() { return -0x7fffffff - 1; } + static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); } + static bool isEqual(const int& LHS, const int& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for longs. +template<> struct DenseMapInfo { + static inline long getEmptyKey() { + return (1UL << (sizeof(long) * 8 - 1)) - 1UL; + } + static inline long getTombstoneKey() { return getEmptyKey() - 1L; } + static unsigned getHashValue(const long& Val) { + return (unsigned)(Val * 37UL); + } + static bool isEqual(const long& LHS, const long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for long longs. +template<> struct DenseMapInfo { + static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; } + static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; } + static unsigned getHashValue(const long long& Val) { + return (unsigned)(Val * 37ULL); + } + static bool isEqual(const long long& LHS, + const long long& RHS) { + return LHS == RHS; + } +}; + +// Provide DenseMapInfo for all pairs whose members have info. 
+template +struct DenseMapInfo > { + typedef std::pair Pair; + typedef DenseMapInfo FirstInfo; + typedef DenseMapInfo SecondInfo; + + static inline Pair getEmptyKey() { + return std::make_pair(FirstInfo::getEmptyKey(), + SecondInfo::getEmptyKey()); + } + static inline Pair getTombstoneKey() { + return std::make_pair(FirstInfo::getTombstoneKey(), + SecondInfo::getTombstoneKey()); + } + static unsigned getHashValue(const Pair& PairVal) { + uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32 + | (uint64_t)SecondInfo::getHashValue(PairVal.second); + key += ~(key << 32); + key ^= (key >> 22); + key += ~(key << 13); + key ^= (key >> 8); + key += (key << 3); + key ^= (key >> 15); + key += ~(key << 27); + key ^= (key >> 31); + return (unsigned)key; + } + static bool isEqual(const Pair &LHS, const Pair &RHS) { + return FirstInfo::isEqual(LHS.first, RHS.first) && + SecondInfo::isEqual(LHS.second, RHS.second); + } +}; + +} // end namespace objc + +#endif diff --git a/runtime/llvm-MathExtras.h b/runtime/llvm-MathExtras.h new file mode 100644 index 0000000..7baad41 --- /dev/null +++ b/runtime/llvm-MathExtras.h @@ -0,0 +1,480 @@ +//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains some functions that are useful for math stuff. +// +//===----------------------------------------------------------------------===// + +// Taken from llvmCore-3425.0.31. + +#ifndef LLVM_SUPPORT_MATHEXTRAS_H +#define LLVM_SUPPORT_MATHEXTRAS_H + +namespace objc { + +// NOTE: The following support functions use the _32/_64 extensions instead of +// type overloading so that signed and unsigned integers can be used without +// ambiguity. + +/// Hi_32 - This function returns the high 32 bits of a 64 bit value. +inline uint32_t Hi_32(uint64_t Value) { + return static_cast(Value >> 32); +} + +/// Lo_32 - This function returns the low 32 bits of a 64 bit value. +inline uint32_t Lo_32(uint64_t Value) { + return static_cast(Value); +} + +/// isInt - Checks if an integer fits into the given bit width. +template +inline bool isInt(int64_t x) { + return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); +} +// Template specializations to get better code for common cases. +template<> +inline bool isInt<8>(int64_t x) { + return static_cast(x) == x; +} +template<> +inline bool isInt<16>(int64_t x) { + return static_cast(x) == x; +} +template<> +inline bool isInt<32>(int64_t x) { + return static_cast(x) == x; +} + +/// isShiftedInt - Checks if a signed integer is an N bit number shifted +/// left by S. +template +inline bool isShiftedInt(int64_t x) { + return isInt(x) && (x % (1< +inline bool isUInt(uint64_t x) { + return N >= 64 || x < (UINT64_C(1)< +inline bool isUInt<8>(uint64_t x) { + return static_cast(x) == x; +} +template<> +inline bool isUInt<16>(uint64_t x) { + return static_cast(x) == x; +} +template<> +inline bool isUInt<32>(uint64_t x) { + return static_cast(x) == x; +} + +/// isShiftedUInt - Checks if a unsigned integer is an N bit number shifted +/// left by S. +template +inline bool isShiftedUInt(uint64_t x) { + return isUInt(x) && (x % (1<> (64 - N))); +} + +/// isIntN - Checks if an signed integer fits into the given (dynamic) +/// bit width. 
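// ---- Editorial sketch, not part of the objc4-706 patch ----
// The bit-width predicates above as concrete numbers. They are plain inline
// functions rather than constexpr, so the checks are runtime asserts:
#include <cassert>
namespace objc_bitwidth_demo {
    inline void check() {
        assert( objc::isInt<8>(127)  && !objc::isInt<8>(128));    // [-128, 127]
        assert( objc::isInt<8>(-128) && !objc::isInt<8>(-129));
        assert( objc::isUInt<8>(255) && !objc::isUInt<8>(256));   // [0, 255]
        assert( objc::isShiftedInt<8, 2>(508));                   // 508 == 127 << 2
    }
}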
+inline bool isIntN(unsigned N, int64_t x) { + return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); +} + +/// isMask_32 - This function returns true if the argument is a sequence of ones +/// starting at the least significant bit with the remainder zero (32 bit +/// version). Ex. isMask_32(0x0000FFFFU) == true. +inline bool isMask_32(uint32_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// isMask_64 - This function returns true if the argument is a sequence of ones +/// starting at the least significant bit with the remainder zero (64 bit +/// version). +inline bool isMask_64(uint64_t Value) { + return Value && ((Value + 1) & Value) == 0; +} + +/// isShiftedMask_32 - This function returns true if the argument contains a +/// sequence of ones with the remainder zero (32 bit version.) +/// Ex. isShiftedMask_32(0x0000FF00U) == true. +inline bool isShiftedMask_32(uint32_t Value) { + return isMask_32((Value - 1) | Value); +} + +/// isShiftedMask_64 - This function returns true if the argument contains a +/// sequence of ones with the remainder zero (64 bit version.) +inline bool isShiftedMask_64(uint64_t Value) { + return isMask_64((Value - 1) | Value); +} + +/// isPowerOf2_32 - This function returns true if the argument is a power of +/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) +inline bool isPowerOf2_32(uint32_t Value) { + return Value && !(Value & (Value - 1)); +} + +/// isPowerOf2_64 - This function returns true if the argument is a power of two +/// > 0 (64 bit edition.) +inline bool isPowerOf2_64(uint64_t Value) { + return Value && !(Value & (Value - int64_t(1L))); +} + +/// CountLeadingZeros_32 - this function performs the platform optimal form of +/// counting the number of zeros from the most significant bit to the first one +/// bit. Ex. CountLeadingZeros_32(0x00F000FF) == 8. +/// Returns 32 if the word is zero. +inline unsigned CountLeadingZeros_32(uint32_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 + // PowerPC is defined for __builtin_clz(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (!Value) return 32; +#endif + Count = __builtin_clz(Value); +#else + if (!Value) return 32; + Count = 0; + // bisection method for count leading zeros + for (unsigned Shift = 32 >> 1; Shift; Shift >>= 1) { + uint32_t Tmp = Value >> Shift; + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } +#endif + return Count; +} + +/// CountLeadingOnes_32 - this function performs the operation of +/// counting the number of ones from the most significant bit to the first zero +/// bit. Ex. CountLeadingOnes_32(0xFF0FFF00) == 8. +/// Returns 32 if the word is all ones. +inline unsigned CountLeadingOnes_32(uint32_t Value) { + return CountLeadingZeros_32(~Value); +} + +/// CountLeadingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the most significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. 
+inline unsigned CountLeadingZeros_64(uint64_t Value) { + unsigned Count; // result +#if __GNUC__ >= 4 + // PowerPC is defined for __builtin_clzll(0) +#if !defined(__ppc__) && !defined(__ppc64__) + if (!Value) return 64; +#endif + Count = __builtin_clzll(Value); +#else + if (sizeof(long) == sizeof(int64_t)) { + if (!Value) return 64; + Count = 0; + // bisection method for count leading zeros + for (unsigned Shift = 64 >> 1; Shift; Shift >>= 1) { + uint64_t Tmp = Value >> Shift; + if (Tmp) { + Value = Tmp; + } else { + Count |= Shift; + } + } + } else { + // get hi portion + uint32_t Hi = Hi_32(Value); + + // if some bits in hi portion + if (Hi) { + // leading zeros in hi portion plus all bits in lo portion + Count = CountLeadingZeros_32(Hi); + } else { + // get lo portion + uint32_t Lo = Lo_32(Value); + // same as 32 bit value + Count = CountLeadingZeros_32(Lo)+32; + } + } +#endif + return Count; +} + +/// CountLeadingOnes_64 - This function performs the operation +/// of counting the number of ones from the most significant bit to the first +/// zero bit (64 bit edition.) +/// Returns 64 if the word is all ones. +inline unsigned CountLeadingOnes_64(uint64_t Value) { + return CountLeadingZeros_64(~Value); +} + +/// CountTrailingZeros_32 - this function performs the platform optimal form of +/// counting the number of zeros from the least significant bit to the first one +/// bit. Ex. CountTrailingZeros_32(0xFF00FF00) == 8. +/// Returns 32 if the word is zero. +inline unsigned CountTrailingZeros_32(uint32_t Value) { +#if __GNUC__ >= 4 + return Value ? __builtin_ctz(Value) : 32; +#else + static const unsigned Mod37BitPosition[] = { + 32, 0, 1, 26, 2, 23, 27, 0, 3, 16, 24, 30, 28, 11, 0, 13, + 4, 7, 17, 0, 25, 22, 31, 15, 29, 10, 12, 6, 0, 21, 14, 9, + 5, 20, 8, 19, 18 + }; + return Mod37BitPosition[(-Value & Value) % 37]; +#endif +} + +/// CountTrailingOnes_32 - this function performs the operation of +/// counting the number of ones from the least significant bit to the first zero +/// bit. Ex. CountTrailingOnes_32(0x00FF00FF) == 8. +/// Returns 32 if the word is all ones. +inline unsigned CountTrailingOnes_32(uint32_t Value) { + return CountTrailingZeros_32(~Value); +} + +/// CountTrailingZeros_64 - This function performs the platform optimal form +/// of counting the number of zeros from the least significant bit to the first +/// one bit (64 bit edition.) +/// Returns 64 if the word is zero. +inline unsigned CountTrailingZeros_64(uint64_t Value) { +#if __GNUC__ >= 4 + return Value ? __builtin_ctzll(Value) : 64; +#else + static const unsigned Mod67Position[] = { + 64, 0, 1, 39, 2, 15, 40, 23, 3, 12, 16, 59, 41, 19, 24, 54, + 4, 64, 13, 10, 17, 62, 60, 28, 42, 30, 20, 51, 25, 44, 55, + 47, 5, 32, 65, 38, 14, 22, 11, 58, 18, 53, 63, 9, 61, 27, + 29, 50, 43, 46, 31, 37, 21, 57, 52, 8, 26, 49, 45, 36, 56, + 7, 48, 35, 6, 34, 33, 0 + }; + return Mod67Position[(-Value & Value) % 67]; +#endif +} + +/// CountTrailingOnes_64 - This function performs the operation +/// of counting the number of ones from the least significant bit to the first +/// zero bit (64 bit edition.) +/// Returns 64 if the word is all ones. +inline unsigned CountTrailingOnes_64(uint64_t Value) { + return CountTrailingZeros_64(~Value); +} + +/// CountPopulation_32 - this function counts the number of set bits in a value. +/// Ex. CountPopulation(0xF000F000) = 8 +/// Returns 0 if the word is zero. 
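A standalone sketch (not part of the patch) of the (-Value & Value) idiom behind the modulus-based CountTrailingZeros fallbacks above: it isolates the lowest set bit, whose position is the trailing-zero count. Names are local to the sketch.

// Standalone sketch: isolate the lowest set bit and read off its position.
#include <cassert>
#include <cstdint>

static unsigned ctz32_naive(uint32_t v) {
    if (!v) return 32;
    unsigned n = 0;
    while (!(v & 1)) { v >>= 1; ++n; }
    return n;
}

static unsigned ctz32_via_lowest_bit(uint32_t v) {
    if (!v) return 32;
    uint32_t lowest = v & (0u - v);   // same value as (-v & v); exactly one bit set
    unsigned n = 0;
    while (lowest >>= 1) ++n;         // position of that single bit
    return n;
}

int main() {
    assert(ctz32_via_lowest_bit(0xFF00FF00) == 8);   // example from the header comment above
    for (uint32_t v = 0; v < 100000; ++v)
        assert(ctz32_via_lowest_bit(v) == ctz32_naive(v));
    return 0;
}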
+inline unsigned CountPopulation_32(uint32_t Value) { +#if __GNUC__ >= 4 + return __builtin_popcount(Value); +#else + uint32_t v = Value - ((Value >> 1) & 0x55555555); + v = (v & 0x33333333) + ((v >> 2) & 0x33333333); + return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; +#endif +} + +/// CountPopulation_64 - this function counts the number of set bits in a value, +/// (64 bit edition.) +inline unsigned CountPopulation_64(uint64_t Value) { +#if __GNUC__ >= 4 + return __builtin_popcountll(Value); +#else + uint64_t v = Value - ((Value >> 1) & 0x5555555555555555ULL); + v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); + v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; + return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); +#endif +} + +/// Log2_32 - This function returns the floor log base 2 of the specified value, +/// -1 if the value is zero. (32 bit edition.) +/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2 +inline unsigned Log2_32(uint32_t Value) { + return 31 - CountLeadingZeros_32(Value); +} + +/// Log2_64 - This function returns the floor log base 2 of the specified value, +/// -1 if the value is zero. (64 bit edition.) +inline unsigned Log2_64(uint64_t Value) { + return 63 - CountLeadingZeros_64(Value); +} + +/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified +/// value, 32 if the value is zero. (32 bit edition). +/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3 +inline unsigned Log2_32_Ceil(uint32_t Value) { + return 32-CountLeadingZeros_32(Value-1); +} + +/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified +/// value, 64 if the value is zero. (64 bit edition.) +inline unsigned Log2_64_Ceil(uint64_t Value) { + return 64-CountLeadingZeros_64(Value-1); +} + +/// GreatestCommonDivisor64 - Return the greatest common divisor of the two +/// values using Euclid's algorithm. +inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) { + while (B) { + uint64_t T = B; + B = A % B; + A = T; + } + return A; +} + +/// BitsToDouble - This function takes a 64-bit integer and returns the bit +/// equivalent double. +inline double BitsToDouble(uint64_t Bits) { + union { + uint64_t L; + double D; + } T; + T.L = Bits; + return T.D; +} + +/// BitsToFloat - This function takes a 32-bit integer and returns the bit +/// equivalent float. +inline float BitsToFloat(uint32_t Bits) { + union { + uint32_t I; + float F; + } T; + T.I = Bits; + return T.F; +} + +/// DoubleToBits - This function takes a double and returns the bit +/// equivalent 64-bit integer. Note that copying doubles around +/// changes the bits of NaNs on some hosts, notably x86, so this +/// routine cannot be used if these bits are needed. +inline uint64_t DoubleToBits(double Double) { + union { + uint64_t L; + double D; + } T; + T.D = Double; + return T.L; +} + +/// FloatToBits - This function takes a float and returns the bit +/// equivalent 32-bit integer. Note that copying floats around +/// changes the bits of NaNs on some hosts, notably x86, so this +/// routine cannot be used if these bits are needed. +inline uint32_t FloatToBits(float Float) { + union { + uint32_t I; + float F; + } T; + T.F = Float; + return T.I; +} + +/// Platform-independent wrappers for the C99 isnan() function. +int IsNAN(float f); +int IsNAN(double d); + +/// Platform-independent wrappers for the C99 isinf() function. +int IsInf(float f); +int IsInf(double d); + +/// MinAlign - A and B are either alignments or offsets. 
Return the minimum +/// alignment that may be assumed after adding the two together. +inline uint64_t MinAlign(uint64_t A, uint64_t B) { + // The largest power of 2 that divides both A and B. + return (A | B) & -(A | B); +} + +/// NextPowerOf2 - Returns the next power of two (in 64-bits) +/// that is strictly greater than A. Returns zero on overflow. +inline uint64_t NextPowerOf2(uint64_t A) { + A |= (A >> 1); + A |= (A >> 2); + A |= (A >> 4); + A |= (A >> 8); + A |= (A >> 16); + A |= (A >> 32); + return A + 1; +} + +/// NextPowerOf2 - Returns the next power of two (in 32-bits) +/// that is strictly greater than A. Returns zero on overflow. +inline uint32_t NextPowerOf2(uint32_t A) { + A |= (A >> 1); + A |= (A >> 2); + A |= (A >> 4); + A |= (A >> 8); + A |= (A >> 16); + return A + 1; +} + +/// Returns the next integer (mod 2**64) that is greater than or equal to +/// \p Value and is a multiple of \p Align. \p Align must be non-zero. +/// +/// Examples: +/// \code +/// RoundUpToAlignment(5, 8) = 8 +/// RoundUpToAlignment(17, 8) = 24 +/// RoundUpToAlignment(~0LL, 8) = 0 +/// \endcode +inline uint64_t RoundUpToAlignment(uint64_t Value, uint64_t Align) { + return ((Value + Align - 1) / Align) * Align; +} + +/// Returns the offset to the next integer (mod 2**64) that is greater than +/// or equal to \p Value and is a multiple of \p Align. \p Align must be +/// non-zero. +inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { + return RoundUpToAlignment(Value, Align) - Value; +} + +/// abs64 - absolute value of a 64-bit int. Not all environments support +/// "abs" on whatever their name for the 64-bit int type is. The absolute +/// value of the largest negative number is undefined, as with "abs". +inline int64_t abs64(int64_t x) { + return (x < 0) ? -x : x; +} + +/// SignExtend32 - Sign extend B-bit number x to 32-bit int. +/// Usage int32_t r = SignExtend32<5>(x); +template inline int32_t SignExtend32(uint32_t x) { + return int32_t(x << (32 - B)) >> (32 - B); +} + +/// \brief Sign extend number in the bottom B bits of X to a 32-bit int. +/// Requires 0 < B <= 32. +inline int32_t SignExtend32(uint32_t X, unsigned B) { + return int32_t(X << (32 - B)) >> (32 - B); +} + +/// SignExtend64 - Sign extend B-bit number x to 64-bit int. +/// Usage int64_t r = SignExtend64<5>(x); +template inline int64_t SignExtend64(uint64_t x) { + return int64_t(x << (64 - B)) >> (64 - B); +} + +/// \brief Sign extend number in the bottom B bits of X to a 64-bit int. +/// Requires 0 < B <= 64. +inline int64_t SignExtend64(uint64_t X, unsigned B) { + return int64_t(X << (64 - B)) >> (64 - B); +} + +} // End llvm namespace + +#endif diff --git a/runtime/llvm-type_traits.h b/runtime/llvm-type_traits.h new file mode 100644 index 0000000..bbe38d4 --- /dev/null +++ b/runtime/llvm-type_traits.h @@ -0,0 +1,221 @@ +//===- llvm/Support/type_traits.h - Simplfied type traits -------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides a template class that determines if a type is a class or +// not. The basic mechanism, based on using the pointer to member function of +// a zero argument to a function was "boosted" from the boost type_traits +// library. See http://www.boost.org/ for all the gory details. 
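A standalone sketch (not part of the patch) reproducing the documented behaviour of RoundUpToAlignment and NextPowerOf2 from the MathExtras helpers above; the lowercase mirrors are assumptions for this sketch only.

// Standalone sketch: alignment rounding and next-power-of-two, verified with
// the examples given in the comments above.
#include <cassert>
#include <cstdint>

static uint64_t roundUpToAlignment(uint64_t Value, uint64_t Align) {
    return ((Value + Align - 1) / Align) * Align;   // same formula as above
}

static uint64_t nextPowerOf2(uint64_t A) {
    // Smear the highest set bit downward, then add one.
    A |= (A >> 1);  A |= (A >> 2);  A |= (A >> 4);
    A |= (A >> 8);  A |= (A >> 16); A |= (A >> 32);
    return A + 1;
}

int main() {
    assert(roundUpToAlignment(5, 8)  == 8);      // examples from the header comment
    assert(roundUpToAlignment(17, 8) == 24);
    assert(roundUpToAlignment(~0ULL, 8) == 0);   // wraps mod 2**64
    assert(nextPowerOf2(5) == 8);                // strictly greater than the input
    assert(nextPowerOf2(8) == 16);
    assert(nextPowerOf2(0) == 1);
    return 0;
}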
+// +//===----------------------------------------------------------------------===// + +// Taken from llvmCore-3425.0.31. + +#ifndef LLVM_SUPPORT_TYPE_TRAITS_H +#define LLVM_SUPPORT_TYPE_TRAITS_H + +#include +#include + +#ifndef __has_feature +#define LLVM_DEFINED_HAS_FEATURE +#define __has_feature(x) 0 +#endif + +// This is actually the conforming implementation which works with abstract +// classes. However, enough compilers have trouble with it that most will use +// the one in boost/type_traits/object_traits.hpp. This implementation actually +// works with VC7.0, but other interactions seem to fail when we use it. + +namespace objc { + +namespace dont_use +{ + // These two functions should never be used. They are helpers to + // the is_class template below. They cannot be located inside + // is_class because doing so causes at least GCC to think that + // the value of the "value" enumerator is not constant. Placing + // them out here (for some strange reason) allows the sizeof + // operator against them to magically be constant. This is + // important to make the is_class::value idiom zero cost. it + // evaluates to a constant 1 or 0 depending on whether the + // parameter T is a class or not (respectively). + template char is_class_helper(void(T::*)()); + template double is_class_helper(...); +} + +template +struct is_class +{ + // is_class<> metafunction due to Paul Mensonides (leavings@attbi.com). For + // more details: + // http://groups.google.com/groups?hl=en&selm=000001c1cc83%24e154d5e0%247772e50c%40c161550a&rnum=1 +public: + static const bool value = + sizeof(char) == sizeof(dont_use::is_class_helper(0)); +}; + + +/// isPodLike - This is a type trait that is used to determine whether a given +/// type can be copied around with memcpy instead of running ctors etc. +template +struct isPodLike { +#if __has_feature(is_trivially_copyable) + // If the compiler supports the is_trivially_copyable trait use it, as it + // matches the definition of isPodLike closely. + static const bool value = __is_trivially_copyable(T); +#else + // If we don't know anything else, we can (at least) assume that all non-class + // types are PODs. + static const bool value = !is_class::value; +#endif +}; + +// std::pair's are pod-like if their elements are. +template +struct isPodLike > { + static const bool value = isPodLike::value && isPodLike::value; +}; + + +template +struct integral_constant { + typedef T value_type; + static const value_type value = v; + typedef integral_constant type; + operator value_type() { return value; } +}; + +typedef integral_constant true_type; +typedef integral_constant false_type; + +/// \brief Metafunction that determines whether the two given types are +/// equivalent. +template struct is_same : public false_type {}; +template struct is_same : public true_type {}; + +/// \brief Metafunction that removes const qualification from a type. +template struct remove_const { typedef T type; }; +template struct remove_const { typedef T type; }; + +/// \brief Metafunction that removes volatile qualification from a type. +template struct remove_volatile { typedef T type; }; +template struct remove_volatile { typedef T type; }; + +/// \brief Metafunction that removes both const and volatile qualification from +/// a type. +template struct remove_cv { + typedef typename remove_const::type>::type type; +}; + +/// \brief Helper to implement is_integral metafunction. 
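A standalone sketch (not part of the patch) of the sizeof/overload trick that is_class above borrows from Boost: a pointer-to-member type T::* is only well formed when T is a class, so the char-returning overload is viable only for class types. The sketch namespace and names are assumptions, not runtime API.

// Standalone sketch: is_class via overload resolution in an unevaluated sizeof.
#include <cassert>

namespace sketch {
    template <typename T> char   pick(void (T::*)());   // viable only for class types
    template <typename T> double pick(...);             // fallback for everything else

    template <typename T>
    struct is_class_like {
        static const bool value = sizeof(pick<T>(0)) == sizeof(char);
    };
}

struct SomeClass {};
enum SomeEnum { kValue };

int main() {
    assert(sketch::is_class_like<SomeClass>::value);
    assert(!sketch::is_class_like<int>::value);
    assert(!sketch::is_class_like<SomeEnum>::value);
    assert(!sketch::is_class_like<SomeClass*>::value);
    return 0;
}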
+template struct is_integral_impl : false_type {}; +template <> struct is_integral_impl< bool> : true_type {}; +template <> struct is_integral_impl< char> : true_type {}; +template <> struct is_integral_impl< signed char> : true_type {}; +template <> struct is_integral_impl : true_type {}; +template <> struct is_integral_impl< wchar_t> : true_type {}; +template <> struct is_integral_impl< short> : true_type {}; +template <> struct is_integral_impl : true_type {}; +template <> struct is_integral_impl< int> : true_type {}; +template <> struct is_integral_impl : true_type {}; +template <> struct is_integral_impl< long> : true_type {}; +template <> struct is_integral_impl : true_type {}; +template <> struct is_integral_impl< long long> : true_type {}; +template <> struct is_integral_impl : true_type {}; + +/// \brief Metafunction that determines whether the given type is an integral +/// type. +template +struct is_integral : is_integral_impl {}; + +/// \brief Metafunction to remove reference from a type. +template struct remove_reference { typedef T type; }; +template struct remove_reference { typedef T type; }; + +/// \brief Metafunction that determines whether the given type is a pointer +/// type. +template struct is_pointer : false_type {}; +template struct is_pointer : true_type {}; +template struct is_pointer : true_type {}; +template struct is_pointer : true_type {}; +template struct is_pointer : true_type {}; + +/// \brief Metafunction that determines whether the given type is either an +/// integral type or an enumeration type. +/// +/// Note that this accepts potentially more integral types than we whitelist +/// above for is_integral because it is based on merely being convertible +/// implicitly to an integral type. +template class is_integral_or_enum { + // Provide an overload which can be called with anything implicitly + // convertible to an unsigned long long. This should catch integer types and + // enumeration types at least. We blacklist classes with conversion operators + // below. + static double check_int_convertible(unsigned long long); + static char check_int_convertible(...); + + typedef typename remove_reference::type UnderlyingT; + static UnderlyingT &nonce_instance; + +public: + static const bool + value = (!is_class::value && !is_pointer::value && + !is_same::value && + !is_same::value && + sizeof(char) != sizeof(check_int_convertible(nonce_instance))); +}; + +// enable_if_c - Enable/disable a template based on a metafunction +template +struct enable_if_c { + typedef T type; +}; + +template struct enable_if_c { }; + +// enable_if - Enable/disable a template based on a metafunction +template +struct enable_if : public enable_if_c { }; + +namespace dont_use { + template char base_of_helper(const volatile Base*); + template double base_of_helper(...); +} + +/// is_base_of - Metafunction to determine whether one type is a base class of +/// (or identical to) another type. +template +struct is_base_of { + static const bool value + = is_class::value && is_class::value && + sizeof(char) == sizeof(dont_use::base_of_helper((Derived*)0)); +}; + +// remove_pointer - Metafunction to turn Foo* into Foo. Defined in +// C++0x [meta.trans.ptr]. 
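A standalone sketch (not part of the patch) showing how an enable_if_c-style guard, as defined above, switches an overload on and off through SFINAE; the mirror lives in a local sketch namespace rather than namespace objc.

// Standalone sketch: overload selection with a local enable_if_c mirror.
#include <cassert>

namespace sketch {
    template <bool Cond, typename T = void>
    struct enable_if_c { typedef T type; };   // enabled: has ::type

    template <typename T>
    struct enable_if_c<false, T> { };         // disabled: no ::type

    // Exactly one of these two templates survives substitution for a given flag.
    template <bool Flag>
    typename enable_if_c<Flag, int>::type describe()  { return 1; }

    template <bool Flag>
    typename enable_if_c<!Flag, int>::type describe() { return 2; }
}

int main() {
    assert(sketch::describe<true>()  == 1);
    assert(sketch::describe<false>() == 2);
    return 0;
}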
+template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { typedef T type; }; +template struct remove_pointer { + typedef T type; }; + +template +struct conditional { typedef T type; }; + +template +struct conditional { typedef F type; }; + +} + +#ifdef LLVM_DEFINED_HAS_FEATURE +#undef __has_feature +#endif + +#endif diff --git a/runtime/maptable.h b/runtime/maptable.h new file mode 100644 index 0000000..e43da78 --- /dev/null +++ b/runtime/maptable.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 1999-2003, 2006-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* maptable.h + Scalable hash table of mappings. + Bertrand, August 1990 + Copyright 1990-1996 NeXT Software, Inc. +*/ + +#ifndef _OBJC_MAPTABLE_H_ +#define _OBJC_MAPTABLE_H_ + +#ifndef _OBJC_PRIVATE_H_ +# define OBJC_MAP_AVAILABILITY \ + __OSX_DEPRECATED(10.0, 10.1, "NXMapTable is deprecated") \ + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +#else +# define OBJC_MAP_AVAILABILITY +#endif + +#include + +__BEGIN_DECLS + +/*************** Definitions ***************/ + + /* This module allows hashing of arbitrary associations [key -> value]. Keys and values must be pointers or integers, and client is responsible for allocating/deallocating this data. A deallocation call-back is provided. + NX_MAPNOTAKEY (-1) is used internally as a marker, and therefore keys must always be different from -1. + As well-behaved scalable data structures, hash tables double in size when they start becoming full, thus guaranteeing both average constant time access and linear size. */ + +typedef struct _NXMapTable { + /* private data structure; may change */ + const struct _NXMapTablePrototype *prototype; + unsigned count; + unsigned nbBucketsMinusOne; + void *buckets; +} NXMapTable OBJC_MAP_AVAILABILITY; + +typedef struct _NXMapTablePrototype { + unsigned (*hash)(NXMapTable *, const void *key); + int (*isEqual)(NXMapTable *, const void *key1, const void *key2); + void (*free)(NXMapTable *, void *key, void *value); + int style; /* reserved for future expansion; currently 0 */ +} NXMapTablePrototype OBJC_MAP_AVAILABILITY; + + /* invariants assumed by the implementation: + A - key != -1 + B - key1 == key2 => hash(key1) == hash(key2) + when key varies over time, hash(key) must remain invariant + e.g. 
if string key, the string must not be changed + C - isEqual(key1, key2) => key1 == key2 + */ + +#define NX_MAPNOTAKEY ((void *)(-1)) + +/*************** Functions ***************/ + +OBJC_EXPORT NXMapTable *NXCreateMapTableFromZone(NXMapTablePrototype prototype, unsigned capacity, void *z) OBJC_MAP_AVAILABILITY; +OBJC_EXPORT NXMapTable *NXCreateMapTable(NXMapTablePrototype prototype, unsigned capacity) OBJC_MAP_AVAILABILITY; + /* capacity is only a hint; 0 creates a small table */ + +OBJC_EXPORT void NXFreeMapTable(NXMapTable *table) OBJC_MAP_AVAILABILITY; + /* call free for each pair, and recovers table */ + +OBJC_EXPORT void NXResetMapTable(NXMapTable *table) OBJC_MAP_AVAILABILITY; + /* free each pair; keep current capacity */ + +OBJC_EXPORT BOOL NXCompareMapTables(NXMapTable *table1, NXMapTable *table2) OBJC_MAP_AVAILABILITY; + /* Returns YES if the two sets are equal (each member of table1 in table2, and table have same size) */ + +OBJC_EXPORT unsigned NXCountMapTable(NXMapTable *table) OBJC_MAP_AVAILABILITY; + /* current number of data in table */ + +OBJC_EXPORT void *NXMapMember(NXMapTable *table, const void *key, void **value) OBJC_MAP_AVAILABILITY; + /* return original table key or NX_MAPNOTAKEY. If key is found, value is set */ + +OBJC_EXPORT void *NXMapGet(NXMapTable *table, const void *key) OBJC_MAP_AVAILABILITY; + /* return original corresponding value or NULL. When NULL need be stored as value, NXMapMember can be used to test for presence */ + +OBJC_EXPORT void *NXMapInsert(NXMapTable *table, const void *key, const void *value) OBJC_MAP_AVAILABILITY; + /* override preexisting pair; Return previous value or NULL. */ + +OBJC_EXPORT void *NXMapRemove(NXMapTable *table, const void *key) OBJC_MAP_AVAILABILITY; + /* previous value or NULL is returned */ + +/* Iteration over all elements of a table consists in setting up an iteration state and then to progress until all entries have been visited. An example of use for counting elements in a table is: + unsigned count = 0; + const MyKey *key; + const MyValue *value; + NXMapState state = NXInitMapState(table); + while(NXNextMapState(table, &state, &key, &value)) { + count++; + } +*/ + +typedef struct {int index;} NXMapState OBJC_MAP_AVAILABILITY; + /* callers should not rely on actual contents of the struct */ + +OBJC_EXPORT NXMapState NXInitMapState(NXMapTable *table) OBJC_MAP_AVAILABILITY; + +OBJC_EXPORT int NXNextMapState(NXMapTable *table, NXMapState *state, const void **key, const void **value) OBJC_MAP_AVAILABILITY; + /* returns 0 when all elements have been visited */ + +/*************** Conveniences ***************/ + +OBJC_EXPORT const NXMapTablePrototype NXPtrValueMapPrototype OBJC_MAP_AVAILABILITY; + /* hashing is pointer/integer hashing; + isEqual is identity; + free is no-op. */ +OBJC_EXPORT const NXMapTablePrototype NXStrValueMapPrototype OBJC_MAP_AVAILABILITY; + /* hashing is string hashing; + isEqual is strcmp; + free is no-op. */ +OBJC_EXPORT const NXMapTablePrototype NXObjectMapPrototype OBJC2_UNAVAILABLE; + /* for objects; uses methods: hash, isEqual:, free, all for key. */ + +__END_DECLS + +#endif /* _OBJC_MAPTABLE_H_ */ diff --git a/runtime/maptable.mm b/runtime/maptable.mm new file mode 100644 index 0000000..0413ecd --- /dev/null +++ b/runtime/maptable.mm @@ -0,0 +1,453 @@ +/* + * Copyright (c) 1999-2003, 2005-2007 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* maptable.m + Copyright 1990-1996 NeXT Software, Inc. + Created by Bertrand Serlet, August 1990 + */ + + +#include +#include +#include + +#include "objc-private.h" +#include "maptable.h" +#include "hashtable2.h" + + +/****** Macros and utilities ****************************/ + +#if defined(DEBUG) + #define INLINE +#else + #define INLINE inline +#endif + +typedef struct _MapPair { + const void *key; + const void *value; +} MapPair; + +static INLINE unsigned xorHash(unsigned hash) { + unsigned xored = (hash & 0xffff) ^ (hash >> 16); + return ((xored * 65521) + hash); +} + +static INLINE unsigned bucketOf(NXMapTable *table, const void *key) { + unsigned hash = (table->prototype->hash)(table, key); + return hash & table->nbBucketsMinusOne; +} + +static INLINE int isEqual(NXMapTable *table, const void *key1, const void *key2) { + return (key1 == key2) ? 1 : (table->prototype->isEqual)(table, key1, key2); +} + +static INLINE unsigned nextIndex(NXMapTable *table, unsigned index) { + return (index + 1) & table->nbBucketsMinusOne; +} + +static INLINE void *allocBuckets(void *z, unsigned nb) { + MapPair *pairs = 1+(MapPair *)malloc_zone_malloc((malloc_zone_t *)z, ((nb+1) * sizeof(MapPair))); + MapPair *pair = pairs; + while (nb--) { pair->key = NX_MAPNOTAKEY; pair->value = NULL; pair++; } + return pairs; +} + +static INLINE void freeBuckets(void *p) { + free(-1+(MapPair *)p); +} + +/***** Global data and bootstrap **********************/ + +static int isEqualPrototype (const void *info, const void *data1, const void *data2) { + NXHashTablePrototype *proto1 = (NXHashTablePrototype *) data1; + NXHashTablePrototype *proto2 = (NXHashTablePrototype *) data2; + + return (proto1->hash == proto2->hash) && (proto1->isEqual == proto2->isEqual) && (proto1->free == proto2->free) && (proto1->style == proto2->style); + }; + +static uintptr_t hashPrototype (const void *info, const void *data) { + NXHashTablePrototype *proto = (NXHashTablePrototype *) data; + + return NXPtrHash(info, (void*)proto->hash) ^ NXPtrHash(info, (void*)proto->isEqual) ^ NXPtrHash(info, (void*)proto->free) ^ (uintptr_t) proto->style; + }; + +static NXHashTablePrototype protoPrototype = { + hashPrototype, isEqualPrototype, NXNoEffectFree, 0 +}; + +static NXHashTable *prototypes = NULL; + /* table of all prototypes */ + +/**** Fundamentals Operations **************/ + +NXMapTable *NXCreateMapTableFromZone(NXMapTablePrototype prototype, unsigned capacity, void *z) { + NXMapTable *table = (NXMapTable *)malloc_zone_malloc((malloc_zone_t *)z, sizeof(NXMapTable)); + NXMapTablePrototype *proto; + if (! 
prototypes) prototypes = NXCreateHashTable(protoPrototype, 0, NULL); + if (! prototype.hash || ! prototype.isEqual || ! prototype.free || prototype.style) { + _objc_inform("*** NXCreateMapTable: invalid creation parameters\n"); + return NULL; + } + proto = (NXMapTablePrototype *)NXHashGet(prototypes, &prototype); + if (! proto) { + proto = (NXMapTablePrototype *)malloc(sizeof(NXMapTablePrototype)); + *proto = prototype; + (void)NXHashInsert(prototypes, proto); + } + table->prototype = proto; table->count = 0; + table->nbBucketsMinusOne = exp2u(log2u(capacity)+1) - 1; + table->buckets = allocBuckets(z, table->nbBucketsMinusOne + 1); + return table; +} + +NXMapTable *NXCreateMapTable(NXMapTablePrototype prototype, unsigned capacity) { + return NXCreateMapTableFromZone(prototype, capacity, malloc_default_zone()); +} + +void NXFreeMapTable(NXMapTable *table) { + NXResetMapTable(table); + freeBuckets(table->buckets); + free(table); +} + +void NXResetMapTable(NXMapTable *table) { + MapPair *pairs = (MapPair *)table->buckets; + void (*freeProc)(struct _NXMapTable *, void *, void *) = table->prototype->free; + unsigned index = table->nbBucketsMinusOne + 1; + while (index--) { + if (pairs->key != NX_MAPNOTAKEY) { + freeProc(table, (void *)pairs->key, (void *)pairs->value); + pairs->key = NX_MAPNOTAKEY; pairs->value = NULL; + } + pairs++; + } + table->count = 0; +} + +BOOL NXCompareMapTables(NXMapTable *table1, NXMapTable *table2) { + if (table1 == table2) return YES; + if (table1->count != table2->count) return NO; + else { + const void *key; + const void *value; + NXMapState state = NXInitMapState(table1); + while (NXNextMapState(table1, &state, &key, &value)) { + if (NXMapMember(table2, key, (void**)&value) == NX_MAPNOTAKEY) return NO; + } + return YES; + } +} + +unsigned NXCountMapTable(NXMapTable *table) { return table->count; } + +static INLINE void *_NXMapMember(NXMapTable *table, const void *key, void **value) { + MapPair *pairs = (MapPair *)table->buckets; + unsigned index = bucketOf(table, key); + MapPair *pair = pairs + index; + if (pair->key == NX_MAPNOTAKEY) return NX_MAPNOTAKEY; + if (isEqual(table, pair->key, key)) { + *value = (void *)pair->value; + return (void *)pair->key; + } else { + unsigned index2 = index; + while ((index2 = nextIndex(table, index2)) != index) { + pair = pairs + index2; + if (pair->key == NX_MAPNOTAKEY) return NX_MAPNOTAKEY; + if (isEqual(table, pair->key, key)) { + *value = (void *)pair->value; + return (void *)pair->key; + } + } + return NX_MAPNOTAKEY; + } +} + +void *NXMapMember(NXMapTable *table, const void *key, void **value) { + return _NXMapMember(table, key, value); +} + +void *NXMapGet(NXMapTable *table, const void *key) { + void *value; + return (_NXMapMember(table, key, &value) != NX_MAPNOTAKEY) ? 
value : NULL; +} + +static void _NXMapRehash(NXMapTable *table) { + MapPair *pairs = (MapPair *)table->buckets; + MapPair *pair = pairs; + unsigned numBuckets = table->nbBucketsMinusOne + 1; + unsigned index = numBuckets; + unsigned oldCount = table->count; + + table->nbBucketsMinusOne = 2 * numBuckets - 1; + table->count = 0; + table->buckets = allocBuckets(malloc_zone_from_ptr(table), table->nbBucketsMinusOne + 1); + while (index--) { + if (pair->key != NX_MAPNOTAKEY) { + (void)NXMapInsert(table, pair->key, pair->value); + } + pair++; + } + if (oldCount != table->count) + _objc_inform("*** maptable: count differs after rehashing; probably indicates a broken invariant: there are x and y such as isEqual(x, y) is TRUE but hash(x) != hash (y)\n"); + freeBuckets(pairs); +} + +void *NXMapInsert(NXMapTable *table, const void *key, const void *value) { + MapPair *pairs = (MapPair *)table->buckets; + unsigned index = bucketOf(table, key); + MapPair *pair = pairs + index; + if (key == NX_MAPNOTAKEY) { + _objc_inform("*** NXMapInsert: invalid key: -1\n"); + return NULL; + } + + unsigned numBuckets = table->nbBucketsMinusOne + 1; + + if (pair->key == NX_MAPNOTAKEY) { + pair->key = key; pair->value = value; + table->count++; + if (table->count * 4 > numBuckets * 3) _NXMapRehash(table); + return NULL; + } + + if (isEqual(table, pair->key, key)) { + const void *old = pair->value; + if (old != value) pair->value = value;/* avoid writing unless needed! */ + return (void *)old; + } else if (table->count == numBuckets) { + /* no room: rehash and retry */ + _NXMapRehash(table); + return NXMapInsert(table, key, value); + } else { + unsigned index2 = index; + while ((index2 = nextIndex(table, index2)) != index) { + pair = pairs + index2; + if (pair->key == NX_MAPNOTAKEY) { + pair->key = key; pair->value = value; + table->count++; + if (table->count * 4 > numBuckets * 3) _NXMapRehash(table); + return NULL; + } + if (isEqual(table, pair->key, key)) { + const void *old = pair->value; + if (old != value) pair->value = value;/* avoid writing unless needed! */ + return (void *)old; + } + } + /* no room: can't happen! */ + _objc_inform("**** NXMapInsert: bug\n"); + return NULL; + } +} + +static int mapRemove = 0; + +void *NXMapRemove(NXMapTable *table, const void *key) { + MapPair *pairs = (MapPair *)table->buckets; + unsigned index = bucketOf(table, key); + MapPair *pair = pairs + index; + unsigned chain = 1; /* number of non-nil pairs in a row */ + int found = 0; + const void *old = NULL; + if (pair->key == NX_MAPNOTAKEY) return NULL; + mapRemove ++; + /* compute chain */ + { + unsigned index2 = index; + if (isEqual(table, pair->key, key)) {found ++; old = pair->value; } + while ((index2 = nextIndex(table, index2)) != index) { + pair = pairs + index2; + if (pair->key == NX_MAPNOTAKEY) break; + if (isEqual(table, pair->key, key)) {found ++; old = pair->value; } + chain++; + } + } + if (! found) return NULL; + if (found != 1) _objc_inform("**** NXMapRemove: incorrect table\n"); + /* remove then reinsert */ + { + MapPair buffer[16]; + MapPair *aux = (chain > 16) ? (MapPair *)malloc(sizeof(MapPair)*(chain-1)) : buffer; + unsigned auxnb = 0; + int nb = chain; + unsigned index2 = index; + while (nb--) { + pair = pairs + index2; + if (! 
isEqual(table, pair->key, key)) aux[auxnb++] = *pair; + pair->key = NX_MAPNOTAKEY; pair->value = NULL; + index2 = nextIndex(table, index2); + } + table->count -= chain; + if (auxnb != chain-1) _objc_inform("**** NXMapRemove: bug\n"); + while (auxnb--) NXMapInsert(table, aux[auxnb].key, aux[auxnb].value); + if (chain > 16) free(aux); + } + return (void *)old; +} + +NXMapState NXInitMapState(NXMapTable *table) { + NXMapState state; + state.index = table->nbBucketsMinusOne + 1; + return state; +} + +int NXNextMapState(NXMapTable *table, NXMapState *state, const void **key, const void **value) { + MapPair *pairs = (MapPair *)table->buckets; + while (state->index--) { + MapPair *pair = pairs + state->index; + if (pair->key != NX_MAPNOTAKEY) { + *key = pair->key; *value = pair->value; + return YES; + } + } + return NO; +} + + +/*********************************************************************** +* NXMapKeyCopyingInsert +* Like NXMapInsert, but strdups the key if necessary. +* Used to prevent stale pointers when bundles are unloaded. +**********************************************************************/ +void *NXMapKeyCopyingInsert(NXMapTable *table, const void *key, const void *value) +{ + void *realKey; + void *realValue = NULL; + + if ((realKey = NXMapMember(table, key, &realValue)) != NX_MAPNOTAKEY) { + // key DOES exist in table - use table's key for insertion + } else { + // key DOES NOT exist in table - copy the new key before insertion + realKey = (void *)strdupIfMutable((char *)key); + } + return NXMapInsert(table, realKey, value); +} + + +/*********************************************************************** +* NXMapKeyFreeingRemove +* Like NXMapRemove, but frees the existing key if necessary. +* Used to prevent stale pointers when bundles are unloaded. +**********************************************************************/ +void *NXMapKeyFreeingRemove(NXMapTable *table, const void *key) +{ + void *realKey; + void *realValue = NULL; + + if ((realKey = NXMapMember(table, key, &realValue)) != NX_MAPNOTAKEY) { + // key DOES exist in table - remove pair and free key + realValue = NXMapRemove(table, realKey); + // free the key from the table, not necessarily the one given + freeIfMutable((char *)realKey); + return realValue; + } else { + // key DOES NOT exist in table - nothing to do + return NULL; + } +} + + +/**** Conveniences *************************************/ + +static unsigned _mapPtrHash(NXMapTable *table, const void *key) { +#ifdef __LP64__ + return (unsigned)(((uintptr_t)key) >> 3); +#else + return ((uintptr_t)key) >> 2; +#endif +} + +static unsigned _mapStrHash(NXMapTable *table, const void *key) { + unsigned hash = 0; + unsigned char *s = (unsigned char *)key; + /* unsigned to avoid a sign-extend */ + /* unroll the loop */ + if (s) for (; ; ) { + if (*s == '\0') break; + hash ^= *s++; + if (*s == '\0') break; + hash ^= *s++ << 8; + if (*s == '\0') break; + hash ^= *s++ << 16; + if (*s == '\0') break; + hash ^= *s++ << 24; + } + return xorHash(hash); +} + +static int _mapPtrIsEqual(NXMapTable *table, const void *key1, const void *key2) { + return key1 == key2; +} + +static int _mapStrIsEqual(NXMapTable *table, const void *key1, const void *key2) { + if (key1 == key2) return YES; + if (! key1) return ! strlen ((char *) key2); + if (! key2) return ! strlen ((char *) key1); + if (((char *) key1)[0] != ((char *) key2)[0]) return NO; + return (strcmp((char *) key1, (char *) key2)) ? 
NO : YES; +} + +static void _mapNoFree(NXMapTable *table, void *key, void *value) {} + +const NXMapTablePrototype NXPtrValueMapPrototype = { + _mapPtrHash, _mapPtrIsEqual, _mapNoFree, 0 +}; + +const NXMapTablePrototype NXStrValueMapPrototype = { + _mapStrHash, _mapStrIsEqual, _mapNoFree, 0 +}; + + +#if !__OBJC2__ && !TARGET_OS_WIN32 + +/* This only works with class Object, which is unavailable. */ + +/* Method prototypes */ +@interface DoesNotExist ++ (id)class; ++ (id)initialize; +- (id)description; +- (const char *)UTF8String; +- (unsigned long)hash; +- (BOOL)isEqual:(id)object; +- (void)free; +@end + +static unsigned _mapObjectHash(NXMapTable *table, const void *key) { + return [(id)key hash]; +} + +static int _mapObjectIsEqual(NXMapTable *table, const void *key1, const void *key2) { + return [(id)key1 isEqual:(id)key2]; +} + +static void _mapObjectFree(NXMapTable *table, void *key, void *value) { + [(id)key free]; +} + +const NXMapTablePrototype NXObjectMapPrototype = { + _mapObjectHash, _mapObjectIsEqual, _mapObjectFree, 0 +}; + +#endif diff --git a/runtime/message.h b/runtime/message.h new file mode 100644 index 0000000..725e912 --- /dev/null +++ b/runtime/message.h @@ -0,0 +1,333 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_MESSAGE_H +#define _OBJC_MESSAGE_H + +#pragma GCC system_header + +#include +#include + +#pragma GCC system_header + +#ifndef OBJC_SUPER +#define OBJC_SUPER + +/// Specifies the superclass of an instance. +struct objc_super { + /// Specifies an instance of a class. + __unsafe_unretained id receiver; + + /// Specifies the particular superclass of the instance to message. +#if !defined(__cplusplus) && !__OBJC2__ + /* For compatibility with old objc-runtime.h header */ + __unsafe_unretained Class class; +#else + __unsafe_unretained Class super_class; +#endif + /* super_class is the first class to search */ +}; +#endif + + +/* Basic Messaging Primitives + * + * On some architectures, use objc_msgSend_stret for some struct return types. + * On some architectures, use objc_msgSend_fpret for some float return types. + * On some architectures, use objc_msgSend_fp2ret for some float return types. + * + * These functions must be cast to an appropriate function pointer type + * before being called. + */ +#if !OBJC_OLD_DISPATCH_PROTOTYPES +OBJC_EXPORT void objc_msgSend(void /* id self, SEL op, ... */ ) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_msgSendSuper(void /* struct objc_super *super, SEL op, ... 
*/ ) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +#else +/** + * Sends a message with a simple return value to an instance of a class. + * + * @param self A pointer to the instance of the class that is to receive the message. + * @param op The selector of the method that handles the message. + * @param ... + * A variable argument list containing the arguments to the method. + * + * @return The return value of the method. + * + * @note When it encounters a method call, the compiler generates a call to one of the + * functions \c objc_msgSend, \c objc_msgSend_stret, \c objc_msgSendSuper, or \c objc_msgSendSuper_stret. + * Messages sent to an object’s superclass (using the \c super keyword) are sent using \c objc_msgSendSuper; + * other messages are sent using \c objc_msgSend. Methods that have data structures as return values + * are sent using \c objc_msgSendSuper_stret and \c objc_msgSend_stret. + */ +OBJC_EXPORT id objc_msgSend(id self, SEL op, ...) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +/** + * Sends a message with a simple return value to the superclass of an instance of a class. + * + * @param super A pointer to an \c objc_super data structure. Pass values identifying the + * context the message was sent to, including the instance of the class that is to receive the + * message and the superclass at which to start searching for the method implementation. + * @param op A pointer of type SEL. Pass the selector of the method that will handle the message. + * @param ... + * A variable argument list containing the arguments to the method. + * + * @return The return value of the method identified by \e op. + * + * @see objc_msgSend + */ +OBJC_EXPORT id objc_msgSendSuper(struct objc_super *super, SEL op, ...) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +#endif + + +/* Struct-returning Messaging Primitives + * + * Use these functions to call methods that return structs on the stack. + * On some architectures, some structures are returned in registers. + * Consult your local function call ABI documentation for details. + * + * These functions must be cast to an appropriate function pointer type + * before being called. + */ +#if !OBJC_OLD_DISPATCH_PROTOTYPES +OBJC_EXPORT void objc_msgSend_stret(void /* id self, SEL op, ... */ ) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +OBJC_EXPORT void objc_msgSendSuper_stret(void /* struct objc_super *super, SEL op, ... */ ) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#else +/** + * Sends a message with a data-structure return value to an instance of a class. + * + * @see objc_msgSend + */ +OBJC_EXPORT void objc_msgSend_stret(id self, SEL op, ...) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; + +/** + * Sends a message with a data-structure return value to the superclass of an instance of a class. + * + * @see objc_msgSendSuper + */ +OBJC_EXPORT void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#endif + + +/* Floating-point-returning Messaging Primitives + * + * Use these functions to call methods that return floating-point values + * on the stack. + * Consult your local function call ABI documentation for details. + * + * arm: objc_msgSend_fpret not used + * i386: objc_msgSend_fpret used for `float`, `double`, `long double`. + * x86-64: objc_msgSend_fpret used for `long double`. + * + * arm: objc_msgSend_fp2ret not used + * i386: objc_msgSend_fp2ret not used + * x86-64: objc_msgSend_fp2ret used for `_Complex long double`. 
+ * + * These functions must be cast to an appropriate function pointer type + * before being called. + */ +#if !OBJC_OLD_DISPATCH_PROTOTYPES + +# if defined(__i386__) + +OBJC_EXPORT void objc_msgSend_fpret(void /* id self, SEL op, ... */ ) + OBJC_AVAILABLE(10.4, 2.0, 9.0, 1.0); + +# elif defined(__x86_64__) + +OBJC_EXPORT void objc_msgSend_fpret(void /* id self, SEL op, ... */ ) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_msgSend_fp2ret(void /* id self, SEL op, ... */ ) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +# endif + +// !OBJC_OLD_DISPATCH_PROTOTYPES +#else +// OBJC_OLD_DISPATCH_PROTOTYPES +# if defined(__i386__) + +/** + * Sends a message with a floating-point return value to an instance of a class. + * + * @see objc_msgSend + * @note On the i386 platform, the ABI for functions returning a floating-point value is + * incompatible with that for functions returning an integral type. On the i386 platform, therefore, + * you must use \c objc_msgSend_fpret for functions returning non-integral type. For \c float or + * \c long \c double return types, cast the function to an appropriate function pointer type first. + */ +OBJC_EXPORT double objc_msgSend_fpret(id self, SEL op, ...) + OBJC_AVAILABLE(10.4, 2.0, 9.0, 1.0); + +/* Use objc_msgSendSuper() for fp-returning messages to super. */ +/* See also objc_msgSendv_fpret() below. */ + +# elif defined(__x86_64__) +/** + * Sends a message with a floating-point return value to an instance of a class. + * + * @see objc_msgSend + */ +OBJC_EXPORT long double objc_msgSend_fpret(id self, SEL op, ...) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +# if __STDC_VERSION__ >= 199901L +OBJC_EXPORT _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +# else +OBJC_EXPORT void objc_msgSend_fp2ret(id self, SEL op, ...) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +# endif + +/* Use objc_msgSendSuper() for fp-returning messages to super. */ +/* See also objc_msgSendv_fpret() below. */ + +# endif + +// OBJC_OLD_DISPATCH_PROTOTYPES +#endif + + +/* Direct Method Invocation Primitives + * Use these functions to call the implementation of a given Method. + * This is faster than calling method_getImplementation() and method_getName(). + * + * The receiver must not be nil. + * + * These functions must be cast to an appropriate function pointer type + * before being called. + */ +#if !OBJC_OLD_DISPATCH_PROTOTYPES +OBJC_EXPORT void method_invoke(void /* id receiver, Method m, ... */ ) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void method_invoke_stret(void /* id receiver, Method m, ... */ ) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#else +OBJC_EXPORT id method_invoke(id receiver, Method m, ...) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void method_invoke_stret(id receiver, Method m, ...) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#endif + + +/* Message Forwarding Primitives + * Use these functions to forward a message as if the receiver did not + * respond to it. + * + * The receiver must not be nil. + * + * class_getMethodImplementation() may return (IMP)_objc_msgForward. + * class_getMethodImplementation_stret() may return (IMP)_objc_msgForward_stret + * + * These functions must be cast to an appropriate function pointer type + * before being called. + * + * Before Mac OS X 10.6, _objc_msgForward must not be called directly + * but may be compared to other IMP values. 
+ */ +#if !OBJC_OLD_DISPATCH_PROTOTYPES +OBJC_EXPORT void _objc_msgForward(void /* id receiver, SEL sel, ... */ ) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +OBJC_EXPORT void _objc_msgForward_stret(void /* id receiver, SEL sel, ... */ ) + OBJC_AVAILABLE(10.6, 3.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#else +OBJC_EXPORT id _objc_msgForward(id receiver, SEL sel, ...) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +OBJC_EXPORT void _objc_msgForward_stret(id receiver, SEL sel, ...) + OBJC_AVAILABLE(10.6, 3.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +#endif + + +/* Variable-argument Messaging Primitives + * + * Use these functions to call methods with a list of arguments, such + * as the one passed to forward:: . + * + * The contents of the argument list are architecture-specific. + * Consult your local function call ABI documentation for details. + * + * These functions must be cast to an appropriate function pointer type + * before being called, except for objc_msgSendv_stret() which must not + * be cast to a struct-returning type. + */ + +typedef void* marg_list; + +OBJC_EXPORT id objc_msgSendv(id self, SEL op, size_t arg_size, marg_list arg_frame) OBJC2_UNAVAILABLE; +OBJC_EXPORT void objc_msgSendv_stret(void *stretAddr, id self, SEL op, size_t arg_size, marg_list arg_frame) OBJC2_UNAVAILABLE; +/* Note that objc_msgSendv_stret() does not return a structure type, + * and should not be cast to do so. This is unlike objc_msgSend_stret() + * and objc_msgSendSuper_stret(). + */ +#if defined(__i386__) +OBJC_EXPORT double objc_msgSendv_fpret(id self, SEL op, unsigned arg_size, marg_list arg_frame) OBJC2_UNAVAILABLE; +#endif + + +/* The following marg_list macros are of marginal utility. They + * are included for compatibility with the old objc-class.h header. */ + +#if !__OBJC2__ + +#define marg_prearg_size 0 + +#define marg_malloc(margs, method) \ + do { \ + margs = (marg_list *)malloc (marg_prearg_size + ((7 + method_getSizeOfArguments(method)) & ~7)); \ + } while (0) + +#define marg_free(margs) \ + do { \ + free(margs); \ + } while (0) + +#define marg_adjustedOffset(method, offset) \ + (marg_prearg_size + offset) + +#define marg_getRef(margs, offset, type) \ + ( (type *)((char *)margs + marg_adjustedOffset(method,offset) ) ) + +#define marg_getValue(margs, offset, type) \ + ( *marg_getRef(margs, offset, type) ) + +#define marg_setValue(margs, offset, type, value) \ + ( marg_getValue(margs, offset, type) = (value) ) + +#endif + +#endif diff --git a/runtime/objc-abi.h b/runtime/objc-abi.h new file mode 100644 index 0000000..7d9e89e --- /dev/null +++ b/runtime/objc-abi.h @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#ifndef _OBJC_ABI_H +#define _OBJC_ABI_H + +/* + * WARNING DANGER HAZARD BEWARE EEK + * + * Everything in this file is for Apple Internal use only. + * These will change in arbitrary OS updates and in unpredictable ways. + * When your program breaks, you get to keep both pieces. + */ + +/* + * objc-abi.h: Declarations for functions used by compiler codegen. + */ + +#include +#include +#include +#include +#include + +/* Runtime startup. */ + +// Old static initializer. Used by old crt1.o and old bug workarounds. +OBJC_EXPORT void _objcInit(void) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/* Images */ + +// Description of an Objective-C image. +// __DATA,__objc_imageinfo stores one of these. +typedef struct objc_image_info { + uint32_t version; // currently 0 + uint32_t flags; + +#if __cplusplus >= 201103L + private: + enum : uint32_t { + IsReplacement = 1<<0, // used for Fix&Continue, now ignored + SupportsGC = 1<<1, // image supports GC + RequiresGC = 1<<2, // image requires GC + OptimizedByDyld = 1<<3, // image is from an optimized shared cache + CorrectedSynthesize = 1<<4, // used for an old workaround, now ignored + IsSimulated = 1<<5, // image compiled for a simulator platform + HasCategoryClassProperties = 1<<6, // class properties in category_t + + SwiftVersionMaskShift = 8, + SwiftVersionMask = 0xff << SwiftVersionMaskShift // Swift ABI version + + }; + public: + enum : uint32_t { + SwiftVersion1 = 1, + SwiftVersion1_2 = 2, + SwiftVersion2 = 3, + SwiftVersion3 = 4 + }; + + public: + bool isReplacement() const { return flags & IsReplacement; } + bool supportsGC() const { return flags & SupportsGC; } + bool requiresGC() const { return flags & RequiresGC; } + bool optimizedByDyld() const { return flags & OptimizedByDyld; } + bool hasCategoryClassProperties() const { return flags & HasCategoryClassProperties; } + bool containsSwift() const { return (flags & SwiftVersionMask) != 0; } + uint32_t swiftVersion() const { return (flags & SwiftVersionMask) >> SwiftVersionMaskShift; } +#endif +} objc_image_info; + +/* +IsReplacement: + Once used for Fix&Continue in old OS X object files (not final linked images) + Not currently used. + +SupportsGC: + App: GC is required. Framework: GC is supported but not required. + +RequiresGC: + Framework: GC is required. + +OptimizedByDyld: + Assorted metadata precooked in the dyld shared cache. + Never set for images outside the shared cache file itself. + +CorrectedSynthesize: + Once used on old iOS to mark images that did not have a particular + miscompile. Not used by the runtime. + +IsSimulated: + Image was compiled for a simulator platform. Not used by the runtime. + +HasClassProperties: + New ABI: category_t.classProperties fields are present. + Old ABI: Set by some compilers. Not used by the runtime. +*/ + + +/* Properties */ + +// Read or write an object property. Not all object properties use these. 
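A standalone sketch (not part of the patch) of how the objc_image_info flag bits laid out above decode; the struct and constants are re-declared locally so the snippet compiles on its own, and the sample flag word is hypothetical.

// Standalone sketch: decoding Swift version and category-class-properties bits.
#include <cassert>
#include <cstdint>

struct image_info_sketch {
    uint32_t version;
    uint32_t flags;
};

static const uint32_t kHasCategoryClassProperties = 1u << 6;   // same bit as above
static const uint32_t kSwiftVersionMaskShift      = 8;
static const uint32_t kSwiftVersionMask           = 0xffu << 8;

static bool containsSwift(const image_info_sketch &ii) {
    return (ii.flags & kSwiftVersionMask) != 0;
}

static uint32_t swiftVersion(const image_info_sketch &ii) {
    return (ii.flags & kSwiftVersionMask) >> kSwiftVersionMaskShift;
}

int main() {
    // A hypothetical image carrying SwiftVersion3 (value 4 per the enum above)
    // and category class properties.
    image_info_sketch ii = { 0, (4u << kSwiftVersionMaskShift) | kHasCategoryClassProperties };
    assert(containsSwift(ii));
    assert(swiftVersion(ii) == 4);
    assert(ii.flags & kHasCategoryClassProperties);
    return 0;
}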
+OBJC_EXPORT id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +OBJC_EXPORT void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); +OBJC_EXPORT void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); +OBJC_EXPORT void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); +OBJC_EXPORT void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); + + +// Read or write a non-object property. Not all uses are C structs, +// and not all C struct properties use this. +OBJC_EXPORT void objc_copyStruct(void *dest, const void *src, ptrdiff_t size, BOOL atomic, BOOL hasStrong) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +// Perform a copy of a C++ object using striped locks. Used by non-POD C++ typed atomic properties. +OBJC_EXPORT void objc_copyCppObjectAtomic(void *dest, const void *src, void (*copyHelper) (void *dest, const void *source)) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); + +/* Classes. */ +#if __OBJC2__ +OBJC_EXPORT IMP _objc_empty_vtable + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +#endif +OBJC_EXPORT struct objc_cache _objc_empty_cache + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + + +/* Messages */ + +#if __OBJC2__ +// objc_msgSendSuper2() takes the current search class, not its superclass. +OBJC_EXPORT id objc_msgSendSuper2(struct objc_super *super, SEL op, ...) + OBJC_AVAILABLE(10.6, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_msgSendSuper2_stret(struct objc_super *super, SEL op,...) + OBJC_AVAILABLE(10.6, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; + +// objc_msgSend_noarg() may be faster for methods with no additional arguments. +OBJC_EXPORT id objc_msgSend_noarg(id self, SEL _cmd) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +#endif + +#if __OBJC2__ +// Debug messengers. Messengers used by the compiler have a debug flavor that +// may perform extra sanity checking. +// Old objc_msgSendSuper() does not have a debug version; this is OBJC2 only. +// *_fixup() do not have debug versions; use non-fixup only for debug mode. +OBJC_EXPORT id objc_msgSend_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +OBJC_EXPORT id objc_msgSendSuper2_debug(struct objc_super *super, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +OBJC_EXPORT void objc_msgSend_stret_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; +OBJC_EXPORT void objc_msgSendSuper2_stret_debug(struct objc_super *super, SEL op,...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; + +# if defined(__i386__) +OBJC_EXPORT double objc_msgSend_fpret_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +# elif defined(__x86_64__) +OBJC_EXPORT long double objc_msgSend_fpret_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +# if __STDC_VERSION__ >= 199901L +OBJC_EXPORT _Complex long double objc_msgSend_fp2ret_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +# else +OBJC_EXPORT void objc_msgSend_fp2ret_debug(id self, SEL op, ...) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); +# endif +# endif + +#endif + +#if __OBJC2__ +// Lookup messengers. +// These are not callable C functions. 
Do not call them directly. +// The caller should set the method parameters, call objc_msgLookup(), +// then immediately call the returned IMP. +// +// Generic ABI: +// - Callee-saved registers are preserved. +// - Receiver and selector registers may be modified. These values must +// be passed to the called IMP. Other parameter registers are preserved. +// - Caller-saved non-parameter registers are not preserved. Some of +// these registers are used to pass data from objc_msgLookup() to +// the called IMP and must not be disturbed by the caller. +// - Red zone is not preserved. +// See each architecture's implementation for details. + +OBJC_EXPORT void objc_msgLookup(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT void objc_msgLookupSuper2(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT void objc_msgLookup_stret(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0) + OBJC_ARM64_UNAVAILABLE; +OBJC_EXPORT void objc_msgLookupSuper2_stret(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0) + OBJC_ARM64_UNAVAILABLE; + +# if defined(__i386__) +OBJC_EXPORT void objc_msgLookup_fpret(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +# elif defined(__x86_64__) +OBJC_EXPORT void objc_msgLookup_fpret(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT void objc_msgLookup_fp2ret(void) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +# endif + +#endif + +#if TARGET_OS_OSX && defined(__x86_64__) +// objc_msgSend_fixup() is used for vtable-dispatchable call sites. +OBJC_EXPORT void objc_msgSend_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_msgSend_stret_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_msgSendSuper2_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_msgSendSuper2_stret_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_msgSend_fpret_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_msgSend_fp2ret_fixup(void) + __OSX_DEPRECATED(10.5, 10.8, "fixup dispatch is no longer optimized") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +#endif + +/* C++-compatible exception handling. */ +#if __OBJC2__ + +// fixme these conflict with C++ compiler's internal definitions +#if !defined(__cplusplus) + +// Vtable for C++ exception typeinfo for Objective-C types. +OBJC_EXPORT const void *objc_ehtype_vtable[] + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +// C++ exception typeinfo for type `id`. +OBJC_EXPORT struct objc_typeinfo OBJC_EHTYPE_id + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +#endif + +// Exception personality function for Objective-C and Objective-C++ code. 
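+// (It is referenced from compiler-emitted unwind tables rather than called from
+// source code; the parameters below follow the common _Unwind personality
+// routine convention.)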
+struct _Unwind_Exception; +struct _Unwind_Context; +OBJC_EXPORT int +__objc_personality_v0(int version, + int actions, + uint64_t exceptionClass, + struct _Unwind_Exception *exceptionObject, + struct _Unwind_Context *context) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +#endif + +/* ARC */ + +OBJC_EXPORT id objc_retainBlock(id) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +/* Non-pointer isa */ + +#if __OBJC2__ + +// Extract class pointer from an isa field. + +#if TARGET_OS_SIMULATOR + // No simulators use nonpointer isa yet. + +#elif __LP64__ +# define OBJC_HAVE_NONPOINTER_ISA 1 +# define OBJC_HAVE_PACKED_NONPOINTER_ISA 1 + +// Packed-isa version. This one is used directly by Swift code. +// (Class)(isa & (uintptr_t)&objc_absolute_packed_isa_class_mask) == class ptr +OBJC_EXPORT const struct { char c; } objc_absolute_packed_isa_class_mask + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +#elif __ARM_ARCH_7K__ >= 2 +# define OBJC_HAVE_NONPOINTER_ISA 1 +# define OBJC_HAVE_INDEXED_NONPOINTER_ISA 1 + +// Indexed-isa version. +// if (isa & (uintptr_t)&objc_absolute_indexed_isa_magic_mask == (uintptr_t)&objc_absolute_indexed_isa_magic_value) { +// uintptr_t index = (isa & (uintptr_t)&objc_absolute_indexed_isa_index_mask) >> (uintptr_t)&objc_absolute_indexed_isa_index_shift; +// cls = objc_indexed_classes[index]; +// } else +// cls = (Class)isa; +// } +OBJC_EXPORT const struct { char c; } objc_absolute_indexed_isa_magic_mask + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT const struct { char c; } objc_absolute_indexed_isa_magic_value + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT const struct { char c; } objc_absolute_indexed_isa_index_mask + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT const struct { char c; } objc_absolute_indexed_isa_index_shift + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +#endif + +// OBJC2 +#endif + +// _OBJC_ABI_H +#endif diff --git a/runtime/objc-accessors.mm b/runtime/objc-accessors.mm new file mode 100644 index 0000000..25ec71b --- /dev/null +++ b/runtime/objc-accessors.mm @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2006-2008 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include + +#include "objc-private.h" +#include "runtime.h" + +// stub interface declarations to make compiler happy. + +@interface __NSCopyable +- (id)copyWithZone:(void *)zone; +@end + +@interface __NSMutableCopyable +- (id)mutableCopyWithZone:(void *)zone; +@end + +// These locks must not be at function scope. 
+static StripedMap PropertyLocks; +static StripedMap StructLocks; +static StripedMap CppObjectLocks; + +#define MUTABLE_COPY 2 + +id objc_getProperty(id self, SEL _cmd, ptrdiff_t offset, BOOL atomic) { + if (offset == 0) { + return object_getClass(self); + } + + // Retain release world + id *slot = (id*) ((char*)self + offset); + if (!atomic) return *slot; + + // Atomic retain release world + spinlock_t& slotlock = PropertyLocks[slot]; + slotlock.lock(); + id value = objc_retain(*slot); + slotlock.unlock(); + + // for performance, we (safely) issue the autorelease OUTSIDE of the spinlock. + return objc_autoreleaseReturnValue(value); +} + + +static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) __attribute__((always_inline)); + +static inline void reallySetProperty(id self, SEL _cmd, id newValue, ptrdiff_t offset, bool atomic, bool copy, bool mutableCopy) +{ + if (offset == 0) { + object_setClass(self, newValue); + return; + } + + id oldValue; + id *slot = (id*) ((char*)self + offset); + + if (copy) { + newValue = [newValue copyWithZone:nil]; + } else if (mutableCopy) { + newValue = [newValue mutableCopyWithZone:nil]; + } else { + if (*slot == newValue) return; + newValue = objc_retain(newValue); + } + + if (!atomic) { + oldValue = *slot; + *slot = newValue; + } else { + spinlock_t& slotlock = PropertyLocks[slot]; + slotlock.lock(); + oldValue = *slot; + *slot = newValue; + slotlock.unlock(); + } + + objc_release(oldValue); +} + +void objc_setProperty(id self, SEL _cmd, ptrdiff_t offset, id newValue, BOOL atomic, signed char shouldCopy) +{ + bool copy = (shouldCopy && shouldCopy != MUTABLE_COPY); + bool mutableCopy = (shouldCopy == MUTABLE_COPY); + reallySetProperty(self, _cmd, newValue, offset, atomic, copy, mutableCopy); +} + +void objc_setProperty_atomic(id self, SEL _cmd, id newValue, ptrdiff_t offset) +{ + reallySetProperty(self, _cmd, newValue, offset, true, false, false); +} + +void objc_setProperty_nonatomic(id self, SEL _cmd, id newValue, ptrdiff_t offset) +{ + reallySetProperty(self, _cmd, newValue, offset, false, false, false); +} + + +void objc_setProperty_atomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset) +{ + reallySetProperty(self, _cmd, newValue, offset, true, true, false); +} + +void objc_setProperty_nonatomic_copy(id self, SEL _cmd, id newValue, ptrdiff_t offset) +{ + reallySetProperty(self, _cmd, newValue, offset, false, true, false); +} + + +// This entry point was designed wrong. When used as a getter, src needs to be locked so that +// if simultaneously used for a setter then there would be contention on src. +// So we need two locks - one of which will be contended. +void objc_copyStruct(void *dest, const void *src, ptrdiff_t size, BOOL atomic, BOOL hasStrong __unused) { + spinlock_t *srcLock = nil; + spinlock_t *dstLock = nil; + if (atomic) { + srcLock = &StructLocks[src]; + dstLock = &StructLocks[dest]; + spinlock_t::lockTwo(srcLock, dstLock); + } + + memmove(dest, src, size); + + if (atomic) { + spinlock_t::unlockTwo(srcLock, dstLock); + } +} + +void objc_copyCppObjectAtomic(void *dest, const void *src, void (*copyHelper) (void *dest, const void *source)) { + spinlock_t *srcLock = &CppObjectLocks[src]; + spinlock_t *dstLock = &CppObjectLocks[dest]; + spinlock_t::lockTwo(srcLock, dstLock); + + // let C++ code perform the actual copy. 
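+    // (Illustrative caller, hypothetical names: for an atomic non-POD C++ property
+    //  such as a std::string ivar, the compiler emits a helper roughly like
+    //      static void copyTitle(void *d, const void *s) { *(std::string *)d = *(const std::string *)s; }
+    //  and passes it here along with the addresses of the ivar and the new value.)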
+ copyHelper(dest, src); + + spinlock_t::unlockTwo(srcLock, dstLock); +} diff --git a/runtime/objc-api.h b/runtime/objc-api.h new file mode 100644 index 0000000..42f88e7 --- /dev/null +++ b/runtime/objc-api.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 1999-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +// Copyright 1988-1996 NeXT Software, Inc. + +#ifndef _OBJC_OBJC_API_H_ +#define _OBJC_OBJC_API_H_ + +#include +#include +#include + +#ifndef __has_feature +# define __has_feature(x) 0 +#endif + +#ifndef __has_extension +# define __has_extension __has_feature +#endif + +#ifndef __has_attribute +# define __has_attribute(x) 0 +#endif + + +/* + * OBJC_API_VERSION 0 or undef: Tiger and earlier API only + * OBJC_API_VERSION 2: Leopard and later API available + */ +#if !defined(OBJC_API_VERSION) +# if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED < __MAC_10_5 +# define OBJC_API_VERSION 0 +# else +# define OBJC_API_VERSION 2 +# endif +#endif + + +/* + * OBJC_NO_GC 1: GC is not supported + * OBJC_NO_GC undef: GC is supported. This SDK no longer supports this mode. + * + * OBJC_NO_GC_API undef: Libraries must export any symbols that + * dual-mode code may links to. + * OBJC_NO_GC_API 1: Libraries need not export GC-related symbols. + */ +#if defined(__OBJC_GC__) +# error Objective-C garbage collection is not supported. +#elif TARGET_OS_OSX + /* GC is unsupported. GC API symbols are exported. */ +# define OBJC_NO_GC 1 +# undef OBJC_NO_GC_API +#else + /* GC is unsupported. GC API symbols are not exported. */ +# define OBJC_NO_GC 1 +# define OBJC_NO_GC_API 1 +#endif + + +/* NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER == 1 + * marks -[NSObject init] as a designated initializer. */ +#if !defined(NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER) +# define NS_ENFORCE_NSOBJECT_DESIGNATED_INITIALIZER 1 +#endif + + +/* OBJC_OLD_DISPATCH_PROTOTYPES == 0 enforces the rule that the dispatch + * functions must be cast to an appropriate function pointer type. 
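+ * For example, a call site is then written with an explicit cast
+ * (a usage sketch only; objc_msgSend itself is declared in message.h):
+ *     id obj = ((id (*)(id, SEL))objc_msgSend)(receiver, @selector(init));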
*/ +#if !defined(OBJC_OLD_DISPATCH_PROTOTYPES) +# define OBJC_OLD_DISPATCH_PROTOTYPES 1 +#endif + + +/* OBJC_AVAILABLE: shorthand for all-OS availability */ +#if !defined(OBJC_AVAILABLE) +# define OBJC_AVAILABLE(x, i, t, w) \ + __OSX_AVAILABLE(x) __IOS_AVAILABLE(i) \ + __TVOS_AVAILABLE(t) __WATCHOS_AVAILABLE(w) +#endif + + +/* OBJC_ISA_AVAILABILITY: `isa` will be deprecated or unavailable + * in the future */ +#if !defined(OBJC_ISA_AVAILABILITY) +# if __OBJC2__ +# define OBJC_ISA_AVAILABILITY __attribute__((deprecated)) +# else +# define OBJC_ISA_AVAILABILITY /* still available */ +# endif +#endif + + +/* OBJC2_UNAVAILABLE: unavailable in objc 2.0, deprecated in Leopard */ +#if !defined(OBJC2_UNAVAILABLE) +# if __OBJC2__ +# define OBJC2_UNAVAILABLE UNAVAILABLE_ATTRIBUTE +# else + /* plain C code also falls here, but this is close enough */ +# define OBJC2_UNAVAILABLE \ + __OSX_DEPRECATED(10.5, 10.5, "not available in __OBJC2__") \ + __IOS_DEPRECATED(2.0, 2.0, "not available in __OBJC2__") \ + __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE +# endif +#endif + +/* OBJC_UNAVAILABLE: unavailable, with a message where supported */ +#if !defined(OBJC_UNAVAILABLE) +# if __has_extension(attribute_unavailable_with_message) +# define OBJC_UNAVAILABLE(_msg) __attribute__((unavailable(_msg))) +# else +# define OBJC_UNAVAILABLE(_msg) __attribute__((unavailable)) +# endif +#endif + +/* OBJC_DEPRECATED: deprecated, with a message where supported */ +#if !defined(OBJC_DEPRECATED) +# if __has_extension(attribute_deprecated_with_message) +# define OBJC_DEPRECATED(_msg) __attribute__((deprecated(_msg))) +# else +# define OBJC_DEPRECATED(_msg) __attribute__((deprecated)) +# endif +#endif + +/* OBJC_ARC_UNAVAILABLE: unavailable with -fobjc-arc */ +#if !defined(OBJC_ARC_UNAVAILABLE) +# if __has_feature(objc_arc) +# define OBJC_ARC_UNAVAILABLE OBJC_UNAVAILABLE("not available in automatic reference counting mode") +# else +# define OBJC_ARC_UNAVAILABLE +# endif +#endif + +/* OBJC_SWIFT_UNAVAILABLE: unavailable in Swift */ +#if !defined(OBJC_SWIFT_UNAVAILABLE) +# if __has_feature(attribute_availability_swift) +# define OBJC_SWIFT_UNAVAILABLE(_msg) __attribute__((availability(swift, unavailable, message=_msg))) +# else +# define OBJC_SWIFT_UNAVAILABLE(_msg) +# endif +#endif + +/* OBJC_ARM64_UNAVAILABLE: unavailable on arm64 (i.e. 
stret dispatch) */ +#if !defined(OBJC_ARM64_UNAVAILABLE) +# if defined(__arm64__) +# define OBJC_ARM64_UNAVAILABLE OBJC_UNAVAILABLE("not available in arm64") +# else +# define OBJC_ARM64_UNAVAILABLE +# endif +#endif + +/* OBJC_GC_UNAVAILABLE: unavailable with -fobjc-gc or -fobjc-gc-only */ +#if !defined(OBJC_GC_UNAVAILABLE) +# define OBJC_GC_UNAVAILABLE +#endif + +#if !defined(OBJC_EXTERN) +# if defined(__cplusplus) +# define OBJC_EXTERN extern "C" +# else +# define OBJC_EXTERN extern +# endif +#endif + +#if !defined(OBJC_VISIBLE) +# if TARGET_OS_WIN32 +# if defined(BUILDING_OBJC) +# define OBJC_VISIBLE __declspec(dllexport) +# else +# define OBJC_VISIBLE __declspec(dllimport) +# endif +# else +# define OBJC_VISIBLE __attribute__((visibility("default"))) +# endif +#endif + +#if !defined(OBJC_EXPORT) +# define OBJC_EXPORT OBJC_EXTERN OBJC_VISIBLE +#endif + +#if !defined(OBJC_IMPORT) +# define OBJC_IMPORT extern +#endif + +#if !defined(OBJC_ROOT_CLASS) +# if __has_attribute(objc_root_class) +# define OBJC_ROOT_CLASS __attribute__((objc_root_class)) +# else +# define OBJC_ROOT_CLASS +# endif +#endif + +#ifndef __DARWIN_NULL +#define __DARWIN_NULL NULL +#endif + +#if !defined(OBJC_INLINE) +# define OBJC_INLINE __inline +#endif + +// Declares an enum type or option bits type as appropriate for each language. +#if (__cplusplus && __cplusplus >= 201103L && (__has_extension(cxx_strong_enums) || __has_feature(objc_fixed_enum))) || (!__cplusplus && __has_feature(objc_fixed_enum)) +#define OBJC_ENUM(_type, _name) enum _name : _type _name; enum _name : _type +#if (__cplusplus) +#define OBJC_OPTIONS(_type, _name) _type _name; enum : _type +#else +#define OBJC_OPTIONS(_type, _name) enum _name : _type _name; enum _name : _type +#endif +#else +#define OBJC_ENUM(_type, _name) _type _name; enum +#define OBJC_OPTIONS(_type, _name) _type _name; enum +#endif + +#endif diff --git a/runtime/objc-auto.h b/runtime/objc-auto.h new file mode 100644 index 0000000..3624af5 --- /dev/null +++ b/runtime/objc-auto.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2004-2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_AUTO_H_ +#define _OBJC_AUTO_H_ + +#pragma GCC system_header + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +// Define OBJC_SILENCE_GC_DEPRECATIONS=1 to temporarily +// silence deprecation warnings for GC functions. + +#if OBJC_SILENCE_GC_DEPRECATIONS +# define OBJC_GC_DEPRECATED(message) +#elif __has_extension(attribute_deprecated_with_message) +# define OBJC_GC_DEPRECATED(message) __attribute__((deprecated(message ". 
Define OBJC_SILENCE_GC_DEPRECATIONS=1 to temporarily silence this diagnostic."))) +#else +# define OBJC_GC_DEPRECATED(message) __attribute__((deprecated)) +#endif + + +enum { + OBJC_RATIO_COLLECTION = (0 << 0), + OBJC_GENERATIONAL_COLLECTION = (1 << 0), + OBJC_FULL_COLLECTION = (2 << 0), + OBJC_EXHAUSTIVE_COLLECTION = (3 << 0), + + OBJC_COLLECT_IF_NEEDED = (1 << 3), + OBJC_WAIT_UNTIL_DONE = (1 << 4), +}; + +enum { + OBJC_CLEAR_RESIDENT_STACK = (1 << 0) +}; + + +#ifndef OBJC_NO_GC + + +/* Out-of-line declarations */ + +OBJC_EXPORT void objc_collect(unsigned long options) + __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT BOOL objc_collectingEnabled(void) + __OSX_DEPRECATED(10.5, 10.8, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT malloc_zone_t *objc_collectableZone(void) + __OSX_DEPRECATED(10.7, 10.8, "it always returns nil") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold) + __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_setCollectionRatio(size_t ratio) + __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtr instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) + __OSX_DEPRECATED(10.6, 10.8, "use OSAtomicCompareAndSwapPtrBarrier instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT id objc_assign_strongCast(id val, id *dest) + __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_assign_global(id val, id *dest) + __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_assign_threadlocal(id val, id *dest) + __OSX_DEPRECATED(10.7, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id 
objc_assign_ivar(id value, id dest, ptrdiff_t offset) + __OSX_DEPRECATED(10.4, 10.8, "use a simple assignment instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void *objc_memmove_collectable(void *dst, const void *src, size_t size) + __OSX_DEPRECATED(10.4, 10.8, "use memmove instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_read_weak(id *location) + __OSX_DEPRECATED(10.5, 10.8, "use a simple read instead, or convert to zeroing __weak") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_assign_weak(id value, id *location) + __OSX_DEPRECATED(10.5, 10.8, "use a simple assignment instead, or convert to zeroing __weak") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_registerThreadWithCollector(void) + __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_unregisterThreadWithCollector(void) + __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_assertRegisteredThreadWithCollector(void) + __OSX_DEPRECATED(10.6, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_clear_stack(unsigned long options) + __OSX_DEPRECATED(10.5, 10.8, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT BOOL objc_is_finalized(void *ptr) + __OSX_DEPRECATED(10.4, 10.8, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_finalizeOnMainThread(Class cls) + __OSX_DEPRECATED(10.5, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT BOOL objc_collecting_enabled(void) + __OSX_DEPRECATED(10.4, 10.5, "it always returns NO") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_set_collection_threshold(size_t threshold) + __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_set_collection_ratio(size_t ratio) + __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_start_collector_thread(void) + __OSX_DEPRECATED(10.4, 10.5, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_startCollectorThread(void) + __OSX_DEPRECATED(10.5, 10.7, "it does nothing") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_allocate_object(Class cls, int extra) + __OSX_DEPRECATED(10.4, 10.4, "use class_createInstance instead") __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + + +/* !defined(OBJC_NO_GC) */ +#else +/* defined(OBJC_NO_GC) */ + + +/* Inline declarations */ + +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_collect(unsigned long options __unused) { } +OBJC_GC_DEPRECATED("it always returns NO") +static OBJC_INLINE BOOL objc_collectingEnabled(void) { return NO; } +#if TARGET_OS_OSX +OBJC_GC_DEPRECATED("it always returns nil") +static OBJC_INLINE malloc_zone_t *objc_collectableZone(void) { return nil; } +#endif +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_setCollectionThreshold(size_t threshold __unused) { } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_setCollectionRatio(size_t ratio __unused) { } +OBJC_GC_DEPRECATED("it does 
nothing") +static OBJC_INLINE void objc_startCollectorThread(void) { } + +#if __has_feature(objc_arc) + +/* Covers for GC memory operations are unavailable in ARC */ + +#else + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtr instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) + { return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); } + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtrBarrier instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) + { return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); } + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtr instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); } + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtrBarrier instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); } + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtr instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); } + +OBJC_GC_DEPRECATED("use OSAtomicCompareAndSwapPtrBarrier instead") +static OBJC_INLINE BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); } + + +OBJC_GC_DEPRECATED("use a simple assignment instead") +static OBJC_INLINE id objc_assign_strongCast(id val, id *dest) + { return (*dest = val); } + +OBJC_GC_DEPRECATED("use a simple assignment instead") +static OBJC_INLINE id objc_assign_global(id val, id *dest) + { return (*dest = val); } + +OBJC_GC_DEPRECATED("use a simple assignment instead") +static OBJC_INLINE id objc_assign_threadlocal(id val, id *dest) + { return (*dest = val); } + +OBJC_GC_DEPRECATED("use a simple assignment instead") +static OBJC_INLINE id objc_assign_ivar(id val, id dest, ptrdiff_t offset) + { return (*(id*)((char *)dest+offset) = val); } + +OBJC_GC_DEPRECATED("use a simple read instead, or convert to zeroing __weak") +static OBJC_INLINE id objc_read_weak(id *location) + { return *location; } + +OBJC_GC_DEPRECATED("use a simple assignment instead, or convert to zeroing __weak") +static OBJC_INLINE id objc_assign_weak(id value, id *location) + { return (*location = value); } + +/* MRC */ +#endif + +OBJC_GC_DEPRECATED("use memmove instead") +static OBJC_INLINE void *objc_memmove_collectable(void *dst, const void *src, size_t size) + { return memmove(dst, src, size); } + +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_finalizeOnMainThread(Class cls __unused) { } +OBJC_GC_DEPRECATED("it always returns NO") +static OBJC_INLINE BOOL objc_is_finalized(void *ptr __unused) { return NO; } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_clear_stack(unsigned long options __unused) { } +OBJC_GC_DEPRECATED("it always returns NO") +static OBJC_INLINE BOOL objc_collecting_enabled(void) { return NO; } +OBJC_GC_DEPRECATED("it does 
nothing") +static OBJC_INLINE void objc_set_collection_threshold(size_t threshold __unused) { } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_set_collection_ratio(size_t ratio __unused) { } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_start_collector_thread(void) { } + +#if __has_feature(objc_arc) +extern id objc_allocate_object(Class cls, int extra) UNAVAILABLE_ATTRIBUTE; +#else +OBJC_EXPORT id class_createInstance(Class cls, size_t extraBytes) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); +OBJC_GC_DEPRECATED("use class_createInstance instead") +static OBJC_INLINE id objc_allocate_object(Class cls, int extra) + { return class_createInstance(cls, extra); } +#endif + +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_registerThreadWithCollector() { } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_unregisterThreadWithCollector() { } +OBJC_GC_DEPRECATED("it does nothing") +static OBJC_INLINE void objc_assertRegisteredThreadWithCollector() { } + +/* defined(OBJC_NO_GC) */ +#endif + + +#endif diff --git a/runtime/objc-auto.mm b/runtime/objc-auto.mm new file mode 100644 index 0000000..bf7dceb --- /dev/null +++ b/runtime/objc-auto.mm @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2004-2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "objc-private.h" + +// GC is no longer supported. + +#if OBJC_NO_GC_API + +// No GC and no GC symbols needed. We're done here. +# if SUPPORT_GC_COMPAT +# error inconsistent config settings +# endif + +#else + +// No GC but we do need to export GC symbols. +// These are mostly the same as the OBJC_NO_GC inline versions in objc-auto.h. 
+ +# if !SUPPORT_GC_COMPAT +# error inconsistent config settings +# endif + +OBJC_EXPORT void objc_collect(unsigned long options __unused) { } +OBJC_EXPORT BOOL objc_collectingEnabled(void) { return NO; } +OBJC_EXPORT void objc_setCollectionThreshold(size_t threshold __unused) { } +OBJC_EXPORT void objc_setCollectionRatio(size_t ratio __unused) { } +OBJC_EXPORT void objc_startCollectorThread(void) { } + +#if TARGET_OS_WIN32 +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) + { void *original = InterlockedCompareExchangePointer((void * volatile *)objectLocation, (void *)replacement, (void *)predicate); return (original == predicate); } + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) + { void *original = InterlockedCompareExchangePointer((void * volatile *)objectLocation, (void *)replacement, (void *)predicate); return (original == predicate); } +#else +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation) + { return OSAtomicCompareAndSwapPtr((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); } + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation) + { return OSAtomicCompareAndSwapPtrBarrier((void *)predicate, (void *)replacement, (void * volatile *)objectLocation); } +#endif + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); } + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); } + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation); } + +OBJC_EXPORT BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation) + { return objc_atomicCompareAndSwapPtrBarrier(predicate, replacement, objectLocation); } + +OBJC_EXPORT id objc_assign_strongCast(id val, id *dest) + { return (*dest = val); } + +OBJC_EXPORT id objc_assign_global(id val, id *dest) + { return (*dest = val); } + +OBJC_EXPORT id objc_assign_threadlocal(id val, id *dest) + { return (*dest = val); } + +OBJC_EXPORT id objc_assign_ivar(id val, id dest, ptrdiff_t offset) + { return (*(id*)((char *)dest+offset) = val); } + +OBJC_EXPORT id objc_read_weak(id *location) + { return *location; } + +OBJC_EXPORT id objc_assign_weak(id value, id *location) + { return (*location = value); } + +OBJC_EXPORT void *objc_memmove_collectable(void *dst, const void *src, size_t size) + { return memmove(dst, src, size); } + +OBJC_EXPORT void objc_finalizeOnMainThread(Class cls __unused) { } +OBJC_EXPORT BOOL objc_is_finalized(void *ptr __unused) { return NO; } +OBJC_EXPORT void objc_clear_stack(unsigned long options __unused) { } + +OBJC_EXPORT BOOL objc_collecting_enabled(void) { return NO; } +OBJC_EXPORT void objc_set_collection_threshold(size_t threshold __unused) { } +OBJC_EXPORT void objc_set_collection_ratio(size_t ratio __unused) { } +OBJC_EXPORT void objc_start_collector_thread(void) { } + +OBJC_EXPORT id objc_allocate_object(Class cls, int extra) + { return class_createInstance(cls, extra); } + +OBJC_EXPORT void 
objc_registerThreadWithCollector() { } +OBJC_EXPORT void objc_unregisterThreadWithCollector() { } +OBJC_EXPORT void objc_assertRegisteredThreadWithCollector() { } + +OBJC_EXPORT malloc_zone_t* objc_collect_init(int(*callback)() __unused) { return nil; } +OBJC_EXPORT void* objc_collectableZone() { return nil; } + +OBJC_EXPORT BOOL objc_isAuto(id object __unused) { return NO; } +OBJC_EXPORT BOOL objc_dumpHeap(char *filename __unused, unsigned long length __unused) + { return NO; } + +// not OBJC_NO_GC_API +#endif diff --git a/runtime/objc-block-trampolines.mm b/runtime/objc-block-trampolines.mm new file mode 100644 index 0000000..ccde02a --- /dev/null +++ b/runtime/objc-block-trampolines.mm @@ -0,0 +1,477 @@ +/* + * Copyright (c) 2010 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*********************************************************************** + * objc-block-trampolines.m + * Author: b.bum + * + **********************************************************************/ + +/*********************************************************************** + * Imports. + **********************************************************************/ +#include "objc-private.h" +#include "runtime.h" + +#include +#include +#include + +// symbols defined in assembly files +// Don't use the symbols directly; they're thumb-biased on some ARM archs. +#define TRAMP(tramp) \ + static inline __unused uintptr_t tramp(void) { \ + extern void *_##tramp; \ + return ((uintptr_t)&_##tramp) & ~1UL; \ + } +// Scalar return +TRAMP(a1a2_tramphead); // trampoline header code +TRAMP(a1a2_firsttramp); // first trampoline +TRAMP(a1a2_trampend); // after the last trampoline + +#if SUPPORT_STRET +// Struct return +TRAMP(a2a3_tramphead); +TRAMP(a2a3_firsttramp); +TRAMP(a2a3_trampend); +#endif + +// argument mode identifier +typedef enum { + ReturnValueInRegisterArgumentMode, +#if SUPPORT_STRET + ReturnValueOnStackArgumentMode, +#endif + + ArgumentModeCount +} ArgumentMode; + + +// We must take care with our data layout on architectures that support +// multiple page sizes. +// +// The trampoline template in __TEXT is sized and aligned with PAGE_MAX_SIZE. +// On some platforms this requires additional linker flags. +// +// When we allocate a page pair, we use PAGE_MAX_SIZE size. +// This allows trampoline code to find its data by subtracting PAGE_MAX_SIZE. +// +// When we allocate a page pair, we use the process's page alignment. 
+// This simplifies allocation because we don't need to force greater than +// default alignment when running with small pages, but it also means +// the trampoline code MUST NOT look for its data by masking with PAGE_MAX_MASK. + +struct TrampolineBlockPagePair +{ + TrampolineBlockPagePair *nextPagePair; // linked list of all pages + TrampolineBlockPagePair *nextAvailablePage; // linked list of pages with available slots + + uintptr_t nextAvailable; // index of next available slot, endIndex() if no more available + + // Payload data: block pointers and free list. + // Bytes parallel with trampoline header code are the fields above or unused + // uint8_t blocks[ PAGE_MAX_SIZE - sizeof(TrampolineBlockPagePair) ] + + // Code: trampoline header followed by trampolines. + // uint8_t trampolines[PAGE_MAX_SIZE]; + + // Per-trampoline block data format: + // initial value is 0 while page data is filled sequentially + // when filled, value is reference to Block_copy()d block + // when empty, value is index of next available slot OR 0 if never used yet + + union Payload { + id block; + uintptr_t nextAvailable; // free list + }; + + static uintptr_t headerSize() { + return (uintptr_t) (a1a2_firsttramp() - a1a2_tramphead()); + } + + static uintptr_t slotSize() { + return 8; + } + + static uintptr_t startIndex() { + // headerSize is assumed to be slot-aligned + return headerSize() / slotSize(); + } + + static uintptr_t endIndex() { + return (uintptr_t)PAGE_MAX_SIZE / slotSize(); + } + + static bool validIndex(uintptr_t index) { + return (index >= startIndex() && index < endIndex()); + } + + Payload *payload(uintptr_t index) { + assert(validIndex(index)); + return (Payload *)((char *)this + index*slotSize()); + } + + IMP trampoline(uintptr_t index) { + assert(validIndex(index)); + char *imp = (char *)this + index*slotSize() + PAGE_MAX_SIZE; +#if __arm__ + imp++; // trampoline is Thumb instructions +#endif + return (IMP)imp; + } + + uintptr_t indexForTrampoline(IMP tramp) { + uintptr_t tramp0 = (uintptr_t)this + PAGE_MAX_SIZE; + uintptr_t start = tramp0 + headerSize(); + uintptr_t end = tramp0 + PAGE_MAX_SIZE; + uintptr_t address = (uintptr_t)tramp; + if (address >= start && address < end) { + return (uintptr_t)(address - tramp0) / slotSize(); + } + return 0; + } + + static void check() { + assert(TrampolineBlockPagePair::slotSize() == 8); + assert(TrampolineBlockPagePair::headerSize() >= sizeof(TrampolineBlockPagePair)); + assert(TrampolineBlockPagePair::headerSize() % TrampolineBlockPagePair::slotSize() == 0); + + // _objc_inform("%p %p %p", a1a2_tramphead(), a1a2_firsttramp(), + // a1a2_trampend()); + assert(a1a2_tramphead() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE + assert(a1a2_tramphead() + PAGE_MAX_SIZE == a1a2_trampend()); +#if SUPPORT_STRET + // _objc_inform("%p %p %p", a2a3_tramphead(), a2a3_firsttramp(), + // a2a3_trampend()); + assert(a2a3_tramphead() % PAGE_SIZE == 0); // not PAGE_MAX_SIZE + assert(a2a3_tramphead() + PAGE_MAX_SIZE == a2a3_trampend()); +#endif + +#if __arm__ + // make sure trampolines are Thumb + extern void *_a1a2_firsttramp; + extern void *_a2a3_firsttramp; + assert(((uintptr_t)&_a1a2_firsttramp) % 2 == 1); + assert(((uintptr_t)&_a2a3_firsttramp) % 2 == 1); +#endif + } + +}; + +// two sets of trampoline pages; one for stack returns and one for register returns +static TrampolineBlockPagePair *headPagePairs[ArgumentModeCount]; + +#pragma mark Utility Functions + +static inline void _lock() { +#if __OBJC2__ + runtimeLock.write(); +#else + classLock.lock(); +#endif +} + +static 
inline void _unlock() { +#if __OBJC2__ + runtimeLock.unlockWrite(); +#else + classLock.unlock(); +#endif +} + +static inline void _assert_locked() { +#if __OBJC2__ + runtimeLock.assertWriting(); +#else + classLock.assertLocked(); +#endif +} + +#pragma mark Trampoline Management Functions +static TrampolineBlockPagePair *_allocateTrampolinesAndData(ArgumentMode aMode) +{ + _assert_locked(); + + vm_address_t dataAddress; + + TrampolineBlockPagePair::check(); + + TrampolineBlockPagePair *headPagePair = headPagePairs[aMode]; + + if (headPagePair) { + assert(headPagePair->nextAvailablePage == nil); + } + + kern_return_t result; + for (int i = 0; i < 5; i++) { + result = vm_allocate(mach_task_self(), &dataAddress, + PAGE_MAX_SIZE * 2, + TRUE | VM_MAKE_TAG(VM_MEMORY_FOUNDATION)); + if (result != KERN_SUCCESS) { + mach_error("vm_allocate failed", result); + return nil; + } + + vm_address_t codeAddress = dataAddress + PAGE_MAX_SIZE; + result = vm_deallocate(mach_task_self(), codeAddress, PAGE_MAX_SIZE); + if (result != KERN_SUCCESS) { + mach_error("vm_deallocate failed", result); + return nil; + } + + uintptr_t codePage; + switch(aMode) { + case ReturnValueInRegisterArgumentMode: + codePage = a1a2_tramphead(); + break; +#if SUPPORT_STRET + case ReturnValueOnStackArgumentMode: + codePage = a2a3_tramphead(); + break; +#endif + default: + _objc_fatal("unknown return mode %d", (int)aMode); + break; + } + vm_prot_t currentProtection, maxProtection; + result = vm_remap(mach_task_self(), &codeAddress, PAGE_MAX_SIZE, + 0, FALSE, mach_task_self(), codePage, TRUE, + ¤tProtection, &maxProtection, VM_INHERIT_SHARE); + if (result != KERN_SUCCESS) { + result = vm_deallocate(mach_task_self(), + dataAddress, PAGE_MAX_SIZE); + if (result != KERN_SUCCESS) { + mach_error("vm_deallocate for retry failed.", result); + return nil; + } + } else { + break; + } + } + + if (result != KERN_SUCCESS) { + return nil; + } + + TrampolineBlockPagePair *pagePair = (TrampolineBlockPagePair *) dataAddress; + pagePair->nextAvailable = pagePair->startIndex(); + pagePair->nextPagePair = nil; + pagePair->nextAvailablePage = nil; + + if (headPagePair) { + TrampolineBlockPagePair *lastPagePair = headPagePair; + while(lastPagePair->nextPagePair) + lastPagePair = lastPagePair->nextPagePair; + + lastPagePair->nextPagePair = pagePair; + headPagePairs[aMode]->nextAvailablePage = pagePair; + } else { + headPagePairs[aMode] = pagePair; + } + + return pagePair; +} + +static TrampolineBlockPagePair * +_getOrAllocatePagePairWithNextAvailable(ArgumentMode aMode) +{ + _assert_locked(); + + TrampolineBlockPagePair *headPagePair = headPagePairs[aMode]; + + if (!headPagePair) + return _allocateTrampolinesAndData(aMode); + + // make sure head page is filled first + if (headPagePair->nextAvailable != headPagePair->endIndex()) + return headPagePair; + + if (headPagePair->nextAvailablePage) // check if there is a page w/a hole + return headPagePair->nextAvailablePage; + + return _allocateTrampolinesAndData(aMode); // tack on a new one +} + +static TrampolineBlockPagePair * +_pageAndIndexContainingIMP(IMP anImp, uintptr_t *outIndex, + TrampolineBlockPagePair **outHeadPagePair) +{ + _assert_locked(); + + for (int arg = 0; arg < ArgumentModeCount; arg++) { + for (TrampolineBlockPagePair *pagePair = headPagePairs[arg]; + pagePair; + pagePair = pagePair->nextPagePair) + { + uintptr_t index = pagePair->indexForTrampoline(anImp); + if (index) { + if (outIndex) *outIndex = index; + if (outHeadPagePair) *outHeadPagePair = headPagePairs[arg]; + return pagePair; + } + 
} + } + + return nil; +} + + +static ArgumentMode +_argumentModeForBlock(id block) +{ + ArgumentMode aMode = ReturnValueInRegisterArgumentMode; + +#if SUPPORT_STRET + if (_Block_has_signature(block) && _Block_use_stret(block)) + aMode = ReturnValueOnStackArgumentMode; +#else + assert(! (_Block_has_signature(block) && _Block_use_stret(block))); +#endif + + return aMode; +} + + +// `block` must already have been copied +IMP +_imp_implementationWithBlockNoCopy(id block) +{ + _assert_locked(); + + ArgumentMode aMode = _argumentModeForBlock(block); + + TrampolineBlockPagePair *pagePair = + _getOrAllocatePagePairWithNextAvailable(aMode); + if (!headPagePairs[aMode]) + headPagePairs[aMode] = pagePair; + + uintptr_t index = pagePair->nextAvailable; + assert(index >= pagePair->startIndex() && index < pagePair->endIndex()); + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); + + uintptr_t nextAvailableIndex = payload->nextAvailable; + if (nextAvailableIndex == 0) { + // First time through (unused slots are zero). Fill sequentially. + // If the page is now full this will now be endIndex(), handled below. + nextAvailableIndex = index + 1; + } + pagePair->nextAvailable = nextAvailableIndex; + if (nextAvailableIndex == pagePair->endIndex()) { + // PagePair is now full (free list or wilderness exhausted) + // Remove from available page linked list + TrampolineBlockPagePair *iterator = headPagePairs[aMode]; + while(iterator && (iterator->nextAvailablePage != pagePair)) { + iterator = iterator->nextAvailablePage; + } + if (iterator) { + iterator->nextAvailablePage = pagePair->nextAvailablePage; + pagePair->nextAvailablePage = nil; + } + } + + payload->block = block; + return pagePair->trampoline(index); +} + + +#pragma mark Public API +IMP imp_implementationWithBlock(id block) +{ + block = Block_copy(block); + _lock(); + IMP returnIMP = _imp_implementationWithBlockNoCopy(block); + _unlock(); + return returnIMP; +} + + +id imp_getBlock(IMP anImp) { + uintptr_t index; + TrampolineBlockPagePair *pagePair; + + if (!anImp) return nil; + + _lock(); + + pagePair = _pageAndIndexContainingIMP(anImp, &index, nil); + + if (!pagePair) { + _unlock(); + return nil; + } + + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); + + if (payload->nextAvailable <= TrampolineBlockPagePair::endIndex()) { + // unallocated + _unlock(); + return nil; + } + + _unlock(); + + return payload->block; +} + +BOOL imp_removeBlock(IMP anImp) { + TrampolineBlockPagePair *pagePair; + TrampolineBlockPagePair *headPagePair; + uintptr_t index; + + if (!anImp) return NO; + + _lock(); + pagePair = _pageAndIndexContainingIMP(anImp, &index, &headPagePair); + + if (!pagePair) { + _unlock(); + return NO; + } + + TrampolineBlockPagePair::Payload *payload = pagePair->payload(index); + id block = payload->block; + // block is released below + + payload->nextAvailable = pagePair->nextAvailable; + pagePair->nextAvailable = index; + + // make sure this page is on available linked list + TrampolineBlockPagePair *pagePairIterator = headPagePair; + + // see if page is the next available page for any existing pages + while (pagePairIterator->nextAvailablePage && + pagePairIterator->nextAvailablePage != pagePair) + { + pagePairIterator = pagePairIterator->nextAvailablePage; + } + + if (! pagePairIterator->nextAvailablePage) { + // if iteration stopped because nextAvail was nil + // add to end of list. 
+ pagePairIterator->nextAvailablePage = pagePair; + pagePair->nextAvailablePage = nil; + } + + _unlock(); + Block_release(block); + return YES; +} diff --git a/runtime/objc-cache-old.h b/runtime/objc-cache-old.h new file mode 100644 index 0000000..7f0245e --- /dev/null +++ b/runtime/objc-cache-old.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2012 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_CACHE_OLD_H +#define _OBJC_CACHE_OLD_H + +#include "objc-private.h" + +__BEGIN_DECLS + +extern IMP _cache_getImp(Class cls, SEL sel); +extern Method _cache_getMethod(Class cls, SEL sel, IMP objc_msgForward_internal_imp); + +extern void flush_cache(Class cls); +extern bool _cache_fill(Class cls, Method meth, SEL sel); +extern void _cache_addForwardEntry(Class cls, SEL sel); +extern IMP _cache_addIgnoredEntry(Class cls, SEL sel); +extern void _cache_free(Cache cache); +extern void _cache_collect(bool collectALot); + +__END_DECLS + +#endif diff --git a/runtime/objc-cache-old.mm b/runtime/objc-cache-old.mm new file mode 100644 index 0000000..12baf36 --- /dev/null +++ b/runtime/objc-cache-old.mm @@ -0,0 +1,1793 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-cache.m +* Method cache management +* Cache flushing +* Cache garbage collection +* Cache instrumentation +* Dedicated allocator for large caches +**********************************************************************/ + + +/*********************************************************************** + * Method cache locking (GrP 2001-1-14) + * + * For speed, objc_msgSend does not acquire any locks when it reads + * method caches. 
Instead, all cache changes are performed so that any + * objc_msgSend running concurrently with the cache mutator will not + * crash or hang or get an incorrect result from the cache. + * + * When cache memory becomes unused (e.g. the old cache after cache + * expansion), it is not immediately freed, because a concurrent + * objc_msgSend could still be using it. Instead, the memory is + * disconnected from the data structures and placed on a garbage list. + * The memory is now only accessible to instances of objc_msgSend that + * were running when the memory was disconnected; any further calls to + * objc_msgSend will not see the garbage memory because the other data + * structures don't point to it anymore. The collecting_in_critical + * function checks the PC of all threads and returns FALSE when all threads + * are found to be outside objc_msgSend. This means any call to objc_msgSend + * that could have had access to the garbage has finished or moved past the + * cache lookup stage, so it is safe to free the memory. + * + * All functions that modify cache data or structures must acquire the + * cacheUpdateLock to prevent interference from concurrent modifications. + * The function that frees cache garbage must acquire the cacheUpdateLock + * and use collecting_in_critical() to flush out cache readers. + * The cacheUpdateLock is also used to protect the custom allocator used + * for large method cache blocks. + * + * Cache readers (PC-checked by collecting_in_critical()) + * objc_msgSend* + * _cache_getImp + * _cache_getMethod + * + * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked) + * _cache_fill (acquires lock) + * _cache_expand (only called from cache_fill) + * _cache_create (only called from cache_expand) + * bcopy (only called from instrumented cache_expand) + * flush_caches (acquires lock) + * _cache_flush (only called from cache_fill and flush_caches) + * _cache_collect_free (only called from cache_expand and cache_flush) + * + * UNPROTECTED cache readers (NOT thread-safe; used for debug info only) + * _cache_print + * _class_printMethodCaches + * _class_printDuplicateCacheEntries + * _class_printMethodCacheStatistics + * + * _class_lookupMethodAndLoadCache is a special case. It may read a + * method triplet out of one cache and store it in another cache. This + * is unsafe if the method triplet is a forward:: entry, because the + * triplet itself could be freed unless _class_lookupMethodAndLoadCache + * were PC-checked or used a lock. Additionally, storing the method + * triplet in both caches would result in double-freeing if both caches + * were flushed or expanded. The solution is for _cache_getMethod to + * ignore all entries whose implementation is _objc_msgForward_impcache, + * so _class_lookupMethodAndLoadCache cannot look at a forward:: entry + * unsafely or place it in multiple caches. + ***********************************************************************/ + +#if !__OBJC2__ + +#include "objc-private.h" +#include "objc-cache-old.h" +#include "hashtable2.h" + +typedef struct { + SEL name; // same layout as struct old_method + void *unused; + IMP imp; // same layout as struct old_method +} cache_entry; + + +/* When _class_slow_grow is non-zero, any given cache is actually grown + * only on the odd-numbered times it becomes full; on the even-numbered + * times, it is simply emptied and re-used. When this flag is zero, + * caches are grown every time. 
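+ * (Concretely: the cache created on the first fill counts as the first growth;
+ * the next time it fills it is emptied and reused, the time after that it is
+ * grown again, and so on.)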
*/ +static const int _class_slow_grow = 1; + +/* For min cache size: clear_cache=1, slow_grow=1 + For max cache size: clear_cache=0, slow_grow=0 */ + +/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */ +enum { + INIT_CACHE_SIZE_LOG2 = 2, + INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2) +}; + + +/* Amount of space required for `count` hash table buckets, knowing that + * one entry is embedded in the cache structure itself. */ +#define TABLE_SIZE(count) ((count - 1) * sizeof(cache_entry *)) + + +#if !TARGET_OS_WIN32 +# define CACHE_ALLOCATOR +#endif + +/* Custom cache allocator parameters. + * CACHE_REGION_SIZE must be a multiple of CACHE_QUANTUM. */ +#define CACHE_ALLOCATOR_MIN 512 +#define CACHE_QUANTUM (CACHE_ALLOCATOR_MIN+sizeof(struct objc_cache)-sizeof(cache_entry*)) +#define CACHE_REGION_SIZE ((128*1024 / CACHE_QUANTUM) * CACHE_QUANTUM) +// #define CACHE_REGION_SIZE ((256*1024 / CACHE_QUANTUM) * CACHE_QUANTUM) + +static uintptr_t cache_allocator_mask_for_size(size_t size) +{ + return (size - sizeof(struct objc_cache)) / sizeof(cache_entry *); +} + +static size_t cache_allocator_size_for_mask(uintptr_t mask) +{ + size_t requested = sizeof(struct objc_cache) + TABLE_SIZE(mask+1); + size_t actual = CACHE_QUANTUM; + while (actual < requested) actual += CACHE_QUANTUM; + return actual; +} + + +/* Cache instrumentation data. Immediately follows the cache block itself. */ +#ifdef OBJC_INSTRUMENTED +typedef struct +{ + unsigned int hitCount; // cache lookup success tally + unsigned int hitProbes; // sum entries checked to hit + unsigned int maxHitProbes; // max entries checked to hit + unsigned int missCount; // cache lookup no-find tally + unsigned int missProbes; // sum entries checked to miss + unsigned int maxMissProbes; // max entries checked to miss + unsigned int flushCount; // cache flush tally + unsigned int flushedEntries; // sum cache entries flushed + unsigned int maxFlushedEntries; // max cache entries flushed +} CacheInstrumentation; + +#define CACHE_INSTRUMENTATION(cache) (CacheInstrumentation *) &cache->buckets[cache->mask + 1]; +#endif + +/* Cache filling and flushing instrumentation */ + +static int totalCacheFills = 0; + +#ifdef OBJC_INSTRUMENTED +unsigned int LinearFlushCachesCount = 0; +unsigned int LinearFlushCachesVisitedCount = 0; +unsigned int MaxLinearFlushCachesVisitedCount = 0; +unsigned int NonlinearFlushCachesCount = 0; +unsigned int NonlinearFlushCachesClassCount = 0; +unsigned int NonlinearFlushCachesVisitedCount = 0; +unsigned int MaxNonlinearFlushCachesVisitedCount = 0; +unsigned int IdealFlushCachesCount = 0; +unsigned int MaxIdealFlushCachesCount = 0; +#endif + + +/*********************************************************************** +* A static empty cache. All classes initially point at this cache. +* When the first message is sent it misses in the cache, and when +* the cache is grown it checks for this case and uses malloc rather +* than realloc. This avoids the need to check for NULL caches in the +* messenger. 
+***********************************************************************/ + +struct objc_cache _objc_empty_cache = +{ + 0, // mask + 0, // occupied + { NULL } // buckets +}; +#ifdef OBJC_INSTRUMENTED +CacheInstrumentation emptyCacheInstrumentation = {0}; +#endif + + +/* Local prototypes */ + +static bool _cache_isEmpty(Cache cache); +static Cache _cache_malloc(uintptr_t slotCount); +static Cache _cache_create(Class cls); +static Cache _cache_expand(Class cls); + +static int _collecting_in_critical(void); +static void _garbage_make_room(void); +static void _cache_collect_free(void *data, size_t size); + +#if defined(CACHE_ALLOCATOR) +static bool cache_allocator_is_block(void *block); +static Cache cache_allocator_calloc(size_t size); +static void cache_allocator_free(void *block); +#endif + +/*********************************************************************** +* Cache statistics for OBJC_PRINT_CACHE_SETUP +**********************************************************************/ +static unsigned int cache_counts[16]; +static size_t cache_allocations; +static size_t cache_collections; +static size_t cache_allocator_regions; + +static size_t log2u(size_t x) +{ + unsigned int log; + + log = 0; + while (x >>= 1) + log += 1; + + return log; +} + + +/*********************************************************************** +* _cache_isEmpty. +* Returns YES if the given cache is some empty cache. +* Empty caches should never be allocated on the heap. +**********************************************************************/ +static bool _cache_isEmpty(Cache cache) +{ + return (cache == NULL || cache == (Cache)&_objc_empty_cache || cache->mask == 0); +} + + +/*********************************************************************** +* _cache_malloc. +* +* Called from _cache_create() and cache_expand() +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static Cache _cache_malloc(uintptr_t slotCount) +{ + Cache new_cache; + size_t size; + + cacheUpdateLock.assertLocked(); + + // Allocate table (why not check for failure?) + size = sizeof(struct objc_cache) + TABLE_SIZE(slotCount); +#if defined(OBJC_INSTRUMENTED) + // Custom cache allocator can't handle instrumentation. + size += sizeof(CacheInstrumentation); + new_cache = calloc(size, 1); + new_cache->mask = slotCount - 1; +#elif !defined(CACHE_ALLOCATOR) + // fixme cache allocator implementation isn't 64-bit clean + new_cache = calloc(size, 1); + new_cache->mask = (unsigned int)(slotCount - 1); +#else + if (size < CACHE_ALLOCATOR_MIN) { + new_cache = (Cache)calloc(size, 1); + new_cache->mask = slotCount - 1; + // occupied and buckets and instrumentation are all zero + } else { + new_cache = cache_allocator_calloc(size); + // mask is already set + // occupied and buckets and instrumentation are all zero + } +#endif + + if (PrintCaches) { + size_t bucket = log2u(slotCount); + if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) { + cache_counts[bucket]++; + } + cache_allocations++; + } + + return new_cache; +} + +/*********************************************************************** +* _cache_free_block. +* +* Called from _cache_free() and _cache_collect_free(). +* block may be a cache or a forward:: entry. +* If block is a cache, forward:: entries it points to will NOT be freed. +* Cache locks: cacheUpdateLock must be held by the caller. 
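+* The isPowerOf2() check in the PrintCaches accounting below guards
+* against blocks that are forward:: entries rather than caches: their
+* reinterpreted mask is unlikely to yield a power-of-two slot count, so
+* the per-size statistics are only adjusted for real caches.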
+**********************************************************************/ +static inline int isPowerOf2(unsigned long l) { return 1 == __builtin_popcountl(l); } +static void _cache_free_block(void *block) +{ + cacheUpdateLock.assertLocked(); + +#if !TARGET_OS_WIN32 + if (PrintCaches) { + Cache cache = (Cache)block; + size_t slotCount = cache->mask + 1; + if (isPowerOf2(slotCount)) { + size_t bucket = log2u(slotCount); + if (bucket < sizeof(cache_counts) / sizeof(cache_counts[0])) { + cache_counts[bucket]--; + } + } + } +#endif + +#if defined(CACHE_ALLOCATOR) + if (cache_allocator_is_block(block)) { + cache_allocator_free(block); + } else +#endif + { + free(block); + } +} + + +/*********************************************************************** +* _cache_free. +* +* Called from _objc_remove_classes_in_image(). +* forward:: entries in the cache ARE freed. +* Cache locks: cacheUpdateLock must NOT be held by the caller. +**********************************************************************/ +void _cache_free(Cache cache) +{ + unsigned int i; + + mutex_locker_t lock(cacheUpdateLock); + + for (i = 0; i < cache->mask + 1; i++) { + cache_entry *entry = (cache_entry *)cache->buckets[i]; + if (entry && entry->imp == _objc_msgForward_impcache) { + _cache_free_block(entry); + } + } + + _cache_free_block(cache); +} + + +/*********************************************************************** +* _cache_create. +* +* Called from _cache_expand(). +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static Cache _cache_create(Class cls) +{ + Cache new_cache; + + cacheUpdateLock.assertLocked(); + + // Allocate new cache block + new_cache = _cache_malloc(INIT_CACHE_SIZE); + + // Install the cache + cls->cache = new_cache; + + // Clear the grow flag so that we will re-use the current storage, + // rather than actually grow the cache, when expanding the cache + // for the first time + if (_class_slow_grow) { + cls->setShouldGrowCache(false); + } + + // Return our creation + return new_cache; +} + + +/*********************************************************************** +* _cache_expand. +* +* Called from _cache_fill () +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static Cache _cache_expand(Class cls) +{ + Cache old_cache; + Cache new_cache; + uintptr_t slotCount; + uintptr_t index; + + cacheUpdateLock.assertLocked(); + + // First growth goes from empty cache to a real one + old_cache = cls->cache; + if (_cache_isEmpty(old_cache)) + return _cache_create (cls); + + if (_class_slow_grow) { + // Cache grows every other time only. + if (cls->shouldGrowCache()) { + // Grow the cache this time. Don't grow next time. + cls->setShouldGrowCache(false); + } + else { + // Reuse the current cache storage this time. Do grow next time. 
+ cls->setShouldGrowCache(true); + + // Clear the valid-entry counter + old_cache->occupied = 0; + + // Invalidate all the cache entries + for (index = 0; index < old_cache->mask + 1; index += 1) + { + // Remember what this entry was, so we can possibly + // deallocate it after the bucket has been invalidated + cache_entry *oldEntry = (cache_entry *)old_cache->buckets[index]; + + // Skip invalid entry + if (!oldEntry) + continue; + + // Invalidate this entry + old_cache->buckets[index] = NULL; + + // Deallocate "forward::" entry + if (oldEntry->imp == _objc_msgForward_impcache) { + _cache_collect_free (oldEntry, sizeof(cache_entry)); + } + } + + // Return the same old cache, freshly emptied + return old_cache; + } + } + + // Double the cache size + slotCount = (old_cache->mask + 1) << 1; + + new_cache = _cache_malloc(slotCount); + +#ifdef OBJC_INSTRUMENTED + // Propagate the instrumentation data + { + CacheInstrumentation *oldCacheData; + CacheInstrumentation *newCacheData; + + oldCacheData = CACHE_INSTRUMENTATION(old_cache); + newCacheData = CACHE_INSTRUMENTATION(new_cache); + bcopy ((const char *)oldCacheData, (char *)newCacheData, sizeof(CacheInstrumentation)); + } +#endif + + // Deallocate "forward::" entries from the old cache + for (index = 0; index < old_cache->mask + 1; index++) { + cache_entry *entry = (cache_entry *)old_cache->buckets[index]; + if (entry && entry->imp == _objc_msgForward_impcache) { + _cache_collect_free (entry, sizeof(cache_entry)); + } + } + + // Install new cache + cls->cache = new_cache; + + // Deallocate old cache, try freeing all the garbage + _cache_collect_free (old_cache, old_cache->mask * sizeof(cache_entry *)); + _cache_collect(false); + + return new_cache; +} + + +/*********************************************************************** +* _cache_fill. Add the specified method to the specified class' cache. +* Returns NO if the cache entry wasn't added: cache was busy, +* class is still being initialized, new entry is a duplicate. +* +* Called only from _class_lookupMethodAndLoadCache and +* class_respondsToMethod and _cache_addForwardEntry. +* +* Cache locks: cacheUpdateLock must not be held. +**********************************************************************/ +bool _cache_fill(Class cls, Method smt, SEL sel) +{ + uintptr_t newOccupied; + uintptr_t index; + cache_entry **buckets; + cache_entry *entry; + Cache cache; + + cacheUpdateLock.assertUnlocked(); + + // Never cache before +initialize is done + if (!cls->isInitialized()) { + return NO; + } + + // Keep tally of cache additions + totalCacheFills += 1; + + mutex_locker_t lock(cacheUpdateLock); + + entry = (cache_entry *)smt; + + cache = cls->cache; + + // Make sure the entry wasn't added to the cache by some other thread + // before we grabbed the cacheUpdateLock. + // Don't use _cache_getMethod() because _cache_getMethod() doesn't + // return forward:: entries. + if (_cache_getImp(cls, sel)) { + return NO; // entry is already cached, didn't add new one + } + + // Use the cache as-is if it is less than 3/4 full + newOccupied = cache->occupied + 1; + if ((newOccupied * 4) <= (cache->mask + 1) * 3) { + // Cache is less than 3/4 full. + cache->occupied = (unsigned int)newOccupied; + } else { + // Cache is too full. Expand it. + cache = _cache_expand (cls); + + // Account for the addition + cache->occupied += 1; + } + + // Scan for the first unused slot and insert there. + // There is guaranteed to be an empty slot because the + // minimum size is 4 and we resized at 3/4 full. 
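+    // (For example, a 4-slot cache expands when its 4th entry is added:
+    // newOccupied*4 == 16 > (mask+1)*3 == 12. So at least one slot is
+    // always empty and the probe loop below must terminate.)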
+ buckets = (cache_entry **)cache->buckets; + for (index = CACHE_HASH(sel, cache->mask); + buckets[index] != NULL; + index = (index+1) & cache->mask) + { + // empty + } + buckets[index] = entry; + + return YES; // successfully added new cache entry +} + + +/*********************************************************************** +* _cache_addForwardEntry +* Add a forward:: entry for the given selector to cls's method cache. +* Does nothing if the cache addition fails for any reason. +* Called from class_respondsToMethod and _class_lookupMethodAndLoadCache. +* Cache locks: cacheUpdateLock must not be held. +**********************************************************************/ +void _cache_addForwardEntry(Class cls, SEL sel) +{ + cache_entry *smt; + + smt = (cache_entry *)malloc(sizeof(cache_entry)); + smt->name = sel; + smt->imp = _objc_msgForward_impcache; + if (! _cache_fill(cls, (Method)smt, sel)) { // fixme hack + // Entry not added to cache. Don't leak the method struct. + free(smt); + } +} + + +/*********************************************************************** +* _cache_flush. Invalidate all valid entries in the given class' cache. +* +* Called from flush_caches() and _cache_fill() +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +void _cache_flush(Class cls) +{ + Cache cache; + unsigned int index; + + cacheUpdateLock.assertLocked(); + + // Locate cache. Ignore unused cache. + cache = cls->cache; + if (_cache_isEmpty(cache)) return; + +#ifdef OBJC_INSTRUMENTED + { + CacheInstrumentation *cacheData; + + // Tally this flush + cacheData = CACHE_INSTRUMENTATION(cache); + cacheData->flushCount += 1; + cacheData->flushedEntries += cache->occupied; + if (cache->occupied > cacheData->maxFlushedEntries) + cacheData->maxFlushedEntries = cache->occupied; + } +#endif + + // Traverse the cache + for (index = 0; index <= cache->mask; index += 1) + { + // Remember what this entry was, so we can possibly + // deallocate it after the bucket has been invalidated + cache_entry *oldEntry = (cache_entry *)cache->buckets[index]; + + // Invalidate this entry + cache->buckets[index] = NULL; + + // Deallocate "forward::" entry + if (oldEntry && oldEntry->imp == _objc_msgForward_impcache) + _cache_collect_free (oldEntry, sizeof(cache_entry)); + } + + // Clear the valid-entry counter + cache->occupied = 0; +} + + +/*********************************************************************** +* flush_cache. Flushes the instance method cache for class cls only. +* Use flush_caches() if cls might have in-use subclasses. +**********************************************************************/ +void flush_cache(Class cls) +{ + if (cls) { + mutex_locker_t lock(cacheUpdateLock); + _cache_flush(cls); + } +} + + +/*********************************************************************** +* cache collection. +**********************************************************************/ + +#if !TARGET_OS_WIN32 + +// A sentinel (magic value) to report bad thread_get_state status. +// Must not be a valid PC. +// Must not be zero - thread_get_state() on a new thread returns PC == 0. 
+#define PC_SENTINEL 1 + +// UNIX03 compliance hack (4508809) +#if !__DARWIN_UNIX03 +#define __srr0 srr0 +#define __eip eip +#endif + +static uintptr_t _get_pc_for_thread(thread_t thread) +#if defined(__i386__) +{ + i386_thread_state_t state; + unsigned int count = i386_THREAD_STATE_COUNT; + kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL; +} +#elif defined(__x86_64__) +{ + x86_thread_state64_t state; + unsigned int count = x86_THREAD_STATE64_COUNT; + kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__rip : PC_SENTINEL; +} +#elif defined(__arm__) +{ + arm_thread_state_t state; + unsigned int count = ARM_THREAD_STATE_COUNT; + kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL; +} +#else +{ +#error _get_pc_for_thread () not implemented for this architecture +} +#endif + +#endif + +/*********************************************************************** +* _collecting_in_critical. +* Returns TRUE if some thread is currently executing a cache-reading +* function. Collection of cache garbage is not allowed when a cache- +* reading function is in progress because it might still be using +* the garbage memory. +**********************************************************************/ +OBJC_EXPORT uintptr_t objc_entryPoints[]; +OBJC_EXPORT uintptr_t objc_exitPoints[]; + +static int _collecting_in_critical(void) +{ +#if TARGET_OS_WIN32 + return TRUE; +#else + thread_act_port_array_t threads; + unsigned number; + unsigned count; + kern_return_t ret; + int result; + + mach_port_t mythread = pthread_mach_thread_np(pthread_self()); + + // Get a list of all the threads in the current task + ret = task_threads (mach_task_self (), &threads, &number); + if (ret != KERN_SUCCESS) + { + _objc_fatal("task_thread failed (result %d)\n", ret); + } + + // Check whether any thread is in the cache lookup code + result = FALSE; + for (count = 0; count < number; count++) + { + int region; + uintptr_t pc; + + // Don't bother checking ourselves + if (threads[count] == mythread) + continue; + + // Find out where thread is executing + pc = _get_pc_for_thread (threads[count]); + + // Check for bad status, and if so, assume the worse (can't collect) + if (pc == PC_SENTINEL) + { + result = TRUE; + goto done; + } + + // Check whether it is in the cache lookup code + for (region = 0; objc_entryPoints[region] != 0; region++) + { + if ((pc >= objc_entryPoints[region]) && + (pc <= objc_exitPoints[region])) + { + result = TRUE; + goto done; + } + } + } + + done: + // Deallocate the port rights for the threads + for (count = 0; count < number; count++) { + mach_port_deallocate(mach_task_self (), threads[count]); + } + + // Deallocate the thread list + vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number); + + // Return our finding + return result; +#endif +} + + +/*********************************************************************** +* _garbage_make_room. Ensure that there is enough room for at least +* one more ref in the garbage. 
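+* The table starts at INIT_GARBAGE_COUNT entries and doubles whenever it
+* is full; it is never shrunk, even after the garbage is freed.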
+**********************************************************************/ + +// amount of memory represented by all refs in the garbage +static size_t garbage_byte_size = 0; + +// do not empty the garbage until garbage_byte_size gets at least this big +static size_t garbage_threshold = 1024; + +// table of refs to free +static void **garbage_refs = 0; + +// current number of refs in garbage_refs +static size_t garbage_count = 0; + +// capacity of current garbage_refs +static size_t garbage_max = 0; + +// capacity of initial garbage_refs +enum { + INIT_GARBAGE_COUNT = 128 +}; + +static void _garbage_make_room(void) +{ + static int first = 1; + + // Create the collection table the first time it is needed + if (first) + { + first = 0; + garbage_refs = (void**) + malloc(INIT_GARBAGE_COUNT * sizeof(void *)); + garbage_max = INIT_GARBAGE_COUNT; + } + + // Double the table if it is full + else if (garbage_count == garbage_max) + { + garbage_refs = (void**) + realloc(garbage_refs, garbage_max * 2 * sizeof(void *)); + garbage_max *= 2; + } +} + + +/*********************************************************************** +* _cache_collect_free. Add the specified malloc'd memory to the list +* of them to free at some later point. +* size is used for the collection threshold. It does not have to be +* precisely the block's size. +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static void _cache_collect_free(void *data, size_t size) +{ + cacheUpdateLock.assertLocked(); + + _garbage_make_room (); + garbage_byte_size += size; + garbage_refs[garbage_count++] = data; +} + + +/*********************************************************************** +* _cache_collect. Try to free accumulated dead caches. +* collectALot tries harder to free memory. +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +void _cache_collect(bool collectALot) +{ + cacheUpdateLock.assertLocked(); + + // Done if the garbage is not full + if (garbage_byte_size < garbage_threshold && !collectALot) { + return; + } + + // Synchronize collection with objc_msgSend and other cache readers + if (!collectALot) { + if (_collecting_in_critical ()) { + // objc_msgSend (or other cache reader) is currently looking in + // the cache and might still be using some garbage. + if (PrintCaches) { + _objc_inform ("CACHES: not collecting; " + "objc_msgSend in progress"); + } + return; + } + } + else { + // No excuses. 
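+        // Spin until no thread's PC is inside any objc_msgSend entry/exit
+        // range, at which point nothing can still reference the garbage.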
+ while (_collecting_in_critical()) + ; + } + + // No cache readers in progress - garbage is now deletable + + // Log our progress + if (PrintCaches) { + cache_collections++; + _objc_inform ("CACHES: COLLECTING %zu bytes (%zu regions, %zu allocations, %zu collections)", garbage_byte_size, cache_allocator_regions, cache_allocations, cache_collections); + } + + // Dispose all refs now in the garbage + while (garbage_count--) { + _cache_free_block(garbage_refs[garbage_count]); + } + + // Clear the garbage count and total size indicator + garbage_count = 0; + garbage_byte_size = 0; + + if (PrintCaches) { + size_t i; + size_t total = 0; + size_t ideal_total = 0; + size_t malloc_total = 0; + size_t local_total = 0; + + for (i = 0; i < sizeof(cache_counts) / sizeof(cache_counts[0]); i++) { + int count = cache_counts[i]; + int slots = 1 << i; + size_t size = sizeof(struct objc_cache) + TABLE_SIZE(slots); + size_t ideal = size; +#if TARGET_OS_WIN32 + size_t malloc = size; +#else + size_t malloc = malloc_good_size(size); +#endif + size_t local = size < CACHE_ALLOCATOR_MIN ? malloc : cache_allocator_size_for_mask(cache_allocator_mask_for_size(size)); + + if (!count) continue; + + _objc_inform("CACHES: %4d slots: %4d caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", slots, count, ideal*count, malloc*count, local*count, malloc*count-ideal*count, local*count-ideal*count); + + total += count; + ideal_total += ideal*count; + malloc_total += malloc*count; + local_total += local*count; + } + + _objc_inform("CACHES: total: %4zu caches, %6zu / %6zu / %6zu bytes ideal/malloc/local, %6zu / %6zu bytes wasted malloc/local", total, ideal_total, malloc_total, local_total, malloc_total-ideal_total, local_total-ideal_total); + } +} + + + + + +#if defined(CACHE_ALLOCATOR) + +/*********************************************************************** +* Custom method cache allocator. +* Method cache block sizes are 2^slots+2 words, which is a pessimal +* case for the system allocator. It wastes 504 bytes per cache block +* with 128 or more slots, which adds up to tens of KB for an AppKit process. +* To save memory, the custom cache allocator below is used. +* +* The cache allocator uses 128 KB allocation regions. Few processes will +* require a second region. Within a region, allocation is address-ordered +* first fit. +* +* The cache allocator uses a quantum of 520. +* Cache block ideal sizes: 520, 1032, 2056, 4104 +* Cache allocator sizes: 520, 1040, 2080, 4160 +* +* Because all blocks are known to be genuine method caches, the ordinary +* cache->mask and cache->occupied fields are used as block headers. +* No out-of-band headers are maintained. The number of blocks will +* almost always be fewer than 200, so for simplicity there is no free +* list or other optimization. +* +* Block in use: mask != 0, occupied != -1 (mask indicates block size) +* Block free: mask != 0, occupied == -1 (mask is precisely block size) +* +* No cache allocator functions take any locks. Instead, the caller +* must hold the cacheUpdateLock. +* +* fixme with 128 KB regions and 520 B min block size, an allocation +* bitmap would be only 32 bytes - better than free list? 
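+*
+* (The 520-byte quantum assumes LP64: CACHE_QUANTUM is CACHE_ALLOCATOR_MIN
+* plus sizeof(struct objc_cache) minus one embedded bucket pointer,
+* i.e. 512 + 16 - 8 = 520; the allocator sizes above are 1, 2, 4 and 8
+* quanta.)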
+**********************************************************************/ + +typedef struct cache_allocator_block { + uintptr_t size; + uintptr_t state; + struct cache_allocator_block *nextFree; +} cache_allocator_block; + +typedef struct cache_allocator_region { + cache_allocator_block *start; + cache_allocator_block *end; // first non-block address + cache_allocator_block *freeList; + struct cache_allocator_region *next; +} cache_allocator_region; + +static cache_allocator_region *cacheRegion = NULL; + + +/*********************************************************************** +* cache_allocator_add_region +* Allocates and returns a new region that can hold at least size +* bytes of large method caches. +* The actual size will be rounded up to a CACHE_QUANTUM boundary, +* with a minimum of CACHE_REGION_SIZE. +* The new region is lowest-priority for new allocations. Callers that +* know the other regions are already full should allocate directly +* into the returned region. +**********************************************************************/ +static cache_allocator_region *cache_allocator_add_region(size_t size) +{ + vm_address_t addr; + cache_allocator_block *b; + cache_allocator_region **rgnP; + cache_allocator_region *newRegion = (cache_allocator_region *) + calloc(1, sizeof(cache_allocator_region)); + + // Round size up to quantum boundary, and apply the minimum size. + size += CACHE_QUANTUM - (size % CACHE_QUANTUM); + if (size < CACHE_REGION_SIZE) size = CACHE_REGION_SIZE; + + // Allocate the region + addr = (vm_address_t)calloc(size, 1); + newRegion->start = (cache_allocator_block *)addr; + newRegion->end = (cache_allocator_block *)(addr + size); + + // Mark the first block: free and covers the entire region + b = newRegion->start; + b->size = size; + b->state = (uintptr_t)-1; + b->nextFree = NULL; + newRegion->freeList = b; + + // Add to end of the linked list of regions. + // Other regions should be re-used before this one is touched. + newRegion->next = NULL; + rgnP = &cacheRegion; + while (*rgnP) { + rgnP = &(**rgnP).next; + } + *rgnP = newRegion; + + cache_allocator_regions++; + + return newRegion; +} + + +/*********************************************************************** +* cache_allocator_coalesce +* Attempts to coalesce a free block with the single free block following +* it in the free list, if any. +**********************************************************************/ +static void cache_allocator_coalesce(cache_allocator_block *block) +{ + if (block->size + (uintptr_t)block == (uintptr_t)block->nextFree) { + block->size += block->nextFree->size; + block->nextFree = block->nextFree->nextFree; + } +} + + +/*********************************************************************** +* cache_region_calloc +* Attempt to allocate a size-byte block in the given region. +* Allocation is first-fit. The free list is already fully coalesced. +* Returns NULL if there is not enough room in the region for the block. +**********************************************************************/ +static void *cache_region_calloc(cache_allocator_region *rgn, size_t size) +{ + cache_allocator_block **blockP; + uintptr_t mask; + + // Save mask for allocated block, then round size + // up to CACHE_QUANTUM boundary + mask = cache_allocator_mask_for_size(size); + size = cache_allocator_size_for_mask(mask); + + // Search the free list for a sufficiently large free block. 
+ + for (blockP = &rgn->freeList; + *blockP != NULL; + blockP = &(**blockP).nextFree) + { + cache_allocator_block *block = *blockP; + if (block->size < size) continue; // not big enough + + // block is now big enough. Allocate from it. + + // Slice off unneeded fragment of block, if any, + // and reconnect the free list around block. + if (block->size - size >= CACHE_QUANTUM) { + cache_allocator_block *leftover = + (cache_allocator_block *)(size + (uintptr_t)block); + leftover->size = block->size - size; + leftover->state = (uintptr_t)-1; + leftover->nextFree = block->nextFree; + *blockP = leftover; + } else { + *blockP = block->nextFree; + } + + // block is now exactly the right size. + + bzero(block, size); + block->size = mask; // Cache->mask + block->state = 0; // Cache->occupied + + return block; + } + + // No room in this region. + return NULL; +} + + +/*********************************************************************** +* cache_allocator_calloc +* Custom allocator for large method caches (128+ slots) +* The returned cache block already has cache->mask set. +* cache->occupied and the cache contents are zero. +* Cache locks: cacheUpdateLock must be held by the caller +**********************************************************************/ +static Cache cache_allocator_calloc(size_t size) +{ + cache_allocator_region *rgn; + + cacheUpdateLock.assertLocked(); + + for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) { + void *p = cache_region_calloc(rgn, size); + if (p) { + return (Cache)p; + } + } + + // No regions or all regions full - make a region and try one more time + // In the unlikely case of a cache over 256KB, it will get its own region. + return (Cache)cache_region_calloc(cache_allocator_add_region(size), size); +} + + +/*********************************************************************** +* cache_allocator_region_for_block +* Returns the cache allocator region that ptr points into, or NULL. +**********************************************************************/ +static cache_allocator_region *cache_allocator_region_for_block(cache_allocator_block *block) +{ + cache_allocator_region *rgn; + for (rgn = cacheRegion; rgn != NULL; rgn = rgn->next) { + if (block >= rgn->start && block < rgn->end) return rgn; + } + return NULL; +} + + +/*********************************************************************** +* cache_allocator_is_block +* If ptr is a live block from the cache allocator, return YES +* If ptr is a block from some other allocator, return NO. +* If ptr is a dead block from the cache allocator, result is undefined. +* Cache locks: cacheUpdateLock must be held by the caller +**********************************************************************/ +static bool cache_allocator_is_block(void *ptr) +{ + cacheUpdateLock.assertLocked(); + return (cache_allocator_region_for_block((cache_allocator_block *)ptr) != NULL); +} + +/*********************************************************************** +* cache_allocator_free +* Frees a block allocated by the cache allocator. +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static void cache_allocator_free(void *ptr) +{ + cache_allocator_block *dead = (cache_allocator_block *)ptr; + cache_allocator_block *cur; + cache_allocator_region *rgn; + + cacheUpdateLock.assertLocked(); + + if (! 
(rgn = cache_allocator_region_for_block(dead))) { + // free of non-pointer + _objc_inform("cache_allocator_free of non-pointer %p", dead); + return; + } + + dead->size = cache_allocator_size_for_mask(dead->size); + dead->state = (uintptr_t)-1; + + if (!rgn->freeList || rgn->freeList > dead) { + // dead block belongs at front of free list + dead->nextFree = rgn->freeList; + rgn->freeList = dead; + cache_allocator_coalesce(dead); + return; + } + + // dead block belongs in the middle or end of free list + for (cur = rgn->freeList; cur != NULL; cur = cur->nextFree) { + cache_allocator_block *ahead = cur->nextFree; + + if (!ahead || ahead > dead) { + // cur and ahead straddle dead, OR dead belongs at end of free list + cur->nextFree = dead; + dead->nextFree = ahead; + + // coalesce into dead first in case both succeed + cache_allocator_coalesce(dead); + cache_allocator_coalesce(cur); + return; + } + } + + // uh-oh + _objc_inform("cache_allocator_free of non-pointer %p", ptr); +} + +// defined(CACHE_ALLOCATOR) +#endif + +/*********************************************************************** +* Cache instrumentation and debugging +**********************************************************************/ + +#ifdef OBJC_INSTRUMENTED +enum { + CACHE_HISTOGRAM_SIZE = 512 +}; + +unsigned int CacheHitHistogram [CACHE_HISTOGRAM_SIZE]; +unsigned int CacheMissHistogram [CACHE_HISTOGRAM_SIZE]; +#endif + + +/*********************************************************************** +* _cache_print. +**********************************************************************/ +static void _cache_print(Cache cache) +{ + uintptr_t index; + uintptr_t count; + + count = cache->mask + 1; + for (index = 0; index < count; index += 1) { + cache_entry *entry = (cache_entry *)cache->buckets[index]; + if (entry) { + if (entry->imp == _objc_msgForward_impcache) + printf ("does not recognize: \n"); + printf ("%s\n", sel_getName(entry->name)); + } + } +} + + +/*********************************************************************** +* _class_printMethodCaches. +**********************************************************************/ +void _class_printMethodCaches(Class cls) +{ + if (_cache_isEmpty(cls->cache)) { + printf("no instance-method cache for class %s\n",cls->nameForLogging()); + } else { + printf("instance-method cache for class %s:\n", cls->nameForLogging()); + _cache_print(cls->cache); + } + + if (_cache_isEmpty(cls->ISA()->cache)) { + printf("no class-method cache for class %s\n", cls->nameForLogging()); + } else { + printf ("class-method cache for class %s:\n", cls->nameForLogging()); + _cache_print(cls->ISA()->cache); + } +} + + +#if 0 +#warning fixme + + +/*********************************************************************** +* _class_printDuplicateCacheEntries. +**********************************************************************/ +void _class_printDuplicateCacheEntries(bool detail) +{ + NXHashState state; + Class cls; + unsigned int duplicates; + unsigned int index1; + unsigned int index2; + unsigned int mask; + unsigned int count; + unsigned int isMeta; + Cache cache; + + + printf ("Checking for duplicate cache entries \n"); + + // Outermost loop - iterate over all classes + state = NXInitHashState (class_hash); + duplicates = 0; + while (NXNextHashState (class_hash, &state, (void **) &cls)) + { + // Control loop - do given class' cache, then its isa's cache + for (isMeta = 0; isMeta <= 1; isMeta += 1) + { + // Select cache of interest and make sure it exists + cache = (isMeta ? 
cls->ISA : cls)->cache; + if (_cache_isEmpty(cache)) + continue; + + // Middle loop - check each entry in the given cache + mask = cache->mask; + count = mask + 1; + for (index1 = 0; index1 < count; index1 += 1) + { + // Skip invalid entry + if (!cache->buckets[index1]) + continue; + + // Inner loop - check that given entry matches no later entry + for (index2 = index1 + 1; index2 < count; index2 += 1) + { + // Skip invalid entry + if (!cache->buckets[index2]) + continue; + + // Check for duplication by method name comparison + if (strcmp ((char *) cache->buckets[index1]->name), + (char *) cache->buckets[index2]->name)) == 0) + { + if (detail) + printf ("%s %s\n", cls->nameForLogging(), sel_getName(cache->buckets[index1]->name)); + duplicates += 1; + break; + } + } + } + } + } + + // Log the findings + printf ("duplicates = %d\n", duplicates); + printf ("total cache fills = %d\n", totalCacheFills); +} + + +/*********************************************************************** +* PrintCacheHeader. +**********************************************************************/ +static void PrintCacheHeader(void) +{ +#ifdef OBJC_INSTRUMENTED + printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS TotalD AvgD MaxD TotalD AvgD MaxD TotD AvgD MaxD\n"); + printf ("Size Count Used Used Used Hit Hit Miss Miss Hits Prbs Prbs Misses Prbs Prbs Flsh Flsh Flsh\n"); + printf ("----- ----- ----- ----- ---- ---- ---- ---- ---- ------- ---- ---- ------- ---- ---- ---- ---- ----\n"); +#else + printf ("Cache Cache Slots Avg Max AvgS MaxS AvgS MaxS\n"); + printf ("Size Count Used Used Used Hit Hit Miss Miss\n"); + printf ("----- ----- ----- ----- ---- ---- ---- ---- ----\n"); +#endif +} + + +/*********************************************************************** +* PrintCacheInfo. +**********************************************************************/ +static void PrintCacheInfo(unsigned int cacheSize, + unsigned int cacheCount, + unsigned int slotsUsed, + float avgUsed, unsigned int maxUsed, + float avgSHit, unsigned int maxSHit, + float avgSMiss, unsigned int maxSMiss +#ifdef OBJC_INSTRUMENTED + , unsigned int totDHits, + float avgDHit, + unsigned int maxDHit, + unsigned int totDMisses, + float avgDMiss, + unsigned int maxDMiss, + unsigned int totDFlsh, + float avgDFlsh, + unsigned int maxDFlsh +#endif + ) +{ +#ifdef OBJC_INSTRUMENTED + printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u %7u %4.1f %4u %7u %4.1f %4u %4u %4.1f %4u\n", +#else + printf ("%5u %5u %5u %5.1f %4u %4.1f %4u %4.1f %4u\n", +#endif + cacheSize, cacheCount, slotsUsed, avgUsed, maxUsed, avgSHit, maxSHit, avgSMiss, maxSMiss +#ifdef OBJC_INSTRUMENTED + , totDHits, avgDHit, maxDHit, totDMisses, avgDMiss, maxDMiss, totDFlsh, avgDFlsh, maxDFlsh +#endif + ); + +} + + +#ifdef OBJC_INSTRUMENTED +/*********************************************************************** +* PrintCacheHistogram. Show the non-zero entries from the specified +* cache histogram. 
+**********************************************************************/ +static void PrintCacheHistogram(char *title, + unsigned int *firstEntry, + unsigned int entryCount) +{ + unsigned int index; + unsigned int *thisEntry; + + printf ("%s\n", title); + printf (" Probes Tally\n"); + printf (" ------ -----\n"); + for (index = 0, thisEntry = firstEntry; + index < entryCount; + index += 1, thisEntry += 1) + { + if (*thisEntry == 0) + continue; + + printf (" %6d %5d\n", index, *thisEntry); + } +} +#endif + + +/*********************************************************************** +* _class_printMethodCacheStatistics. +**********************************************************************/ + +#define MAX_LOG2_SIZE 32 +#define MAX_CHAIN_SIZE 100 + +void _class_printMethodCacheStatistics(void) +{ + unsigned int isMeta; + unsigned int index; + NXHashState state; + Class cls; + unsigned int totalChain; + unsigned int totalMissChain; + unsigned int maxChain; + unsigned int maxMissChain; + unsigned int classCount; + unsigned int negativeEntryCount; + unsigned int cacheExpandCount; + unsigned int cacheCountBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int totalEntriesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxEntriesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int totalChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int totalMissChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int totalMaxChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int totalMaxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxMissChainBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int chainCount[MAX_CHAIN_SIZE] = {0}; + unsigned int missChainCount[MAX_CHAIN_SIZE] = {0}; +#ifdef OBJC_INSTRUMENTED + unsigned int hitCountBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int hitProbesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxHitProbesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int missCountBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int missProbesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxMissProbesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int flushCountBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int flushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}}; + unsigned int maxFlushedEntriesBySize[2][MAX_LOG2_SIZE] = {{0}}; +#endif + + printf ("Printing cache statistics\n"); + + // Outermost loop - iterate over all classes + state = NXInitHashState (class_hash); + classCount = 0; + negativeEntryCount = 0; + cacheExpandCount = 0; + while (NXNextHashState (class_hash, &state, (void **) &cls)) + { + // Tally classes + classCount += 1; + + // Control loop - do given class' cache, then its isa's cache + for (isMeta = 0; isMeta <= 1; isMeta += 1) + { + Cache cache; + unsigned int mask; + unsigned int log2Size; + unsigned int entryCount; + + // Select cache of interest + cache = (isMeta ? cls->ISA : cls)->cache; + + // Ignore empty cache... should we? + if (_cache_isEmpty(cache)) + continue; + + // Middle loop - do each entry in the given cache + mask = cache->mask; + entryCount = 0; + totalChain = 0; + totalMissChain = 0; + maxChain = 0; + maxMissChain = 0; + for (index = 0; index < mask + 1; index += 1) + { + cache_entry **buckets; + cache_entry *entry; + unsigned int hash; + unsigned int methodChain; + unsigned int methodMissChain; + unsigned int index2; + + // If entry is invalid, the only item of + // interest is that future insert hashes + // to this entry can use it directly. 
+ buckets = (cache_entry **)cache->buckets; + if (!buckets[index]) + { + missChainCount[0] += 1; + continue; + } + + entry = buckets[index]; + + // Tally valid entries + entryCount += 1; + + // Tally "forward::" entries + if (entry->imp == _objc_msgForward_impcache) + negativeEntryCount += 1; + + // Calculate search distance (chain length) for this method + // The chain may wrap around to the beginning of the table. + hash = CACHE_HASH(entry->name, mask); + if (index >= hash) methodChain = index - hash; + else methodChain = (mask+1) + index - hash; + + // Tally chains of this length + if (methodChain < MAX_CHAIN_SIZE) + chainCount[methodChain] += 1; + + // Keep sum of all chain lengths + totalChain += methodChain; + + // Record greatest chain length + if (methodChain > maxChain) + maxChain = methodChain; + + // Calculate search distance for miss that hashes here + index2 = index; + while (buckets[index2]) + { + index2 += 1; + index2 &= mask; + } + methodMissChain = ((index2 - index) & mask); + + // Tally miss chains of this length + if (methodMissChain < MAX_CHAIN_SIZE) + missChainCount[methodMissChain] += 1; + + // Keep sum of all miss chain lengths in this class + totalMissChain += methodMissChain; + + // Record greatest miss chain length + if (methodMissChain > maxMissChain) + maxMissChain = methodMissChain; + } + + // Factor this cache into statistics about caches of the same + // type and size (all caches are a power of two in size) + log2Size = log2u (mask + 1); + cacheCountBySize[isMeta][log2Size] += 1; + totalEntriesBySize[isMeta][log2Size] += entryCount; + if (entryCount > maxEntriesBySize[isMeta][log2Size]) + maxEntriesBySize[isMeta][log2Size] = entryCount; + totalChainBySize[isMeta][log2Size] += totalChain; + totalMissChainBySize[isMeta][log2Size] += totalMissChain; + totalMaxChainBySize[isMeta][log2Size] += maxChain; + totalMaxMissChainBySize[isMeta][log2Size] += maxMissChain; + if (maxChain > maxChainBySize[isMeta][log2Size]) + maxChainBySize[isMeta][log2Size] = maxChain; + if (maxMissChain > maxMissChainBySize[isMeta][log2Size]) + maxMissChainBySize[isMeta][log2Size] = maxMissChain; +#ifdef OBJC_INSTRUMENTED + { + CacheInstrumentation *cacheData; + + cacheData = CACHE_INSTRUMENTATION(cache); + hitCountBySize[isMeta][log2Size] += cacheData->hitCount; + hitProbesBySize[isMeta][log2Size] += cacheData->hitProbes; + if (cacheData->maxHitProbes > maxHitProbesBySize[isMeta][log2Size]) + maxHitProbesBySize[isMeta][log2Size] = cacheData->maxHitProbes; + missCountBySize[isMeta][log2Size] += cacheData->missCount; + missProbesBySize[isMeta][log2Size] += cacheData->missProbes; + if (cacheData->maxMissProbes > maxMissProbesBySize[isMeta][log2Size]) + maxMissProbesBySize[isMeta][log2Size] = cacheData->maxMissProbes; + flushCountBySize[isMeta][log2Size] += cacheData->flushCount; + flushedEntriesBySize[isMeta][log2Size] += cacheData->flushedEntries; + if (cacheData->maxFlushedEntries > maxFlushedEntriesBySize[isMeta][log2Size]) + maxFlushedEntriesBySize[isMeta][log2Size] = cacheData->maxFlushedEntries; + } +#endif + // Caches start with a power of two number of entries, and grow by doubling, so + // we can calculate the number of times this cache has expanded + cacheExpandCount += log2Size - INIT_CACHE_SIZE_LOG2; + } + } + + { + unsigned int cacheCountByType[2] = {0}; + unsigned int totalCacheCount = 0; + unsigned int totalEntries = 0; + unsigned int maxEntries = 0; + unsigned int totalSlots = 0; +#ifdef OBJC_INSTRUMENTED + unsigned int totalHitCount = 0; + unsigned int totalHitProbes = 0; + 
unsigned int maxHitProbes = 0; + unsigned int totalMissCount = 0; + unsigned int totalMissProbes = 0; + unsigned int maxMissProbes = 0; + unsigned int totalFlushCount = 0; + unsigned int totalFlushedEntries = 0; + unsigned int maxFlushedEntries = 0; +#endif + + totalChain = 0; + maxChain = 0; + totalMissChain = 0; + maxMissChain = 0; + + // Sum information over all caches + for (isMeta = 0; isMeta <= 1; isMeta += 1) + { + for (index = 0; index < MAX_LOG2_SIZE; index += 1) + { + cacheCountByType[isMeta] += cacheCountBySize[isMeta][index]; + totalEntries += totalEntriesBySize[isMeta][index]; + totalSlots += cacheCountBySize[isMeta][index] * (1 << index); + totalChain += totalChainBySize[isMeta][index]; + if (maxEntriesBySize[isMeta][index] > maxEntries) + maxEntries = maxEntriesBySize[isMeta][index]; + if (maxChainBySize[isMeta][index] > maxChain) + maxChain = maxChainBySize[isMeta][index]; + totalMissChain += totalMissChainBySize[isMeta][index]; + if (maxMissChainBySize[isMeta][index] > maxMissChain) + maxMissChain = maxMissChainBySize[isMeta][index]; +#ifdef OBJC_INSTRUMENTED + totalHitCount += hitCountBySize[isMeta][index]; + totalHitProbes += hitProbesBySize[isMeta][index]; + if (maxHitProbesBySize[isMeta][index] > maxHitProbes) + maxHitProbes = maxHitProbesBySize[isMeta][index]; + totalMissCount += missCountBySize[isMeta][index]; + totalMissProbes += missProbesBySize[isMeta][index]; + if (maxMissProbesBySize[isMeta][index] > maxMissProbes) + maxMissProbes = maxMissProbesBySize[isMeta][index]; + totalFlushCount += flushCountBySize[isMeta][index]; + totalFlushedEntries += flushedEntriesBySize[isMeta][index]; + if (maxFlushedEntriesBySize[isMeta][index] > maxFlushedEntries) + maxFlushedEntries = maxFlushedEntriesBySize[isMeta][index]; +#endif + } + + totalCacheCount += cacheCountByType[isMeta]; + } + + // Log our findings + printf ("There are %u classes\n", classCount); + + for (isMeta = 0; isMeta <= 1; isMeta += 1) + { + // Number of this type of class + printf ("\nThere are %u %s-method caches, broken down by size (slot count):\n", + cacheCountByType[isMeta], + isMeta ? "class" : "instance"); + + // Print header + PrintCacheHeader (); + + // Keep format consistent even if there are caches of this kind + if (cacheCountByType[isMeta] == 0) + { + printf ("(none)\n"); + continue; + } + + // Usage information by cache size + for (index = 0; index < MAX_LOG2_SIZE; index += 1) + { + unsigned int cacheCount; + unsigned int cacheSlotCount; + unsigned int cacheEntryCount; + + // Get number of caches of this type and size + cacheCount = cacheCountBySize[isMeta][index]; + if (cacheCount == 0) + continue; + + // Get the cache slot count and the total number of valid entries + cacheSlotCount = (1 << index); + cacheEntryCount = totalEntriesBySize[isMeta][index]; + + // Give the analysis + PrintCacheInfo (cacheSlotCount, + cacheCount, + cacheEntryCount, + (float) cacheEntryCount / (float) cacheCount, + maxEntriesBySize[isMeta][index], + (float) totalChainBySize[isMeta][index] / (float) cacheEntryCount, + maxChainBySize[isMeta][index], + (float) totalMissChainBySize[isMeta][index] / (float) (cacheCount * cacheSlotCount), + maxMissChainBySize[isMeta][index] +#ifdef OBJC_INSTRUMENTED + , hitCountBySize[isMeta][index], + hitCountBySize[isMeta][index] ? + (float) hitProbesBySize[isMeta][index] / (float) hitCountBySize[isMeta][index] : 0.0, + maxHitProbesBySize[isMeta][index], + missCountBySize[isMeta][index], + missCountBySize[isMeta][index] ? 
+ (float) missProbesBySize[isMeta][index] / (float) missCountBySize[isMeta][index] : 0.0, + maxMissProbesBySize[isMeta][index], + flushCountBySize[isMeta][index], + flushCountBySize[isMeta][index] ? + (float) flushedEntriesBySize[isMeta][index] / (float) flushCountBySize[isMeta][index] : 0.0, + maxFlushedEntriesBySize[isMeta][index] +#endif + ); + } + } + + // Give overall numbers + printf ("\nCumulative:\n"); + PrintCacheHeader (); + PrintCacheInfo (totalSlots, + totalCacheCount, + totalEntries, + (float) totalEntries / (float) totalCacheCount, + maxEntries, + (float) totalChain / (float) totalEntries, + maxChain, + (float) totalMissChain / (float) totalSlots, + maxMissChain +#ifdef OBJC_INSTRUMENTED + , totalHitCount, + totalHitCount ? + (float) totalHitProbes / (float) totalHitCount : 0.0, + maxHitProbes, + totalMissCount, + totalMissCount ? + (float) totalMissProbes / (float) totalMissCount : 0.0, + maxMissProbes, + totalFlushCount, + totalFlushCount ? + (float) totalFlushedEntries / (float) totalFlushCount : 0.0, + maxFlushedEntries +#endif + ); + + printf ("\nNumber of \"forward::\" entries: %d\n", negativeEntryCount); + printf ("Number of cache expansions: %d\n", cacheExpandCount); +#ifdef OBJC_INSTRUMENTED + printf ("flush_caches: total calls total visits average visits max visits total classes visits/class\n"); + printf (" ----------- ------------ -------------- ---------- ------------- -------------\n"); + printf (" linear %11u %12u %14.1f %10u %13u %12.2f\n", + LinearFlushCachesCount, + LinearFlushCachesVisitedCount, + LinearFlushCachesCount ? + (float) LinearFlushCachesVisitedCount / (float) LinearFlushCachesCount : 0.0, + MaxLinearFlushCachesVisitedCount, + LinearFlushCachesVisitedCount, + 1.0); + printf (" nonlinear %11u %12u %14.1f %10u %13u %12.2f\n", + NonlinearFlushCachesCount, + NonlinearFlushCachesVisitedCount, + NonlinearFlushCachesCount ? + (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesCount : 0.0, + MaxNonlinearFlushCachesVisitedCount, + NonlinearFlushCachesClassCount, + NonlinearFlushCachesClassCount ? + (float) NonlinearFlushCachesVisitedCount / (float) NonlinearFlushCachesClassCount : 0.0); + printf (" ideal %11u %12u %14.1f %10u %13u %12.2f\n", + LinearFlushCachesCount + NonlinearFlushCachesCount, + IdealFlushCachesCount, + LinearFlushCachesCount + NonlinearFlushCachesCount ? + (float) IdealFlushCachesCount / (float) (LinearFlushCachesCount + NonlinearFlushCachesCount) : 0.0, + MaxIdealFlushCachesCount, + LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount, + LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount ? 
+ (float) IdealFlushCachesCount / (float) (LinearFlushCachesVisitedCount + NonlinearFlushCachesClassCount) : 0.0); + + PrintCacheHistogram ("\nCache hit histogram:", &CacheHitHistogram[0], CACHE_HISTOGRAM_SIZE); + PrintCacheHistogram ("\nCache miss histogram:", &CacheMissHistogram[0], CACHE_HISTOGRAM_SIZE); +#endif + +#if 0 + printf ("\nLookup chains:"); + for (index = 0; index < MAX_CHAIN_SIZE; index += 1) + { + if (chainCount[index] != 0) + printf (" %u:%u", index, chainCount[index]); + } + + printf ("\nMiss chains:"); + for (index = 0; index < MAX_CHAIN_SIZE; index += 1) + { + if (missChainCount[index] != 0) + printf (" %u:%u", index, missChainCount[index]); + } + + printf ("\nTotal memory usage for cache data structures: %lu bytes\n", + totalCacheCount * (sizeof(struct objc_cache) - sizeof(cache_entry *)) + + totalSlots * sizeof(cache_entry *) + + negativeEntryCount * sizeof(cache_entry)); +#endif + } +} + +#endif + + +// !__OBJC2__ +#endif diff --git a/runtime/objc-cache.h b/runtime/objc-cache.h new file mode 100644 index 0000000..de348a0 --- /dev/null +++ b/runtime/objc-cache.h @@ -0,0 +1,21 @@ + +#ifndef _OBJC_CACHE_H +#define _OBJC_CACHE_H + +#include "objc-private.h" + +__BEGIN_DECLS + +extern IMP cache_getImp(Class cls, SEL sel); + +extern void cache_fill(Class cls, SEL sel, IMP imp, id receiver); + +extern void cache_erase_nolock(Class cls); + +extern void cache_delete(Class cls); + +extern void cache_collect(bool collectALot); + +__END_DECLS + +#endif diff --git a/runtime/objc-cache.mm b/runtime/objc-cache.mm new file mode 100644 index 0000000..a0c6545 --- /dev/null +++ b/runtime/objc-cache.mm @@ -0,0 +1,1111 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-cache.m +* Method cache management +* Cache flushing +* Cache garbage collection +* Cache instrumentation +* Dedicated allocator for large caches +**********************************************************************/ + + +/*********************************************************************** + * Method cache locking (GrP 2001-1-14) + * + * For speed, objc_msgSend does not acquire any locks when it reads + * method caches. Instead, all cache changes are performed so that any + * objc_msgSend running concurrently with the cache mutator will not + * crash or hang or get an incorrect result from the cache. + * + * When cache memory becomes unused (e.g. the old cache after cache + * expansion), it is not immediately freed, because a concurrent + * objc_msgSend could still be using it. 
Instead, the memory is + * disconnected from the data structures and placed on a garbage list. + * The memory is now only accessible to instances of objc_msgSend that + * were running when the memory was disconnected; any further calls to + * objc_msgSend will not see the garbage memory because the other data + * structures don't point to it anymore. The collecting_in_critical + * function checks the PC of all threads and returns FALSE when all threads + * are found to be outside objc_msgSend. This means any call to objc_msgSend + * that could have had access to the garbage has finished or moved past the + * cache lookup stage, so it is safe to free the memory. + * + * All functions that modify cache data or structures must acquire the + * cacheUpdateLock to prevent interference from concurrent modifications. + * The function that frees cache garbage must acquire the cacheUpdateLock + * and use collecting_in_critical() to flush out cache readers. + * The cacheUpdateLock is also used to protect the custom allocator used + * for large method cache blocks. + * + * Cache readers (PC-checked by collecting_in_critical()) + * objc_msgSend* + * cache_getImp + * + * Cache writers (hold cacheUpdateLock while reading or writing; not PC-checked) + * cache_fill (acquires lock) + * cache_expand (only called from cache_fill) + * cache_create (only called from cache_expand) + * bcopy (only called from instrumented cache_expand) + * flush_caches (acquires lock) + * cache_flush (only called from cache_fill and flush_caches) + * cache_collect_free (only called from cache_expand and cache_flush) + * + * UNPROTECTED cache readers (NOT thread-safe; used for debug info only) + * cache_print + * _class_printMethodCaches + * _class_printDuplicateCacheEntries + * _class_printMethodCacheStatistics + * + ***********************************************************************/ + + +#if __OBJC2__ + +#include "objc-private.h" +#include "objc-cache.h" + + +/* Initial cache bucket count. INIT_CACHE_SIZE must be a power of two. */ +enum { + INIT_CACHE_SIZE_LOG2 = 2, + INIT_CACHE_SIZE = (1 << INIT_CACHE_SIZE_LOG2) +}; + +static void cache_collect_free(struct bucket_t *data, mask_t capacity); +static int _collecting_in_critical(void); +static void _garbage_make_room(void); + + +/*********************************************************************** +* Cache statistics for OBJC_PRINT_CACHE_SETUP +**********************************************************************/ +static unsigned int cache_counts[16]; +static size_t cache_allocations; +static size_t cache_collections; + +static void recordNewCache(mask_t capacity) +{ + size_t bucket = log2u(capacity); + if (bucket < countof(cache_counts)) { + cache_counts[bucket]++; + } + cache_allocations++; +} + +static void recordDeadCache(mask_t capacity) +{ + size_t bucket = log2u(capacity); + if (bucket < countof(cache_counts)) { + cache_counts[bucket]--; + } +} + +/*********************************************************************** +* Pointers used by compiled class objects +* These use asm to avoid conflicts with the compiler's internal declarations +**********************************************************************/ + +// EMPTY_BYTES includes space for a cache end marker bucket. +// This end marker doesn't actually have the wrap-around pointer +// because cache scans always find an empty bucket before they might wrap. +// 1024 buckets is fairly common. +#if DEBUG + // Use a smaller size to exercise heap-allocated empty caches. 
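+    // (The (N+1)*16 sizing is N buckets plus the end marker, 16 bytes each;
+    // 16 bytes holds a SEL key plus an IMP on 64-bit.)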
+# define EMPTY_BYTES ((8+1)*16) +#else +# define EMPTY_BYTES ((1024+1)*16) +#endif + +#define stringize(x) #x +#define stringize2(x) stringize(x) + +// "cache" is cache->buckets; "vtable" is cache->mask/occupied +// hack to avoid conflicts with compiler's internal declaration +asm("\n .section __TEXT,__const" + "\n .globl __objc_empty_vtable" + "\n .set __objc_empty_vtable, 0" + "\n .globl __objc_empty_cache" + "\n .align 3" + "\n __objc_empty_cache: .space " stringize2(EMPTY_BYTES) + ); + + +#if __arm__ || __x86_64__ || __i386__ +// objc_msgSend has few registers available. +// Cache scan increments and wraps at special end-marking bucket. +#define CACHE_END_MARKER 1 +static inline mask_t cache_next(mask_t i, mask_t mask) { + return (i+1) & mask; +} + +#elif __arm64__ +// objc_msgSend has lots of registers available. +// Cache scan decrements. No end marker needed. +#define CACHE_END_MARKER 0 +static inline mask_t cache_next(mask_t i, mask_t mask) { + return i ? i-1 : mask; +} + +#else +#error unknown architecture +#endif + + +// copied from dispatch_atomic_maximally_synchronizing_barrier +// fixme verify that this barrier hack does in fact work here +#if __x86_64__ +#define mega_barrier() \ + do { unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "rbx", "rcx", "rdx", "cc", "memory" \ + ); } while(0) + +#elif __i386__ +#define mega_barrier() \ + do { unsigned long _clbr; __asm__ __volatile__( \ + "cpuid" \ + : "=a" (_clbr) : "0" (0) : "ebx", "ecx", "edx", "cc", "memory" \ + ); } while(0) + +#elif __arm__ || __arm64__ +#define mega_barrier() \ + __asm__ __volatile__( \ + "dsb ish" \ + : : : "memory") + +#else +#error unknown architecture +#endif + +#if __arm64__ + +// Use atomic double-word instructions to update cache entries. +// This requires cache buckets not cross cache line boundaries. +#define stp(onep, twop, destp) \ + __asm__ ("stp %[one], %[two], [%[dest]]" \ + : "=m" (((uint64_t *)(destp))[0]), \ + "=m" (((uint64_t *)(destp))[1]) \ + : [one] "r" (onep), \ + [two] "r" (twop), \ + [dest] "r" (destp) \ + : /* no clobbers */ \ + ) +#define ldp(onep, twop, srcp) \ + __asm__ ("ldp %[one], %[two], [%[src]]" \ + : [one] "=r" (onep), \ + [two] "=r" (twop) \ + : "m" (((uint64_t *)(srcp))[0]), \ + "m" (((uint64_t *)(srcp))[1]), \ + [src] "r" (srcp) \ + : /* no clobbers */ \ + ) + +#endif + + +// Class points to cache. SEL is key. Cache buckets store SEL+IMP. +// Caches are never built in the dyld shared cache. + +static inline mask_t cache_hash(cache_key_t key, mask_t mask) +{ + return (mask_t)(key & mask); +} + +cache_t *getCache(Class cls) +{ + assert(cls); + return &cls->cache; +} + +cache_key_t getKey(SEL sel) +{ + assert(sel); + return (cache_key_t)sel; +} + +#if __arm64__ + +void bucket_t::set(cache_key_t newKey, IMP newImp) +{ + assert(_key == 0 || _key == newKey); + + // LDP/STP guarantees that all observers get + // either key/imp or newKey/newImp + stp(newKey, newImp, this); +} + +#else + +void bucket_t::set(cache_key_t newKey, IMP newImp) +{ + assert(_key == 0 || _key == newKey); + + // objc_msgSend uses key and imp with no locks. + // It is safe for objc_msgSend to see new imp but NULL key + // (It will get a cache miss but not dispatch to the wrong place.) + // It is unsafe for objc_msgSend to see old imp and new key. + // Therefore we write new imp, wait a lot, then write new key. 
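+    // ("Wait a lot" is the mega_barrier() below, which keeps the imp store
+    // visible to other CPUs before the key store.)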
+ + _imp = newImp; + + if (_key != newKey) { + mega_barrier(); + _key = newKey; + } +} + +#endif + +void cache_t::setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask) +{ + // objc_msgSend uses mask and buckets with no locks. + // It is safe for objc_msgSend to see new buckets but old mask. + // (It will get a cache miss but not overrun the buckets' bounds). + // It is unsafe for objc_msgSend to see old buckets and new mask. + // Therefore we write new buckets, wait a lot, then write new mask. + // objc_msgSend reads mask first, then buckets. + + // ensure other threads see buckets contents before buckets pointer + mega_barrier(); + + _buckets = newBuckets; + + // ensure other threads see new buckets before new mask + mega_barrier(); + + _mask = newMask; + _occupied = 0; +} + + +struct bucket_t *cache_t::buckets() +{ + return _buckets; +} + +mask_t cache_t::mask() +{ + return _mask; +} + +mask_t cache_t::occupied() +{ + return _occupied; +} + +void cache_t::incrementOccupied() +{ + _occupied++; +} + +void cache_t::initializeToEmpty() +{ + bzero(this, sizeof(*this)); + _buckets = (bucket_t *)&_objc_empty_cache; +} + + +mask_t cache_t::capacity() +{ + return mask() ? mask()+1 : 0; +} + + +#if CACHE_END_MARKER + +size_t cache_t::bytesForCapacity(uint32_t cap) +{ + // fixme put end marker inline when capacity+1 malloc is inefficient + return sizeof(bucket_t) * (cap + 1); +} + +bucket_t *cache_t::endMarker(struct bucket_t *b, uint32_t cap) +{ + // bytesForCapacity() chooses whether the end marker is inline or not + return (bucket_t *)((uintptr_t)b + bytesForCapacity(cap)) - 1; +} + +bucket_t *allocateBuckets(mask_t newCapacity) +{ + // Allocate one extra bucket to mark the end of the list. + // This can't overflow mask_t because newCapacity is a power of 2. + // fixme instead put the end mark inline when +1 is malloc-inefficient + bucket_t *newBuckets = (bucket_t *) + calloc(cache_t::bytesForCapacity(newCapacity), 1); + + bucket_t *end = cache_t::endMarker(newBuckets, newCapacity); + +#if __arm__ + // End marker's key is 1 and imp points BEFORE the first bucket. + // This saves an instruction in objc_msgSend. + end->setKey((cache_key_t)(uintptr_t)1); + end->setImp((IMP)(newBuckets - 1)); +#else + // End marker's key is 1 and imp points to the first bucket. + end->setKey((cache_key_t)(uintptr_t)1); + end->setImp((IMP)newBuckets); +#endif + + if (PrintCaches) recordNewCache(newCapacity); + + return newBuckets; +} + +#else + +size_t cache_t::bytesForCapacity(uint32_t cap) +{ + return sizeof(bucket_t) * cap; +} + +bucket_t *allocateBuckets(mask_t newCapacity) +{ + if (PrintCaches) recordNewCache(newCapacity); + + return (bucket_t *)calloc(cache_t::bytesForCapacity(newCapacity), 1); +} + +#endif + + +bucket_t *emptyBucketsForCapacity(mask_t capacity, bool allocate = true) +{ + cacheUpdateLock.assertLocked(); + + size_t bytes = cache_t::bytesForCapacity(capacity); + + // Use _objc_empty_cache if the buckets is small enough. + if (bytes <= EMPTY_BYTES) { + return (bucket_t *)&_objc_empty_cache; + } + + // Use shared empty buckets allocated on the heap. 
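+    // (Sketch of the scheme that follows: emptyBucketsList is indexed by
+    // log2u(capacity). One zero-filled allocation, sized for the largest
+    // capacity seen so far, is shared by every smaller capacity as well,
+    // so erasing a cache never allocates per-class empty buckets.)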
+ static bucket_t **emptyBucketsList = nil; + static mask_t emptyBucketsListCount = 0; + + mask_t index = log2u(capacity); + + if (index >= emptyBucketsListCount) { + if (!allocate) return nil; + + mask_t newListCount = index + 1; + bucket_t *newBuckets = (bucket_t *)calloc(bytes, 1); + emptyBucketsList = (bucket_t**) + realloc(emptyBucketsList, newListCount * sizeof(bucket_t *)); + // Share newBuckets for every un-allocated size smaller than index. + // The array is therefore always fully populated. + for (mask_t i = emptyBucketsListCount; i < newListCount; i++) { + emptyBucketsList[i] = newBuckets; + } + emptyBucketsListCount = newListCount; + + if (PrintCaches) { + _objc_inform("CACHES: new empty buckets at %p (capacity %zu)", + newBuckets, (size_t)capacity); + } + } + + return emptyBucketsList[index]; +} + + +bool cache_t::isConstantEmptyCache() +{ + return + occupied() == 0 && + buckets() == emptyBucketsForCapacity(capacity(), false); +} + +bool cache_t::canBeFreed() +{ + return !isConstantEmptyCache(); +} + + +void cache_t::reallocate(mask_t oldCapacity, mask_t newCapacity) +{ + bool freeOld = canBeFreed(); + + bucket_t *oldBuckets = buckets(); + bucket_t *newBuckets = allocateBuckets(newCapacity); + + // Cache's old contents are not propagated. + // This is thought to save cache memory at the cost of extra cache fills. + // fixme re-measure this + + assert(newCapacity > 0); + assert((uintptr_t)(mask_t)(newCapacity-1) == newCapacity-1); + + setBucketsAndMask(newBuckets, newCapacity - 1); + + if (freeOld) { + cache_collect_free(oldBuckets, oldCapacity); + cache_collect(false); + } +} + + +void cache_t::bad_cache(id receiver, SEL sel, Class isa) +{ + // Log in separate steps in case the logging itself causes a crash. + _objc_inform_now_and_on_crash + ("Method cache corrupted. This may be a message to an " + "invalid object, or a memory error somewhere else."); + cache_t *cache = &isa->cache; + _objc_inform_now_and_on_crash + ("%s %p, SEL %p, isa %p, cache %p, buckets %p, " + "mask 0x%x, occupied 0x%x", + receiver ? "receiver" : "unused", receiver, + sel, isa, cache, cache->_buckets, + cache->_mask, cache->_occupied); + _objc_inform_now_and_on_crash + ("%s %zu bytes, buckets %zu bytes", + receiver ? "receiver" : "unused", malloc_size(receiver), + malloc_size(cache->_buckets)); + _objc_inform_now_and_on_crash + ("selector '%s'", sel_getName(sel)); + _objc_inform_now_and_on_crash + ("isa '%s'", isa->nameForLogging()); + _objc_fatal + ("Method cache corrupted. This may be a message to an " + "invalid object, or a memory error somewhere else."); +} + + +bucket_t * cache_t::find(cache_key_t k, id receiver) +{ + assert(k != 0); + + bucket_t *b = buckets(); + mask_t m = mask(); + mask_t begin = cache_hash(k, m); + mask_t i = begin; + do { + if (b[i].key() == 0 || b[i].key() == k) { + return &b[i]; + } + } while ((i = cache_next(i, m)) != begin); + + // hack + Class cls = (Class)((uintptr_t)this - offsetof(objc_class, cache)); + cache_t::bad_cache(receiver, (SEL)k, cls); +} + + +void cache_t::expand() +{ + cacheUpdateLock.assertLocked(); + + uint32_t oldCapacity = capacity(); + uint32_t newCapacity = oldCapacity ? 
oldCapacity*2 : INIT_CACHE_SIZE; + + if ((uint32_t)(mask_t)newCapacity != newCapacity) { + // mask overflow - can't grow further + // fixme this wastes one bit of mask + newCapacity = oldCapacity; + } + + reallocate(oldCapacity, newCapacity); +} + + +static void cache_fill_nolock(Class cls, SEL sel, IMP imp, id receiver) +{ + cacheUpdateLock.assertLocked(); + + // Never cache before +initialize is done + if (!cls->isInitialized()) return; + + // Make sure the entry wasn't added to the cache by some other thread + // before we grabbed the cacheUpdateLock. + if (cache_getImp(cls, sel)) return; + + cache_t *cache = getCache(cls); + cache_key_t key = getKey(sel); + + // Use the cache as-is if it is less than 3/4 full + mask_t newOccupied = cache->occupied() + 1; + mask_t capacity = cache->capacity(); + if (cache->isConstantEmptyCache()) { + // Cache is read-only. Replace it. + cache->reallocate(capacity, capacity ?: INIT_CACHE_SIZE); + } + else if (newOccupied <= capacity / 4 * 3) { + // Cache is less than 3/4 full. Use it as-is. + } + else { + // Cache is too full. Expand it. + cache->expand(); + } + + // Scan for the first unused slot and insert there. + // There is guaranteed to be an empty slot because the + // minimum size is 4 and we resized at 3/4 full. + bucket_t *bucket = cache->find(key, receiver); + if (bucket->key() == 0) cache->incrementOccupied(); + bucket->set(key, imp); +} + +void cache_fill(Class cls, SEL sel, IMP imp, id receiver) +{ +#if !DEBUG_TASK_THREADS + mutex_locker_t lock(cacheUpdateLock); + cache_fill_nolock(cls, sel, imp, receiver); +#else + _collecting_in_critical(); + return; +#endif +} + + +// Reset this entire cache to the uncached lookup by reallocating it. +// This must not shrink the cache - that breaks the lock-free scheme. +void cache_erase_nolock(Class cls) +{ + cacheUpdateLock.assertLocked(); + + cache_t *cache = getCache(cls); + + mask_t capacity = cache->capacity(); + if (capacity > 0 && cache->occupied() > 0) { + auto oldBuckets = cache->buckets(); + auto buckets = emptyBucketsForCapacity(capacity); + cache->setBucketsAndMask(buckets, capacity - 1); // also clears occupied + + cache_collect_free(oldBuckets, capacity); + cache_collect(false); + } +} + + +void cache_delete(Class cls) +{ + mutex_locker_t lock(cacheUpdateLock); + if (cls->cache.canBeFreed()) { + if (PrintCaches) recordDeadCache(cls->cache.capacity()); + free(cls->cache.buckets()); + } +} + + +/*********************************************************************** +* cache collection. +**********************************************************************/ + +#if !TARGET_OS_WIN32 + +// A sentinel (magic value) to report bad thread_get_state status. +// Must not be a valid PC. +// Must not be zero - thread_get_state() on a new thread returns PC == 0. +#define PC_SENTINEL 1 + +static uintptr_t _get_pc_for_thread(thread_t thread) +#if defined(__i386__) +{ + i386_thread_state_t state; + unsigned int count = i386_THREAD_STATE_COUNT; + kern_return_t okay = thread_get_state (thread, i386_THREAD_STATE, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__eip : PC_SENTINEL; +} +#elif defined(__x86_64__) +{ + x86_thread_state64_t state; + unsigned int count = x86_THREAD_STATE64_COUNT; + kern_return_t okay = thread_get_state (thread, x86_THREAD_STATE64, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? 
state.__rip : PC_SENTINEL; +} +#elif defined(__arm__) +{ + arm_thread_state_t state; + unsigned int count = ARM_THREAD_STATE_COUNT; + kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL; +} +#elif defined(__arm64__) +{ + arm_thread_state64_t state; + unsigned int count = ARM_THREAD_STATE64_COUNT; + kern_return_t okay = thread_get_state (thread, ARM_THREAD_STATE64, (thread_state_t)&state, &count); + return (okay == KERN_SUCCESS) ? state.__pc : PC_SENTINEL; +} +#else +{ +#error _get_pc_for_thread () not implemented for this architecture +} +#endif + +#endif + +/*********************************************************************** +* _collecting_in_critical. +* Returns TRUE if some thread is currently executing a cache-reading +* function. Collection of cache garbage is not allowed when a cache- +* reading function is in progress because it might still be using +* the garbage memory. +**********************************************************************/ +OBJC_EXPORT uintptr_t objc_entryPoints[]; +OBJC_EXPORT uintptr_t objc_exitPoints[]; + +static int _collecting_in_critical(void) +{ +#if TARGET_OS_WIN32 + return TRUE; +#else + thread_act_port_array_t threads; + unsigned number; + unsigned count; + kern_return_t ret; + int result; + + mach_port_t mythread = pthread_mach_thread_np(pthread_self()); + + // Get a list of all the threads in the current task +#if !DEBUG_TASK_THREADS + ret = task_threads(mach_task_self(), &threads, &number); +#else + ret = objc_task_threads(mach_task_self(), &threads, &number); +#endif + + if (ret != KERN_SUCCESS) { + // See DEBUG_TASK_THREADS below to help debug this. + _objc_fatal("task_threads failed (result 0x%x)\n", ret); + } + + // Check whether any thread is in the cache lookup code + result = FALSE; + for (count = 0; count < number; count++) + { + int region; + uintptr_t pc; + + // Don't bother checking ourselves + if (threads[count] == mythread) + continue; + + // Find out where thread is executing + pc = _get_pc_for_thread (threads[count]); + + // Check for bad status, and if so, assume the worse (can't collect) + if (pc == PC_SENTINEL) + { + result = TRUE; + goto done; + } + + // Check whether it is in the cache lookup code + for (region = 0; objc_entryPoints[region] != 0; region++) + { + if ((pc >= objc_entryPoints[region]) && + (pc <= objc_exitPoints[region])) + { + result = TRUE; + goto done; + } + } + } + + done: + // Deallocate the port rights for the threads + for (count = 0; count < number; count++) { + mach_port_deallocate(mach_task_self (), threads[count]); + } + + // Deallocate the thread list + vm_deallocate (mach_task_self (), (vm_address_t) threads, sizeof(threads[0]) * number); + + // Return our finding + return result; +#endif +} + + +/*********************************************************************** +* _garbage_make_room. Ensure that there is enough room for at least +* one more ref in the garbage. 
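+* The table starts with INIT_GARBAGE_COUNT slots and is doubled in place
+* whenever it fills; entries are only appended while cacheUpdateLock is held.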
+**********************************************************************/ + +// amount of memory represented by all refs in the garbage +static size_t garbage_byte_size = 0; + +// do not empty the garbage until garbage_byte_size gets at least this big +static size_t garbage_threshold = 32*1024; + +// table of refs to free +static bucket_t **garbage_refs = 0; + +// current number of refs in garbage_refs +static size_t garbage_count = 0; + +// capacity of current garbage_refs +static size_t garbage_max = 0; + +// capacity of initial garbage_refs +enum { + INIT_GARBAGE_COUNT = 128 +}; + +static void _garbage_make_room(void) +{ + static int first = 1; + + // Create the collection table the first time it is needed + if (first) + { + first = 0; + garbage_refs = (bucket_t**) + malloc(INIT_GARBAGE_COUNT * sizeof(void *)); + garbage_max = INIT_GARBAGE_COUNT; + } + + // Double the table if it is full + else if (garbage_count == garbage_max) + { + garbage_refs = (bucket_t**) + realloc(garbage_refs, garbage_max * 2 * sizeof(void *)); + garbage_max *= 2; + } +} + + +/*********************************************************************** +* cache_collect_free. Add the specified malloc'd memory to the list +* of them to free at some later point. +* size is used for the collection threshold. It does not have to be +* precisely the block's size. +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +static void cache_collect_free(bucket_t *data, mask_t capacity) +{ + cacheUpdateLock.assertLocked(); + + if (PrintCaches) recordDeadCache(capacity); + + _garbage_make_room (); + garbage_byte_size += cache_t::bytesForCapacity(capacity); + garbage_refs[garbage_count++] = data; +} + + +/*********************************************************************** +* cache_collect. Try to free accumulated dead caches. +* collectALot tries harder to free memory. +* Cache locks: cacheUpdateLock must be held by the caller. +**********************************************************************/ +void cache_collect(bool collectALot) +{ + cacheUpdateLock.assertLocked(); + + // Done if the garbage is not full + if (garbage_byte_size < garbage_threshold && !collectALot) { + return; + } + + // Synchronize collection with objc_msgSend and other cache readers + if (!collectALot) { + if (_collecting_in_critical ()) { + // objc_msgSend (or other cache reader) is currently looking in + // the cache and might still be using some garbage. + if (PrintCaches) { + _objc_inform ("CACHES: not collecting; " + "objc_msgSend in progress"); + } + return; + } + } + else { + // No excuses. + while (_collecting_in_critical()) + ; + } + + // No cache readers in progress - garbage is now deletable + + // Log our progress + if (PrintCaches) { + cache_collections++; + _objc_inform ("CACHES: COLLECTING %zu bytes (%zu allocations, %zu collections)", garbage_byte_size, cache_allocations, cache_collections); + } + + // Dispose all refs now in the garbage + // Erase each entry so debugging tools don't see stale pointers. 
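+    // (Note: the post-decrement below leaves garbage_count at (size_t)-1 once
+    // the loop exits, which is why it is explicitly reset to 0 afterwards.)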
+ while (garbage_count--) { + auto dead = garbage_refs[garbage_count]; + garbage_refs[garbage_count] = nil; + free(dead); + } + + // Clear the garbage count and total size indicator + garbage_count = 0; + garbage_byte_size = 0; + + if (PrintCaches) { + size_t i; + size_t total_count = 0; + size_t total_size = 0; + + for (i = 0; i < countof(cache_counts); i++) { + int count = cache_counts[i]; + int slots = 1 << i; + size_t size = count * slots * sizeof(bucket_t); + + if (!count) continue; + + _objc_inform("CACHES: %4d slots: %4d caches, %6zu bytes", + slots, count, size); + + total_count += count; + total_size += size; + } + + _objc_inform("CACHES: total: %4zu caches, %6zu bytes", + total_count, total_size); + } +} + + +/*********************************************************************** +* objc_task_threads +* Replacement for task_threads(). Define DEBUG_TASK_THREADS to debug +* crashes when task_threads() is failing. +* +* A failure in task_threads() usually means somebody has botched their +* Mach or MIG traffic. For example, somebody's error handling was wrong +* and they left a message queued on the MIG reply port for task_threads() +* to trip over. +* +* The code below is a modified version of task_threads(). It logs +* the msgh_id of the reply message. The msgh_id can identify the sender +* of the message, which can help pinpoint the faulty code. +* DEBUG_TASK_THREADS also calls collecting_in_critical() during every +* message dispatch, which can increase reproducibility of bugs. +* +* This code can be regenerated by running +* `mig /usr/include/mach/task.defs`. +**********************************************************************/ +#if DEBUG_TASK_THREADS + +#include +#include +#include + +#define __MIG_check__Reply__task_subsystem__ 1 +#define mig_internal static inline +#define __DeclareSendRpc(a, b) +#define __BeforeSendRpc(a, b) +#define __AfterSendRpc(a, b) +#define msgh_request_port msgh_remote_port +#define msgh_reply_port msgh_local_port + +#ifndef __MachMsgErrorWithTimeout +#define __MachMsgErrorWithTimeout(_R_) { \ + switch (_R_) { \ + case MACH_SEND_INVALID_DATA: \ + case MACH_SEND_INVALID_DEST: \ + case MACH_SEND_INVALID_HEADER: \ + mig_put_reply_port(InP->Head.msgh_reply_port); \ + break; \ + case MACH_SEND_TIMED_OUT: \ + case MACH_RCV_TIMED_OUT: \ + default: \ + mig_dealloc_reply_port(InP->Head.msgh_reply_port); \ + } \ + } +#endif /* __MachMsgErrorWithTimeout */ + +#ifndef __MachMsgErrorWithoutTimeout +#define __MachMsgErrorWithoutTimeout(_R_) { \ + switch (_R_) { \ + case MACH_SEND_INVALID_DATA: \ + case MACH_SEND_INVALID_DEST: \ + case MACH_SEND_INVALID_HEADER: \ + mig_put_reply_port(InP->Head.msgh_reply_port); \ + break; \ + default: \ + mig_dealloc_reply_port(InP->Head.msgh_reply_port); \ + } \ + } +#endif /* __MachMsgErrorWithoutTimeout */ + + +#if ( __MigTypeCheck ) +#if __MIG_check__Reply__task_subsystem__ +#if !defined(__MIG_check__Reply__task_threads_t__defined) +#define __MIG_check__Reply__task_threads_t__defined + +mig_internal kern_return_t __MIG_check__Reply__task_threads_t(__Reply__task_threads_t *Out0P) +{ + + typedef __Reply__task_threads_t __Reply; + boolean_t msgh_simple; +#if __MigTypeCheck + unsigned int msgh_size; +#endif /* __MigTypeCheck */ + if (Out0P->Head.msgh_id != 3502) { + if (Out0P->Head.msgh_id == MACH_NOTIFY_SEND_ONCE) + { return MIG_SERVER_DIED; } + else + { return MIG_REPLY_MISMATCH; } + } + + msgh_simple = !(Out0P->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX); +#if __MigTypeCheck + msgh_size = Out0P->Head.msgh_size; + + if 
((msgh_simple || Out0P->msgh_body.msgh_descriptor_count != 1 || + msgh_size != (mach_msg_size_t)sizeof(__Reply)) && + (!msgh_simple || msgh_size != (mach_msg_size_t)sizeof(mig_reply_error_t) || + ((mig_reply_error_t *)Out0P)->RetCode == KERN_SUCCESS)) + { return MIG_TYPE_ERROR ; } +#endif /* __MigTypeCheck */ + + if (msgh_simple) { + return ((mig_reply_error_t *)Out0P)->RetCode; + } + +#if __MigTypeCheck + if (Out0P->act_list.type != MACH_MSG_OOL_PORTS_DESCRIPTOR || + Out0P->act_list.disposition != 17) { + return MIG_TYPE_ERROR; + } +#endif /* __MigTypeCheck */ + + return MACH_MSG_SUCCESS; +} +#endif /* !defined(__MIG_check__Reply__task_threads_t__defined) */ +#endif /* __MIG_check__Reply__task_subsystem__ */ +#endif /* ( __MigTypeCheck ) */ + + +/* Routine task_threads */ +static kern_return_t objc_task_threads +( + task_t target_task, + thread_act_array_t *act_list, + mach_msg_type_number_t *act_listCnt +) +{ + +#ifdef __MigPackStructs +#pragma pack(4) +#endif + typedef struct { + mach_msg_header_t Head; + } Request; +#ifdef __MigPackStructs +#pragma pack() +#endif + +#ifdef __MigPackStructs +#pragma pack(4) +#endif + typedef struct { + mach_msg_header_t Head; + /* start of the kernel processed data */ + mach_msg_body_t msgh_body; + mach_msg_ool_ports_descriptor_t act_list; + /* end of the kernel processed data */ + NDR_record_t NDR; + mach_msg_type_number_t act_listCnt; + mach_msg_trailer_t trailer; + } Reply; +#ifdef __MigPackStructs +#pragma pack() +#endif + +#ifdef __MigPackStructs +#pragma pack(4) +#endif + typedef struct { + mach_msg_header_t Head; + /* start of the kernel processed data */ + mach_msg_body_t msgh_body; + mach_msg_ool_ports_descriptor_t act_list; + /* end of the kernel processed data */ + NDR_record_t NDR; + mach_msg_type_number_t act_listCnt; + } __Reply; +#ifdef __MigPackStructs +#pragma pack() +#endif + /* + * typedef struct { + * mach_msg_header_t Head; + * NDR_record_t NDR; + * kern_return_t RetCode; + * } mig_reply_error_t; + */ + + union { + Request In; + Reply Out; + } Mess; + + Request *InP = &Mess.In; + Reply *Out0P = &Mess.Out; + + mach_msg_return_t msg_result; + +#ifdef __MIG_check__Reply__task_threads_t__defined + kern_return_t check_result; +#endif /* __MIG_check__Reply__task_threads_t__defined */ + + __DeclareSendRpc(3402, "task_threads") + + InP->Head.msgh_bits = + MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE); + /* msgh_size passed as argument */ + InP->Head.msgh_request_port = target_task; + InP->Head.msgh_reply_port = mig_get_reply_port(); + InP->Head.msgh_id = 3402; + + __BeforeSendRpc(3402, "task_threads") + msg_result = mach_msg(&InP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), InP->Head.msgh_reply_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); + __AfterSendRpc(3402, "task_threads") + if (msg_result != MACH_MSG_SUCCESS) { + _objc_inform("task_threads received unexpected reply msgh_id 0x%zx", + (size_t)Out0P->Head.msgh_id); + __MachMsgErrorWithoutTimeout(msg_result); + { return msg_result; } + } + + +#if defined(__MIG_check__Reply__task_threads_t__defined) + check_result = __MIG_check__Reply__task_threads_t((__Reply__task_threads_t *)Out0P); + if (check_result != MACH_MSG_SUCCESS) + { return check_result; } +#endif /* defined(__MIG_check__Reply__task_threads_t__defined) */ + + *act_list = (thread_act_array_t)(Out0P->act_list.address); + *act_listCnt = Out0P->act_listCnt; + + return KERN_SUCCESS; +} + +// DEBUG_TASK_THREADS +#endif + + +// __OBJC2__ +#endif diff --git 
a/runtime/objc-class-old.mm b/runtime/objc-class-old.mm new file mode 100644 index 0000000..f62bdee --- /dev/null +++ b/runtime/objc-class-old.mm @@ -0,0 +1,2528 @@ +/* + * Copyright (c) 1999-2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-class-old.m +* Support for old-ABI classes, methods, and categories. +**********************************************************************/ + +#if !__OBJC2__ + +#include "objc-private.h" +#include "objc-runtime-old.h" +#include "objc-file-old.h" +#include "objc-cache-old.h" + +static Method _class_getMethod(Class cls, SEL sel); +static Method _class_getMethodNoSuper(Class cls, SEL sel); +static Method _class_getMethodNoSuper_nolock(Class cls, SEL sel); +static void flush_caches(Class cls, bool flush_meta); + + +// Freed objects have their isa set to point to this dummy class. +// This avoids the need to check for Nil classes in the messenger. +static const void* freedObjectClass[12] = +{ + Nil, // isa + Nil, // superclass + "FREED(id)", // name + 0, // version + 0, // info + 0, // instance_size + nil, // ivars + nil, // methodLists + (Cache) &_objc_empty_cache, // cache + nil, // protocols + nil, // ivar_layout; + nil // ext +}; + + +/*********************************************************************** +* _class_getFreedObjectClass. Return a pointer to the dummy freed +* object class. Freed objects get their isa pointers replaced with +* a pointer to the freedObjectClass, so that we can catch usages of +* the freed object. +**********************************************************************/ +static Class _class_getFreedObjectClass(void) +{ + return (Class)freedObjectClass; +} + + +/*********************************************************************** +* _objc_getFreedObjectClass. Return a pointer to the dummy freed +* object class. Freed objects get their isa pointers replaced with +* a pointer to the freedObjectClass, so that we can catch usages of +* the freed object. +**********************************************************************/ +Class _objc_getFreedObjectClass(void) +{ + return _class_getFreedObjectClass(); +} + + +static void allocateExt(Class cls) +{ + if (! 
(cls->info & CLS_EXT)) { + _objc_inform("class '%s' needs to be recompiled", cls->name); + return; + } + if (!cls->ext) { + uint32_t size = (uint32_t)sizeof(old_class_ext); + cls->ext = (old_class_ext *)calloc(size, 1); + cls->ext->size = size; + } +} + + +static inline old_method *_findNamedMethodInList(old_method_list * mlist, const char *meth_name) { + int i; + if (!mlist) return nil; + for (i = 0; i < mlist->method_count; i++) { + old_method *m = &mlist->method_list[i]; + if (0 == strcmp((const char *)(m->method_name), meth_name)) { + return m; + } + } + return nil; +} + + +/*********************************************************************** +* Method list fixup markers. +* mlist->obsolete == fixed_up_method_list marks method lists with real SELs +* versus method lists with un-uniqued char*. +* PREOPTIMIZED VERSION: +* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP +* dyld shared cache sets this for method lists it preoptimizes. +* UN-PREOPTIMIZED VERSION +* Fixed-up method lists get mlist->obsolete == OBJC_FIXED_UP_outside_dyld +* dyld shared cache uses OBJC_FIXED_UP, but those aren't trusted. +**********************************************************************/ +#define OBJC_FIXED_UP ((void *)1771) +#define OBJC_FIXED_UP_outside_dyld ((void *)1773) +static void *fixed_up_method_list = OBJC_FIXED_UP; + +// sel_init() decided that selectors in the dyld shared cache are untrustworthy +void disableSharedCacheOptimizations(void) +{ + fixed_up_method_list = OBJC_FIXED_UP_outside_dyld; +} + +/*********************************************************************** +* fixupSelectorsInMethodList +* Uniques selectors in the given method list. +* The given method list must be non-nil and not already fixed-up. +* If the class was loaded from a bundle: +* fixes up the given list in place with heap-allocated selector strings +* If the class was not from a bundle: +* allocates a copy of the method list, fixes up the copy, and returns +* the copy. The given list is unmodified. +* +* If cls is already in use, methodListLock must be held by the caller. +**********************************************************************/ +static old_method_list *fixupSelectorsInMethodList(Class cls, old_method_list *mlist) +{ + int i; + size_t size; + old_method *method; + old_method_list *old_mlist; + + if ( ! mlist ) return nil; + if ( mlist->obsolete == fixed_up_method_list ) { + // method list OK + } else { + bool isBundle = cls->info & CLS_FROM_BUNDLE; + if (!isBundle) { + old_mlist = mlist; + size = sizeof(old_method_list) - sizeof(old_method) + old_mlist->method_count * sizeof(old_method); + mlist = (old_method_list *)malloc(size); + memmove(mlist, old_mlist, size); + } else { + // Mach-O bundles are fixed up in place. + // This prevents leaks when a bundle is unloaded. + } + sel_lock(); + for ( i = 0; i < mlist->method_count; i += 1 ) { + method = &mlist->method_list[i]; + method->method_name = + sel_registerNameNoLock((const char *)method->method_name, isBundle); // Always copy selector data from bundles. + } + sel_unlock(); + mlist->obsolete = fixed_up_method_list; + } + return mlist; +} + + +/*********************************************************************** +* nextMethodList +* Returns successive method lists from the given class. +* Method lists are returned in method search order (i.e. highest-priority +* implementations first). +* All necessary method list fixups are performed, so the +* returned method list is fully-constructed. 
+* +* If cls is already in use, methodListLock must be held by the caller. +* For full thread-safety, methodListLock must be continuously held by the +* caller across all calls to nextMethodList(). If the lock is released, +* the bad results listed in class_nextMethodList() may occur. +* +* void *iterator = nil; +* old_method_list *mlist; +* mutex_locker_t lock(methodListLock); +* while ((mlist = nextMethodList(cls, &iterator))) { +* // do something with mlist +* } +**********************************************************************/ +static old_method_list *nextMethodList(Class cls, + void **it) +{ + uintptr_t index = *(uintptr_t *)it; + old_method_list **resultp; + + if (index == 0) { + // First call to nextMethodList. + if (!cls->methodLists) { + resultp = nil; + } else if (cls->info & CLS_NO_METHOD_ARRAY) { + resultp = (old_method_list **)&cls->methodLists; + } else { + resultp = &cls->methodLists[0]; + if (!*resultp || *resultp == END_OF_METHODS_LIST) { + resultp = nil; + } + } + } else { + // Subsequent call to nextMethodList. + if (!cls->methodLists) { + resultp = nil; + } else if (cls->info & CLS_NO_METHOD_ARRAY) { + resultp = nil; + } else { + resultp = &cls->methodLists[index]; + if (!*resultp || *resultp == END_OF_METHODS_LIST) { + resultp = nil; + } + } + } + + // resultp now is nil, meaning there are no more method lists, + // OR the address of the method list pointer to fix up and return. + + if (resultp) { + if (*resultp) { + *resultp = fixupSelectorsInMethodList(cls, *resultp); + } + *it = (void *)(index + 1); + return *resultp; + } else { + *it = 0; + return nil; + } +} + + +/* These next three functions are the heart of ObjC method lookup. + * If the class is currently in use, methodListLock must be held by the caller. + */ +static inline old_method *_findMethodInList(old_method_list * mlist, SEL sel) { + int i; + if (!mlist) return nil; + for (i = 0; i < mlist->method_count; i++) { + old_method *m = &mlist->method_list[i]; + if (m->method_name == sel) { + return m; + } + } + return nil; +} + +static inline old_method * _findMethodInClass(Class cls, SEL sel) __attribute__((always_inline)); +static inline old_method * _findMethodInClass(Class cls, SEL sel) { + // Flattened version of nextMethodList(). The optimizer doesn't + // do a good job with hoisting the conditionals out of the loop. + // Conceptually, this looks like: + // while ((mlist = nextMethodList(cls, &iterator))) { + // old_method *m = _findMethodInList(mlist, sel); + // if (m) return m; + // } + + if (!cls->methodLists) { + // No method lists. + return nil; + } + else if (cls->info & CLS_NO_METHOD_ARRAY) { + // One method list. + old_method_list **mlistp; + mlistp = (old_method_list **)&cls->methodLists; + *mlistp = fixupSelectorsInMethodList(cls, *mlistp); + return _findMethodInList(*mlistp, sel); + } + else { + // Multiple method lists. 
+ old_method_list **mlistp; + for (mlistp = cls->methodLists; + *mlistp != nil && *mlistp != END_OF_METHODS_LIST; + mlistp++) + { + old_method *m; + *mlistp = fixupSelectorsInMethodList(cls, *mlistp); + m = _findMethodInList(*mlistp, sel); + if (m) return m; + } + return nil; + } +} + +static inline old_method * _getMethod(Class cls, SEL sel) { + for (; cls; cls = cls->superclass) { + old_method *m; + m = _findMethodInClass(cls, sel); + if (m) return m; + } + return nil; +} + + +// called by a debugging check in _objc_insertMethods +IMP findIMPInClass(Class cls, SEL sel) +{ + old_method *m = _findMethodInClass(cls, sel); + if (m) return m->method_imp; + else return nil; +} + + +/*********************************************************************** +* _freedHandler. +**********************************************************************/ +static void _freedHandler(id obj, SEL sel) +{ + __objc_error (obj, "message %s sent to freed object=%p", + sel_getName(sel), (void*)obj); +} + + +/*********************************************************************** +* log_and_fill_cache +* Log this method call. If the logger permits it, fill the method cache. +* cls is the method whose cache should be filled. +* implementer is the class that owns the implementation in question. +**********************************************************************/ +static void +log_and_fill_cache(Class cls, Class implementer, Method meth, SEL sel) +{ +#if SUPPORT_MESSAGE_LOGGING + if (objcMsgLogEnabled) { + bool cacheIt = logMessageSend(implementer->isMetaClass(), + cls->nameForLogging(), + implementer->nameForLogging(), + sel); + if (!cacheIt) return; + } +#endif + _cache_fill (cls, meth, sel); +} + + +/*********************************************************************** +* _class_lookupMethodAndLoadCache. +* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImp(). +* This lookup avoids optimistic cache scan because the dispatcher +* already tried that. +**********************************************************************/ +IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls) +{ + return lookUpImpOrForward(cls, sel, obj, + YES/*initialize*/, NO/*cache*/, YES/*resolver*/); +} + + +/*********************************************************************** +* lookUpImpOrForward. +* The standard IMP lookup. +* initialize==NO tries to avoid +initialize (but sometimes fails) +* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere) +* Most callers should use initialize==YES and cache==YES. +* inst is an instance of cls or a subclass thereof, or nil if none is known. +* If cls is an un-initialized metaclass then a non-nil inst is faster. +* May return _objc_msgForward_impcache. IMPs destined for external use +* must be converted to _objc_msgForward or _objc_msgForward_stret. +* If you don't want forwarding at all, use lookUpImpOrNil() instead. 
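+* For example, a caller that wants nil instead of the forwarding IMP, for a
+* selector taking no arguments and returning void, might do (sketch only):
+*   IMP imp = lookUpImpOrNil(cls, sel, inst, YES, YES, YES);
+*   if (imp) ((void (*)(id, SEL))imp)(inst, sel);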
+**********************************************************************/ +IMP lookUpImpOrForward(Class cls, SEL sel, id inst, + bool initialize, bool cache, bool resolver) +{ + Class curClass; + IMP methodPC = nil; + Method meth; + bool triedResolver = NO; + + methodListLock.assertUnlocked(); + + // Optimistic cache lookup + if (cache) { + methodPC = _cache_getImp(cls, sel); + if (methodPC) return methodPC; + } + + // Check for freed class + if (cls == _class_getFreedObjectClass()) + return (IMP) _freedHandler; + + // Check for +initialize + if (initialize && !cls->isInitialized()) { + _class_initialize (_class_getNonMetaClass(cls, inst)); + // If sel == initialize, _class_initialize will send +initialize and + // then the messenger will send +initialize again after this + // procedure finishes. Of course, if this is not being called + // from the messenger then it won't happen. 2778172 + } + + // The lock is held to make method-lookup + cache-fill atomic + // with respect to method addition. Otherwise, a category could + // be added but ignored indefinitely because the cache was re-filled + // with the old value after the cache flush on behalf of the category. + retry: + methodListLock.lock(); + + // Try this class's cache. + + methodPC = _cache_getImp(cls, sel); + if (methodPC) goto done; + + // Try this class's method lists. + + meth = _class_getMethodNoSuper_nolock(cls, sel); + if (meth) { + log_and_fill_cache(cls, cls, meth, sel); + methodPC = method_getImplementation(meth); + goto done; + } + + // Try superclass caches and method lists. + + curClass = cls; + while ((curClass = curClass->superclass)) { + // Superclass cache. + meth = _cache_getMethod(curClass, sel, _objc_msgForward_impcache); + if (meth) { + if (meth != (Method)1) { + // Found the method in a superclass. Cache it in this class. + log_and_fill_cache(cls, curClass, meth, sel); + methodPC = method_getImplementation(meth); + goto done; + } + else { + // Found a forward:: entry in a superclass. + // Stop searching, but don't cache yet; call method + // resolver for this class first. + break; + } + } + + // Superclass method list. + meth = _class_getMethodNoSuper_nolock(curClass, sel); + if (meth) { + log_and_fill_cache(cls, curClass, meth, sel); + methodPC = method_getImplementation(meth); + goto done; + } + } + + // No implementation found. Try method resolver once. + + if (resolver && !triedResolver) { + methodListLock.unlock(); + _class_resolveMethod(cls, sel, inst); + triedResolver = YES; + goto retry; + } + + // No implementation found, and method resolver didn't help. + // Use forwarding. + + _cache_addForwardEntry(cls, sel); + methodPC = _objc_msgForward_impcache; + + done: + methodListLock.unlock(); + + return methodPC; +} + + +/*********************************************************************** +* lookUpImpOrNil. +* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache +**********************************************************************/ +IMP lookUpImpOrNil(Class cls, SEL sel, id inst, + bool initialize, bool cache, bool resolver) +{ + IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver); + if (imp == _objc_msgForward_impcache) return nil; + else return imp; +} + + +/*********************************************************************** +* lookupMethodInClassAndLoadCache. +* Like _class_lookupMethodAndLoadCache, but does not search superclasses. +* Caches and returns objc_msgForward if the method is not found in the class. 
+**********************************************************************/ +IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel) +{ + Method meth; + IMP imp; + + // fixme this still has the method list vs method cache race + // because it doesn't hold a lock across lookup+cache_fill, + // but it's only used for .cxx_construct/destruct and we assume + // categories don't change them. + + // Search cache first. + imp = _cache_getImp(cls, sel); + if (imp) return imp; + + // Cache miss. Search method list. + + meth = _class_getMethodNoSuper(cls, sel); + + if (meth) { + // Hit in method list. Cache it. + _cache_fill(cls, meth, sel); + return method_getImplementation(meth); + } else { + // Miss in method list. Cache objc_msgForward. + _cache_addForwardEntry(cls, sel); + return _objc_msgForward_impcache; + } +} + + +/*********************************************************************** +* _class_getClassForIvar +* Given a class and an ivar that is in it or one of its superclasses, +* find the actual class that defined the ivar. +**********************************************************************/ +Class _class_getClassForIvar(Class cls, Ivar ivar) +{ + for ( ; cls; cls = cls->superclass) { + if (auto ivars = cls->ivars) { + if (ivar >= &ivars->ivar_list[0] && + ivar < &ivars->ivar_list[ivars->ivar_count]) + { + return cls; + } + } + } + + return nil; +} + + +/*********************************************************************** +* class_getVariable. Return the named instance variable. +**********************************************************************/ + +Ivar _class_getVariable(Class cls, const char *name) +{ + for (; cls != Nil; cls = cls->superclass) { + int i; + + // Skip class having no ivars + if (!cls->ivars) continue; + + for (i = 0; i < cls->ivars->ivar_count; i++) { + // Check this ivar's name. Be careful because the + // compiler generates ivar entries with nil ivar_name + // (e.g. for anonymous bit fields). + old_ivar *ivar = &cls->ivars->ivar_list[i]; + if (ivar->ivar_name && 0 == strcmp(name, ivar->ivar_name)) { + return (Ivar)ivar; + } + } + } + + // Not found + return nil; +} + + +old_property * +property_list_nth(const old_property_list *plist, uint32_t i) +{ + return (old_property *)(i*plist->entsize + (char *)&plist->first); +} + +old_property ** +copyPropertyList(old_property_list *plist, unsigned int *outCount) +{ + old_property **result = nil; + unsigned int count = 0; + + if (plist) { + count = plist->count; + } + + if (count > 0) { + unsigned int i; + result = (old_property **)malloc((count+1) * sizeof(old_property *)); + + for (i = 0; i < count; i++) { + result[i] = property_list_nth(plist, i); + } + result[i] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +static old_property_list * +nextPropertyList(Class cls, uintptr_t *indexp) +{ + old_property_list *result = nil; + + classLock.assertLocked(); + if (! 
((cls->info & CLS_EXT) && cls->ext)) { + // No class ext + result = nil; + } else if (!cls->ext->propertyLists) { + // No property lists + result = nil; + } else if (cls->info & CLS_NO_PROPERTY_ARRAY) { + // Only one property list + if (*indexp == 0) { + result = (old_property_list *)cls->ext->propertyLists; + } else { + result = nil; + } + } else { + // More than one property list + result = cls->ext->propertyLists[*indexp]; + } + + if (result) { + ++*indexp; + return result; + } else { + *indexp = 0; + return nil; + } +} + + +/*********************************************************************** +* class_getIvarLayout +* nil means all-scanned. "" means non-scanned. +**********************************************************************/ +const uint8_t * +class_getIvarLayout(Class cls) +{ + if (cls && (cls->info & CLS_EXT)) { + return cls->ivar_layout; + } else { + return nil; // conservative scan + } +} + + +/*********************************************************************** +* class_getWeakIvarLayout +* nil means no weak ivars. +**********************************************************************/ +const uint8_t * +class_getWeakIvarLayout(Class cls) +{ + if (cls && (cls->info & CLS_EXT) && cls->ext) { + return cls->ext->weak_ivar_layout; + } else { + return nil; // no weak ivars + } +} + + +/*********************************************************************** +* class_setIvarLayout +* nil means all-scanned. "" means non-scanned. +**********************************************************************/ +void class_setIvarLayout(Class cls, const uint8_t *layout) +{ + if (!cls) return; + + if (! (cls->info & CLS_EXT)) { + _objc_inform("class '%s' needs to be recompiled", cls->name); + return; + } + + // fixme leak + cls->ivar_layout = ustrdupMaybeNil(layout); +} + +// SPI: Instance-specific object layout. + +void _class_setIvarLayoutAccessor(Class cls, const uint8_t* (*accessor) (id object)) { + if (!cls) return; + + if (! (cls->info & CLS_EXT)) { + _objc_inform("class '%s' needs to be recompiled", cls->name); + return; + } + + // fixme leak + cls->ivar_layout = (const uint8_t *)accessor; + cls->setInfo(CLS_HAS_INSTANCE_SPECIFIC_LAYOUT); +} + +const uint8_t *_object_getIvarLayout(Class cls, id object) { + if (cls && (cls->info & CLS_EXT)) { + const uint8_t* layout = cls->ivar_layout; + if (cls->info & CLS_HAS_INSTANCE_SPECIFIC_LAYOUT) { + const uint8_t* (*accessor) (id object) = (const uint8_t* (*)(id))layout; + layout = accessor(object); + } + return layout; + } else { + return nil; + } +} + +/*********************************************************************** +* class_setWeakIvarLayout +* nil means no weak ivars. +**********************************************************************/ +void class_setWeakIvarLayout(Class cls, const uint8_t *layout) +{ + if (!cls) return; + + mutex_locker_t lock(classLock); + + allocateExt(cls); + + // fixme leak + cls->ext->weak_ivar_layout = ustrdupMaybeNil(layout); +} + + +/*********************************************************************** +* class_setVersion. Record the specified version with the class. +**********************************************************************/ +void class_setVersion(Class cls, int version) +{ + if (!cls) return; + cls->version = version; +} + +/*********************************************************************** +* class_getVersion. Return the version recorded with the class. 
+**********************************************************************/ +int class_getVersion(Class cls) +{ + if (!cls) return 0; + return (int)cls->version; +} + + +/*********************************************************************** +* class_getName. +**********************************************************************/ +const char *class_getName(Class cls) +{ + if (!cls) return "nil"; + else return cls->demangledName(); +} + + +/*********************************************************************** +* _class_getNonMetaClass. +* Return the ordinary class for this class or metaclass. +* Used by +initialize. +**********************************************************************/ +Class _class_getNonMetaClass(Class cls, id obj) +{ + // fixme ick + if (cls->isMetaClass()) { + if (cls->info & CLS_CONSTRUCTING) { + // Class is under construction and isn't in the class_hash, + // so objc_getClass doesn't work. + cls = obj; // fixme this may be nil in some paths + } + else if (strncmp(cls->name, "_%", 2) == 0) { + // Posee's meta's name is smashed and isn't in the class_hash, + // so objc_getClass doesn't work. + const char *baseName = strchr(cls->name, '%'); // get posee's real name + cls = objc_getClass(baseName); + } + else { + cls = objc_getClass(cls->name); + } + assert(cls); + } + + return cls; +} + + +Cache _class_getCache(Class cls) +{ + return cls->cache; +} + +void _class_setCache(Class cls, Cache cache) +{ + cls->cache = cache; +} + +const char *_category_getName(Category cat) +{ + return oldcategory(cat)->category_name; +} + +const char *_category_getClassName(Category cat) +{ + return oldcategory(cat)->class_name; +} + +Class _category_getClass(Category cat) +{ + return objc_getClass(oldcategory(cat)->class_name); +} + +IMP _category_getLoadMethod(Category cat) +{ + old_method_list *mlist = oldcategory(cat)->class_methods; + if (mlist) { + return lookupNamedMethodInMethodList(mlist, "load"); + } else { + return nil; + } +} + + + +/*********************************************************************** +* class_nextMethodList. +* External version of nextMethodList(). +* +* This function is not fully thread-safe. A series of calls to +* class_nextMethodList() may fail if methods are added to or removed +* from the class between calls. +* If methods are added between calls to class_nextMethodList(), it may +* return previously-returned method lists again, and may fail to return +* newly-added lists. +* If methods are removed between calls to class_nextMethodList(), it may +* omit surviving method lists or simply crash. +**********************************************************************/ +OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class cls, void **it) +{ + OBJC_WARN_DEPRECATED; + + mutex_locker_t lock(methodListLock); + return (struct objc_method_list *) nextMethodList(cls, it); +} + + +/*********************************************************************** +* class_addMethods. +* +* Formerly class_addInstanceMethods () +**********************************************************************/ +OBJC_EXPORT void class_addMethods(Class cls, struct objc_method_list *meths) +{ + OBJC_WARN_DEPRECATED; + + // Add the methods. + { + mutex_locker_t lock(methodListLock); + _objc_insertMethods(cls, (old_method_list *)meths, nil); + } + + // Must flush when dynamically adding methods. No need to flush + // all the class method caches. If cls is a meta class, though, + // this will still flush it and any of its sub-meta classes. 
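+    // (The NO below is flush_caches' flush_meta argument; the comment above
+    // explains why the metaclass caches need no separate flush here.)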
+ flush_caches (cls, NO); +} + + +/*********************************************************************** +* class_removeMethods. +**********************************************************************/ +OBJC_EXPORT void class_removeMethods(Class cls, struct objc_method_list *meths) +{ + OBJC_WARN_DEPRECATED; + + // Remove the methods + { + mutex_locker_t lock(methodListLock); + _objc_removeMethods(cls, (old_method_list *)meths); + } + + // Must flush when dynamically removing methods. No need to flush + // all the class method caches. If cls is a meta class, though, + // this will still flush it and any of its sub-meta classes. + flush_caches (cls, NO); +} + +/*********************************************************************** +* lookupNamedMethodInMethodList +* Only called to find +load/-.cxx_construct/-.cxx_destruct methods, +* without fixing up the entire method list. +* The class is not yet in use, so methodListLock is not taken. +**********************************************************************/ +IMP lookupNamedMethodInMethodList(old_method_list *mlist, const char *meth_name) +{ + old_method *m; + m = meth_name ? _findNamedMethodInList(mlist, meth_name) : nil; + return (m ? m->method_imp : nil); +} + +static Method _class_getMethod(Class cls, SEL sel) +{ + mutex_locker_t lock(methodListLock); + return (Method)_getMethod(cls, sel); +} + +static Method _class_getMethodNoSuper(Class cls, SEL sel) +{ + mutex_locker_t lock(methodListLock); + return (Method)_findMethodInClass(cls, sel); +} + +static Method _class_getMethodNoSuper_nolock(Class cls, SEL sel) +{ + methodListLock.assertLocked(); + return (Method)_findMethodInClass(cls, sel); +} + + +/*********************************************************************** +* class_getInstanceMethod. Return the instance method for the +* specified class and selector. +**********************************************************************/ +Method class_getInstanceMethod(Class cls, SEL sel) +{ + if (!cls || !sel) return nil; + + // This deliberately avoids +initialize because it historically did so. + + // This implementation is a bit weird because it's the only place that + // wants a Method instead of an IMP. + + Method meth; + meth = _cache_getMethod(cls, sel, _objc_msgForward_impcache); + if (meth == (Method)1) { + // Cache contains forward:: . Stop searching. + return nil; + } else if (meth) { + return meth; + } + + // Search method lists, try method resolver, etc. + lookUpImpOrNil(cls, sel, nil, + NO/*initialize*/, NO/*cache*/, YES/*resolver*/); + + meth = _cache_getMethod(cls, sel, _objc_msgForward_impcache); + if (meth == (Method)1) { + // Cache contains forward:: . Stop searching. + return nil; + } else if (meth) { + return meth; + } + + return _class_getMethod(cls, sel); +} + + +BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen) +{ + old_protocol *proto = oldprotocol(proto_gen); + + if (!cls) return NO; + if (!proto) return NO; + + if (cls->ISA()->version >= 3) { + old_protocol_list *list; + for (list = cls->protocols; list != nil; list = list->next) { + int i; + for (i = 0; i < list->count; i++) { + if (list->list[i] == proto) return YES; + if (protocol_conformsToProtocol((Protocol *)list->list[i], proto_gen)) return YES; + } + if (cls->ISA()->version <= 4) break; + } + } + return NO; +} + + +static NXMapTable * posed_class_hash = nil; + +/*********************************************************************** +* objc_getOrigClass. 
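+* Returns the original (pre-posing) class for the given name. The poser
+* table maintained by class_poseAs is consulted first, then the normal
+* class lookup is used.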
+**********************************************************************/ +extern "C" +Class _objc_getOrigClass(const char *name) +{ + // Look for class among the posers + { + mutex_locker_t lock(classLock); + if (posed_class_hash) { + Class cls = (Class) NXMapGet (posed_class_hash, name); + if (cls) return cls; + } + } + + // Not a poser. Do a normal lookup. + Class cls = objc_getClass (name); + if (cls) return cls; + + _objc_inform ("class `%s' not linked into application", name); + return nil; +} + +Class objc_getOrigClass(const char *name) +{ + OBJC_WARN_DEPRECATED; + return _objc_getOrigClass(name); +} + +/*********************************************************************** +* _objc_addOrigClass. This function is only used from class_poseAs. +* Registers the original class names, before they get obscured by +* posing, so that [super ..] will work correctly from categories +* in posing classes and in categories in classes being posed for. +**********************************************************************/ +static void _objc_addOrigClass (Class origClass) +{ + mutex_locker_t lock(classLock); + + // Create the poser's hash table on first use + if (!posed_class_hash) + { + posed_class_hash = NXCreateMapTable(NXStrValueMapPrototype, 8); + } + + // Add the named class iff it is not already there (or collides?) + if (NXMapGet (posed_class_hash, origClass->name) == 0) + NXMapInsert (posed_class_hash, origClass->name, origClass); +} + + +/*********************************************************************** +* change_class_references +* Change classrefs and superclass pointers from original to imposter +* But if copy!=nil, don't change copy->superclass. +* If changeSuperRefs==YES, also change [super message] classrefs. +* Used by class_poseAs and objc_setFutureClass +* classLock must be locked. +**********************************************************************/ +void change_class_references(Class imposter, + Class original, + Class copy, + bool changeSuperRefs) +{ + header_info *hInfo; + Class clsObject; + NXHashState state; + + // Change all subclasses of the original to point to the imposter. + state = NXInitHashState (class_hash); + while (NXNextHashState (class_hash, &state, (void **) &clsObject)) + { + while ((clsObject) && (clsObject != imposter) && + (clsObject != copy)) + { + if (clsObject->superclass == original) + { + clsObject->superclass = imposter; + clsObject->ISA()->superclass = imposter->ISA(); + // We must flush caches here! + break; + } + + clsObject = clsObject->superclass; + } + } + + // Replace the original with the imposter in all class refs + // Major loop - process all headers + for (hInfo = FirstHeader; hInfo != nil; hInfo = hInfo->getNext()) + { + Class *cls_refs; + size_t refCount; + unsigned int index; + + // Fix class refs associated with this header + cls_refs = _getObjcClassRefs(hInfo, &refCount); + if (cls_refs) { + for (index = 0; index < refCount; index += 1) { + if (cls_refs[index] == original) { + cls_refs[index] = imposter; + } + } + } + } +} + + +/*********************************************************************** +* class_poseAs. +* +* !!! class_poseAs () does not currently flush any caches. 
+**********************************************************************/ +Class class_poseAs(Class imposter, Class original) +{ + char * imposterNamePtr; + Class copy; + + OBJC_WARN_DEPRECATED; + + // Trivial case is easy + if (imposter == original) + return imposter; + + // Imposter must be an immediate subclass of the original + if (imposter->superclass != original) { + __objc_error(imposter, + "[%s poseAs:%s]: target not immediate superclass", + imposter->name, original->name); + } + + // Can't pose when you have instance variables (how could it work?) + if (imposter->ivars) { + __objc_error(imposter, + "[%s poseAs:%s]: %s defines new instance variables", + imposter->name, original->name, imposter->name); + } + + // Build a string to use to replace the name of the original class. +#if TARGET_OS_WIN32 +# define imposterNamePrefix "_%" + imposterNamePtr = malloc(strlen(original->name) + strlen(imposterNamePrefix) + 1); + strcpy(imposterNamePtr, imposterNamePrefix); + strcat(imposterNamePtr, original->name); +# undef imposterNamePrefix +#else + asprintf(&imposterNamePtr, "_%%%s", original->name); +#endif + + // We lock the class hashtable, so we are thread safe with respect to + // calls to objc_getClass (). However, the class names are not + // changed atomically, nor are all of the subclasses updated + // atomically. I have ordered the operations so that you will + // never crash, but you may get inconsistent results.... + + // Register the original class so that [super ..] knows + // exactly which classes are the "original" classes. + _objc_addOrigClass (original); + _objc_addOrigClass (imposter); + + // Copy the imposter, so that the imposter can continue + // its normal life in addition to changing the behavior of + // the original. As a hack we don't bother to copy the metaclass. + // For some reason we modify the original rather than the copy. + copy = (Class)malloc(sizeof(objc_class)); + memmove(copy, imposter, sizeof(objc_class)); + + mutex_locker_t lock(classLock); + + // Remove both the imposter and the original class. + NXHashRemove (class_hash, imposter); + NXHashRemove (class_hash, original); + + NXHashInsert (class_hash, copy); + + // Mark the imposter as such + imposter->setInfo(CLS_POSING); + imposter->ISA()->setInfo(CLS_POSING); + + // Change the name of the imposter to that of the original class. + imposter->name = original->name; + imposter->ISA()->name = original->ISA()->name; + + // Also copy the version field to avoid archiving problems. + imposter->version = original->version; + + // Change classrefs and superclass pointers + // Don't change copy->superclass + // Don't change [super ...] messages + change_class_references(imposter, original, copy, NO); + + // Change the name of the original class. + original->name = imposterNamePtr + 1; + original->ISA()->name = imposterNamePtr; + + // Restore the imposter and the original class with their new names. + NXHashInsert (class_hash, imposter); + NXHashInsert (class_hash, original); + + return imposter; +} + + +/*********************************************************************** +* _objc_flush_caches. Flush the instance and class method caches +* of cls and all its subclasses. +* +* Specifying Nil for the class "all classes." 
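+* (That is, passing Nil flushes the method caches of every class and also
+* forces collection of cache garbage.)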
+**********************************************************************/ +static void flush_caches(Class target, bool flush_meta) +{ + bool collectALot = (target == nil); + NXHashState state; + Class clsObject; +#ifdef OBJC_INSTRUMENTED + unsigned int classesVisited; + unsigned int subclassCount; +#endif + + mutex_locker_t lock(classLock); + mutex_locker_t lock2(cacheUpdateLock); + + // Leaf classes are fastest because there are no subclass caches to flush. + // fixme instrument + if (target && (target->info & CLS_LEAF)) { + _cache_flush (target); + + if (target->ISA() && (target->ISA()->info & CLS_LEAF)) { + _cache_flush (target->ISA()); + return; // done + } else { + // Reset target and handle it by one of the methods below. + target = target->ISA(); + flush_meta = NO; + // NOT done + } + } + + state = NXInitHashState(class_hash); + + // Handle nil and root instance class specially: flush all + // instance and class method caches. Nice that this + // loop is linear vs the N-squared loop just below. + if (!target || !target->superclass) + { +#ifdef OBJC_INSTRUMENTED + LinearFlushCachesCount += 1; + classesVisited = 0; + subclassCount = 0; +#endif + // Traverse all classes in the hash table + while (NXNextHashState(class_hash, &state, (void**)&clsObject)) + { + Class metaClsObject; +#ifdef OBJC_INSTRUMENTED + classesVisited += 1; +#endif + + // Skip class that is known not to be a subclass of this root + // (the isa pointer of any meta class points to the meta class + // of the root). + // NOTE: When is an isa pointer of a hash tabled class ever nil? + metaClsObject = clsObject->ISA(); + if (target && metaClsObject && target->ISA() != metaClsObject->ISA()) { + continue; + } + +#ifdef OBJC_INSTRUMENTED + subclassCount += 1; +#endif + + _cache_flush (clsObject); + if (flush_meta && metaClsObject != nil) { + _cache_flush (metaClsObject); + } + } +#ifdef OBJC_INSTRUMENTED + LinearFlushCachesVisitedCount += classesVisited; + if (classesVisited > MaxLinearFlushCachesVisitedCount) + MaxLinearFlushCachesVisitedCount = classesVisited; + IdealFlushCachesCount += subclassCount; + if (subclassCount > MaxIdealFlushCachesCount) + MaxIdealFlushCachesCount = subclassCount; +#endif + + goto done; + } + + // Outer loop - flush any cache that could now get a method from + // cls (i.e. the cache associated with cls and any of its subclasses). 
+#ifdef OBJC_INSTRUMENTED + NonlinearFlushCachesCount += 1; + classesVisited = 0; + subclassCount = 0; +#endif + while (NXNextHashState(class_hash, &state, (void**)&clsObject)) + { + Class clsIter; + +#ifdef OBJC_INSTRUMENTED + NonlinearFlushCachesClassCount += 1; +#endif + + // Inner loop - Process a given class + clsIter = clsObject; + while (clsIter) + { + +#ifdef OBJC_INSTRUMENTED + classesVisited += 1; +#endif + // Flush clsObject instance method cache if + // clsObject is a subclass of cls, or is cls itself + // Flush the class method cache if that was asked for + if (clsIter == target) + { +#ifdef OBJC_INSTRUMENTED + subclassCount += 1; +#endif + _cache_flush (clsObject); + if (flush_meta) + _cache_flush (clsObject->ISA()); + + break; + + } + + // Flush clsObject class method cache if cls is + // the meta class of clsObject or of one + // of clsObject's superclasses + else if (clsIter->ISA() == target) + { +#ifdef OBJC_INSTRUMENTED + subclassCount += 1; +#endif + _cache_flush (clsObject->ISA()); + break; + } + + // Move up superclass chain + // else if (clsIter->isInitialized()) + clsIter = clsIter->superclass; + + // clsIter is not initialized, so its cache + // must be empty. This happens only when + // clsIter == clsObject, because + // superclasses are initialized before + // subclasses, and this loop traverses + // from sub- to super- classes. + // else + // break; + } + } +#ifdef OBJC_INSTRUMENTED + NonlinearFlushCachesVisitedCount += classesVisited; + if (classesVisited > MaxNonlinearFlushCachesVisitedCount) + MaxNonlinearFlushCachesVisitedCount = classesVisited; + IdealFlushCachesCount += subclassCount; + if (subclassCount > MaxIdealFlushCachesCount) + MaxIdealFlushCachesCount = subclassCount; +#endif + + + done: + if (collectALot) { + _cache_collect(true); + } +} + + +void _objc_flush_caches(Class target) +{ + flush_caches(target, YES); +} + + + +/*********************************************************************** +* flush_marked_caches. Flush the method cache of any class marked +* CLS_FLUSH_CACHE (and all subclasses thereof) +* fixme instrument +**********************************************************************/ +void flush_marked_caches(void) +{ + Class cls; + Class supercls; + NXHashState state; + + mutex_locker_t lock(classLock); + mutex_locker_t lock2(cacheUpdateLock); + + state = NXInitHashState(class_hash); + while (NXNextHashState(class_hash, &state, (void**)&cls)) { + for (supercls = cls; supercls; supercls = supercls->superclass) { + if (supercls->info & CLS_FLUSH_CACHE) { + _cache_flush(cls); + break; + } + } + + for (supercls = cls->ISA(); supercls; supercls = supercls->superclass) { + if (supercls->info & CLS_FLUSH_CACHE) { + _cache_flush(cls->ISA()); + break; + } + } + } + + state = NXInitHashState(class_hash); + while (NXNextHashState(class_hash, &state, (void**)&cls)) { + if (cls->info & CLS_FLUSH_CACHE) { + cls->clearInfo(CLS_FLUSH_CACHE); + } + if (cls->ISA()->info & CLS_FLUSH_CACHE) { + cls->ISA()->clearInfo(CLS_FLUSH_CACHE); + } + } +} + + +/*********************************************************************** +* get_base_method_list +* Returns the method list containing the class's own methods, +* ignoring any method lists added by categories or class_addMethods. +* Called only by add_class_to_loadable_list. +* Does not hold methodListLock because add_class_to_loadable_list +* does not manipulate in-use classes. 
+**********************************************************************/ +static old_method_list *get_base_method_list(Class cls) +{ + old_method_list **ptr; + + if (!cls->methodLists) return nil; + if (cls->info & CLS_NO_METHOD_ARRAY) return (old_method_list *)cls->methodLists; + ptr = cls->methodLists; + if (!*ptr || *ptr == END_OF_METHODS_LIST) return nil; + while ( *ptr != 0 && *ptr != END_OF_METHODS_LIST ) { ptr++; } + --ptr; + return *ptr; +} + + +static IMP _class_getLoadMethod_nocheck(Class cls) +{ + old_method_list *mlist; + mlist = get_base_method_list(cls->ISA()); + if (mlist) { + return lookupNamedMethodInMethodList (mlist, "load"); + } + return nil; +} + + +bool _class_hasLoadMethod(Class cls) +{ + if (cls->ISA()->info & CLS_HAS_LOAD_METHOD) return YES; + return _class_getLoadMethod_nocheck(cls); +} + + +/*********************************************************************** +* objc_class::getLoadMethod +* Returns cls's +load implementation, or nil if it doesn't have one. +**********************************************************************/ +IMP objc_class::getLoadMethod() +{ + if (ISA()->info & CLS_HAS_LOAD_METHOD) { + return _class_getLoadMethod_nocheck((Class)this); + } + return nil; +} + +ptrdiff_t ivar_getOffset(Ivar ivar) +{ + return oldivar(ivar)->ivar_offset; +} + +const char *ivar_getName(Ivar ivar) +{ + return oldivar(ivar)->ivar_name; +} + +const char *ivar_getTypeEncoding(Ivar ivar) +{ + return oldivar(ivar)->ivar_type; +} + + +IMP method_getImplementation(Method m) +{ + if (!m) return nil; + return oldmethod(m)->method_imp; +} + +SEL method_getName(Method m) +{ + if (!m) return nil; + return oldmethod(m)->method_name; +} + +const char *method_getTypeEncoding(Method m) +{ + if (!m) return nil; + return oldmethod(m)->method_types; +} + +unsigned int method_getSizeOfArguments(Method m) +{ + OBJC_WARN_DEPRECATED; + if (!m) return 0; + return encoding_getSizeOfArguments(method_getTypeEncoding(m)); +} + +unsigned int method_getArgumentInfo(Method m, int arg, + const char **type, int *offset) +{ + OBJC_WARN_DEPRECATED; + if (!m) return 0; + return encoding_getArgumentInfo(method_getTypeEncoding(m), + arg, type, offset); +} + + +static spinlock_t impLock; + +IMP method_setImplementation(Method m_gen, IMP imp) +{ + IMP old; + old_method *m = oldmethod(m_gen); + if (!m) return nil; + if (!imp) return nil; + + impLock.lock(); + old = m->method_imp; + m->method_imp = imp; + impLock.unlock(); + return old; +} + + +void method_exchangeImplementations(Method m1_gen, Method m2_gen) +{ + IMP m1_imp; + old_method *m1 = oldmethod(m1_gen); + old_method *m2 = oldmethod(m2_gen); + if (!m1 || !m2) return; + + impLock.lock(); + m1_imp = m1->method_imp; + m1->method_imp = m2->method_imp; + m2->method_imp = m1_imp; + impLock.unlock(); +} + + +struct objc_method_description * method_getDescription(Method m) +{ + if (!m) return nil; + return (struct objc_method_description *)oldmethod(m); +} + + +const char *property_getName(objc_property_t prop) +{ + return oldproperty(prop)->name; +} + +const char *property_getAttributes(objc_property_t prop) +{ + return oldproperty(prop)->attributes; +} + +objc_property_attribute_t *property_copyAttributeList(objc_property_t prop, + unsigned int *outCount) +{ + if (!prop) { + if (outCount) *outCount = 0; + return nil; + } + + mutex_locker_t lock(classLock); + return copyPropertyAttributeList(oldproperty(prop)->attributes,outCount); +} + +char * property_copyAttributeValue(objc_property_t prop, const char *name) +{ + if (!prop || !name || *name == 
'\0') return nil; + + mutex_locker_t lock(classLock); + return copyPropertyAttributeValue(oldproperty(prop)->attributes, name); +} + + +/*********************************************************************** +* class_addMethod +**********************************************************************/ +static IMP _class_addMethod(Class cls, SEL name, IMP imp, + const char *types, bool replace) +{ + old_method *m; + IMP result = nil; + + if (!types) types = ""; + + mutex_locker_t lock(methodListLock); + + if ((m = _findMethodInClass(cls, name))) { + // already exists + // fixme atomic + result = method_getImplementation((Method)m); + if (replace) { + method_setImplementation((Method)m, imp); + } + } else { + // fixme could be faster + old_method_list *mlist = + (old_method_list *)calloc(sizeof(old_method_list), 1); + mlist->obsolete = fixed_up_method_list; + mlist->method_count = 1; + mlist->method_list[0].method_name = name; + mlist->method_list[0].method_types = strdup(types); + mlist->method_list[0].method_imp = imp; + + _objc_insertMethods(cls, mlist, nil); + if (!(cls->info & CLS_CONSTRUCTING)) { + flush_caches(cls, NO); + } else { + // in-construction class has no subclasses + flush_cache(cls); + } + result = nil; + } + + return result; +} + + +/*********************************************************************** +* class_addMethod +**********************************************************************/ +BOOL class_addMethod(Class cls, SEL name, IMP imp, const char *types) +{ + IMP old; + if (!cls) return NO; + + old = _class_addMethod(cls, name, imp, types, NO); + return !old; +} + + +/*********************************************************************** +* class_replaceMethod +**********************************************************************/ +IMP class_replaceMethod(Class cls, SEL name, IMP imp, const char *types) +{ + if (!cls) return nil; + + return _class_addMethod(cls, name, imp, types, YES); +} + + +/*********************************************************************** +* class_addIvar +**********************************************************************/ +BOOL class_addIvar(Class cls, const char *name, size_t size, + uint8_t alignment, const char *type) +{ + bool result = YES; + + if (!cls) return NO; + if (ISMETA(cls)) return NO; + if (!(cls->info & CLS_CONSTRUCTING)) return NO; + + if (!type) type = ""; + if (name && 0 == strcmp(name, "")) name = nil; + + mutex_locker_t lock(classLock); + + // Check for existing ivar with this name + // fixme check superclasses? 
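+ /* Illustrative sketch (hedged): class_addIvar may only be called on a
+  * class that is still under construction, and `alignment` is log2 of the
+  * byte alignment (note `alignBytes = 1 << alignment` below). A typical
+  * call for an id-sized ivar on a hypothetical in-construction class:
+  *
+  *   class_addIvar(cls, "_value", sizeof(id), log2(sizeof(id)), @encode(id));
+  */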
+ if (cls->ivars) { + int i; + for (i = 0; i < cls->ivars->ivar_count; i++) { + if (0 == strcmp(cls->ivars->ivar_list[i].ivar_name, name)) { + result = NO; + break; + } + } + } + + if (result) { + old_ivar_list *old = cls->ivars; + size_t oldSize; + int newCount; + old_ivar *ivar; + size_t alignBytes; + size_t misalign; + + if (old) { + oldSize = sizeof(old_ivar_list) + + (old->ivar_count - 1) * sizeof(old_ivar); + newCount = 1 + old->ivar_count; + } else { + oldSize = sizeof(old_ivar_list) - sizeof(old_ivar); + newCount = 1; + } + + // allocate new ivar list + cls->ivars = (old_ivar_list *) + calloc(oldSize+sizeof(old_ivar), 1); + if (old) memcpy(cls->ivars, old, oldSize); + if (old && malloc_size(old)) free(old); + cls->ivars->ivar_count = newCount; + ivar = &cls->ivars->ivar_list[newCount-1]; + + // set ivar name and type + ivar->ivar_name = strdup(name); + ivar->ivar_type = strdup(type); + + // align if necessary + alignBytes = 1 << alignment; + misalign = cls->instance_size % alignBytes; + if (misalign) cls->instance_size += (long)(alignBytes - misalign); + + // set ivar offset and increase instance size + ivar->ivar_offset = (int)cls->instance_size; + cls->instance_size += (long)size; + } + + return result; +} + + +/*********************************************************************** +* class_addProtocol +**********************************************************************/ +BOOL class_addProtocol(Class cls, Protocol *protocol_gen) +{ + old_protocol *protocol = oldprotocol(protocol_gen); + old_protocol_list *plist; + + if (!cls) return NO; + if (class_conformsToProtocol(cls, protocol_gen)) return NO; + + mutex_locker_t lock(classLock); + + // fixme optimize - protocol list doesn't escape? + plist = (old_protocol_list*)calloc(sizeof(old_protocol_list), 1); + plist->count = 1; + plist->list[0] = protocol; + plist->next = cls->protocols; + cls->protocols = plist; + + // fixme metaclass? + + return YES; +} + + +/*********************************************************************** +* _class_addProperties +* Internal helper to add properties to a class. 
+* Used by category attachment and class_addProperty()
+* Locking: acquires classLock
+**********************************************************************/
+bool
+_class_addProperties(Class cls,
+ old_property_list *additions)
+{
+ old_property_list *newlist;
+
+ if (!(cls->info & CLS_EXT)) return NO;
+
+ newlist = (old_property_list *)
+ memdup(additions, sizeof(*newlist) - sizeof(newlist->first) +
+ (additions->entsize * additions->count));
+
+ mutex_locker_t lock(classLock);
+
+ allocateExt(cls);
+ if (!cls->ext->propertyLists) {
+ // cls has no properties - simply use this list
+ cls->ext->propertyLists = (old_property_list **)newlist;
+ cls->setInfo(CLS_NO_PROPERTY_ARRAY);
+ }
+ else if (cls->info & CLS_NO_PROPERTY_ARRAY) {
+ // cls has one property list - make a new array
+ old_property_list **newarray = (old_property_list **)
+ malloc(3 * sizeof(*newarray));
+ newarray[0] = newlist;
+ newarray[1] = (old_property_list *)cls->ext->propertyLists;
+ newarray[2] = nil;
+ cls->ext->propertyLists = newarray;
+ cls->clearInfo(CLS_NO_PROPERTY_ARRAY);
+ }
+ else {
+ // cls has a property array - make a bigger one
+ old_property_list **newarray;
+ int count = 0;
+ while (cls->ext->propertyLists[count]) count++;
+ newarray = (old_property_list **)
+ malloc((count+2) * sizeof(*newarray));
+ newarray[0] = newlist;
+ memcpy(&newarray[1], &cls->ext->propertyLists[0],
+ count * sizeof(*newarray));
+ newarray[count+1] = nil;
+ free(cls->ext->propertyLists);
+ cls->ext->propertyLists = newarray;
+ }
+
+ return YES;
+}
+
+
+/***********************************************************************
+* class_addProperty
+* Adds a property to a class. Returns NO if the property already exists.
+* Locking: acquires classLock
+**********************************************************************/
+static bool
+_class_addProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int count,
+ bool replace)
+{
+ if (!cls) return NO;
+ if (!name) return NO;
+
+ old_property *prop = oldproperty(class_getProperty(cls, name));
+ if (prop && !replace) {
+ // already exists, refuse to replace
+ return NO;
+ }
+ else if (prop) {
+ // replace existing
+ mutex_locker_t lock(classLock);
+ try_free(prop->attributes);
+ prop->attributes = copyPropertyAttributeString(attrs, count);
+ return YES;
+ }
+ else {
+ // add new
+ old_property_list proplist;
+ proplist.entsize = sizeof(old_property);
+ proplist.count = 1;
+ proplist.first.name = strdup(name);
+ proplist.first.attributes = copyPropertyAttributeString(attrs, count);
+
+ return _class_addProperties(cls, &proplist);
+ }
+}
+
+BOOL
+class_addProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int n)
+{
+ return _class_addProperty(cls, name, attrs, n, NO);
+}
+
+void
+class_replaceProperty(Class cls, const char *name,
+ const objc_property_attribute_t *attrs, unsigned int n)
+{
+ _class_addProperty(cls, name, attrs, n, YES);
+}
+
+
+/***********************************************************************
+* class_copyProtocolList. Returns a heap block containing the
+* protocols implemented by the class, or nil if the class
+* implements no protocols. Caller must free the block.
+* Does not copy any superclass's protocols.
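+*
+* Typical caller pattern (illustrative sketch):
+*
+*   unsigned int count;
+*   Protocol * __unsafe_unretained *protos = class_copyProtocolList(cls, &count);
+*   for (unsigned int i = 0; i < count; i++) {
+*       printf("%s\n", protocol_getName(protos[i]));
+*   }
+*   free(protos);   // may be nil when count == 0; free(nil) is harmless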
+**********************************************************************/ +Protocol * __unsafe_unretained * +class_copyProtocolList(Class cls, unsigned int *outCount) +{ + old_protocol_list *plist; + Protocol **result = nil; + unsigned int count = 0; + unsigned int p; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + mutex_locker_t lock(classLock); + + for (plist = cls->protocols; plist != nil; plist = plist->next) { + count += (int)plist->count; + } + + if (count > 0) { + result = (Protocol **)malloc((count+1) * sizeof(Protocol *)); + + for (p = 0, plist = cls->protocols; + plist != nil; + plist = plist->next) + { + int i; + for (i = 0; i < plist->count; i++) { + result[p++] = (Protocol *)plist->list[i]; + } + } + result[p] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* class_getProperty. Return the named property. +**********************************************************************/ +objc_property_t class_getProperty(Class cls, const char *name) +{ + if (!cls || !name) return nil; + + mutex_locker_t lock(classLock); + + for (; cls; cls = cls->superclass) { + uintptr_t iterator = 0; + old_property_list *plist; + while ((plist = nextPropertyList(cls, &iterator))) { + uint32_t i; + for (i = 0; i < plist->count; i++) { + old_property *p = property_list_nth(plist, i); + if (0 == strcmp(name, p->name)) { + return (objc_property_t)p; + } + } + } + } + + return nil; +} + + +/*********************************************************************** +* class_copyPropertyList. Returns a heap block containing the +* properties declared in the class, or nil if the class +* declares no properties. Caller must free the block. +* Does not copy any superclass's properties. +**********************************************************************/ +objc_property_t *class_copyPropertyList(Class cls, unsigned int *outCount) +{ + old_property_list *plist; + uintptr_t iterator = 0; + old_property **result = nil; + unsigned int count = 0; + unsigned int p, i; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + mutex_locker_t lock(classLock); + + iterator = 0; + while ((plist = nextPropertyList(cls, &iterator))) { + count += plist->count; + } + + if (count > 0) { + result = (old_property **)malloc((count+1) * sizeof(old_property *)); + + p = 0; + iterator = 0; + while ((plist = nextPropertyList(cls, &iterator))) { + for (i = 0; i < plist->count; i++) { + result[p++] = property_list_nth(plist, i); + } + } + result[p] = nil; + } + + if (outCount) *outCount = count; + return (objc_property_t *)result; +} + + +/*********************************************************************** +* class_copyMethodList. Returns a heap block containing the +* methods implemented by the class, or nil if the class +* implements no methods. Caller must free the block. +* Does not copy any superclass's methods. 
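+*
+* Illustrative caller pattern (hedged sketch); callers that also want
+* inherited methods must walk the superclass chain themselves:
+*
+*   unsigned int count;
+*   Method *methods = class_copyMethodList(cls, &count);
+*   for (unsigned int i = 0; i < count; i++) {
+*       printf("%s\n", sel_getName(method_getName(methods[i])));
+*   }
+*   free(methods);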
+**********************************************************************/ +Method *class_copyMethodList(Class cls, unsigned int *outCount) +{ + old_method_list *mlist; + void *iterator = nil; + Method *result = nil; + unsigned int count = 0; + unsigned int m; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + mutex_locker_t lock(methodListLock); + + iterator = nil; + while ((mlist = nextMethodList(cls, &iterator))) { + count += mlist->method_count; + } + + if (count > 0) { + result = (Method *)malloc((count+1) * sizeof(Method)); + + m = 0; + iterator = nil; + while ((mlist = nextMethodList(cls, &iterator))) { + int i; + for (i = 0; i < mlist->method_count; i++) { + result[m++] = (Method)&mlist->method_list[i]; + } + } + result[m] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* class_copyIvarList. Returns a heap block containing the +* ivars declared in the class, or nil if the class +* declares no ivars. Caller must free the block. +* Does not copy any superclass's ivars. +**********************************************************************/ +Ivar *class_copyIvarList(Class cls, unsigned int *outCount) +{ + Ivar *result = nil; + unsigned int count = 0; + int i; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + if (cls->ivars) { + count = cls->ivars->ivar_count; + } + + if (count > 0) { + result = (Ivar *)malloc((count+1) * sizeof(Ivar)); + + for (i = 0; i < cls->ivars->ivar_count; i++) { + result[i] = (Ivar)&cls->ivars->ivar_list[i]; + } + result[i] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* objc_allocateClass. +**********************************************************************/ + +void set_superclass(Class cls, Class supercls, bool cls_is_new) +{ + Class meta = cls->ISA(); + + if (supercls) { + cls->superclass = supercls; + meta->superclass = supercls->ISA(); + meta->initIsa(supercls->ISA()->ISA()); + + // Propagate C++ cdtors from superclass. + if (supercls->info & CLS_HAS_CXX_STRUCTORS) { + if (cls_is_new) cls->info |= CLS_HAS_CXX_STRUCTORS; + else cls->setInfo(CLS_HAS_CXX_STRUCTORS); + } + + // Superclass is no longer a leaf for cache flushing + if (supercls->info & CLS_LEAF) { + supercls->clearInfo(CLS_LEAF); + supercls->ISA()->clearInfo(CLS_LEAF); + } + } else { + cls->superclass = Nil; // superclass of root class is nil + meta->superclass = cls; // superclass of root metaclass is root class + meta->initIsa(meta); // metaclass of root metaclass is root metaclass + + // Root class is never a leaf for cache flushing, because the + // root metaclass is a subclass. (This could be optimized, but + // is too uncommon to bother.) + cls->clearInfo(CLS_LEAF); + meta->clearInfo(CLS_LEAF); + } +} + +// &UnsetLayout is the default ivar layout during class construction +static const uint8_t UnsetLayout = 0; + +Class objc_initializeClassPair(Class supercls, const char *name, Class cls, Class meta) +{ + // Connect to superclasses and metaclasses + cls->initIsa(meta); + set_superclass(cls, supercls, YES); + + // Set basic info + cls->name = strdup(name); + meta->name = strdup(name); + cls->version = 0; + meta->version = 7; + cls->info = CLS_CLASS | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF; + meta->info = CLS_META | CLS_CONSTRUCTING | CLS_EXT | CLS_LEAF; + + // Set instance size based on superclass. 
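+ /* Construction sketch (hedged; the class name and IMP are hypothetical):
+  * this function is normally reached from objc_allocateClassPair(), and a
+  * client builds a class with the public sequence
+  *
+  *   Class c = objc_allocateClassPair([NSObject class], "MyDynamicClass", 0);
+  *   class_addMethod(c, @selector(value), (IMP)myValueIMP, "@@:");
+  *   objc_registerClassPair(c);  // clears CLS_CONSTRUCTING, sets CLS_CONSTRUCTED
+  */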
+ if (supercls) { + cls->instance_size = supercls->instance_size; + meta->instance_size = supercls->ISA()->instance_size; + } else { + cls->instance_size = sizeof(Class); // just an isa + meta->instance_size = sizeof(objc_class); + } + + // No ivars. No methods. Empty cache. No protocols. No layout. Empty ext. + cls->ivars = nil; + cls->methodLists = nil; + cls->cache = (Cache)&_objc_empty_cache; + cls->protocols = nil; + cls->ivar_layout = &UnsetLayout; + cls->ext = nil; + allocateExt(cls); + cls->ext->weak_ivar_layout = &UnsetLayout; + + meta->ivars = nil; + meta->methodLists = nil; + meta->cache = (Cache)&_objc_empty_cache; + meta->protocols = nil; + meta->ext = nil; + + return cls; +} + +Class objc_allocateClassPair(Class supercls, const char *name, + size_t extraBytes) +{ + Class cls, meta; + + if (objc_getClass(name)) return nil; + // fixme reserve class name against simultaneous allocation + + if (supercls && (supercls->info & CLS_CONSTRUCTING)) { + // Can't make subclass of an in-construction class + return nil; + } + + // Allocate new classes. + if (supercls) { + cls = _calloc_class(supercls->ISA()->alignedInstanceSize() + extraBytes); + meta = _calloc_class(supercls->ISA()->ISA()->alignedInstanceSize() + extraBytes); + } else { + cls = _calloc_class(sizeof(objc_class) + extraBytes); + meta = _calloc_class(sizeof(objc_class) + extraBytes); + } + + + objc_initializeClassPair(supercls, name, cls, meta); + + return cls; +} + + +void objc_registerClassPair(Class cls) +{ + if ((cls->info & CLS_CONSTRUCTED) || + (cls->ISA()->info & CLS_CONSTRUCTED)) + { + _objc_inform("objc_registerClassPair: class '%s' was already " + "registered!", cls->name); + return; + } + + if (!(cls->info & CLS_CONSTRUCTING) || + !(cls->ISA()->info & CLS_CONSTRUCTING)) + { + _objc_inform("objc_registerClassPair: class '%s' was not " + "allocated with objc_allocateClassPair!", cls->name); + return; + } + + if (ISMETA(cls)) { + _objc_inform("objc_registerClassPair: class '%s' is a metaclass, " + "not a class!", cls->name); + return; + } + + mutex_locker_t lock(classLock); + + // Clear "under construction" bit, set "done constructing" bit + cls->info &= ~CLS_CONSTRUCTING; + cls->ISA()->info &= ~CLS_CONSTRUCTING; + cls->info |= CLS_CONSTRUCTED; + cls->ISA()->info |= CLS_CONSTRUCTED; + + NXHashInsertIfAbsent(class_hash, cls); +} + + +Class objc_duplicateClass(Class original, const char *name, size_t extraBytes) +{ + unsigned int count, i; + old_method **originalMethods; + old_method_list *duplicateMethods; + // Don't use sizeof(objc_class) here because + // instance_size has historically contained two extra words, + // and instance_size is what objc_getIndexedIvars() actually uses. 
+ Class duplicate = + _calloc_class(original->ISA()->alignedInstanceSize() + extraBytes); + + duplicate->initIsa(original->ISA()); + duplicate->superclass = original->superclass; + duplicate->name = strdup(name); + duplicate->version = original->version; + duplicate->info = original->info & (CLS_CLASS|CLS_META|CLS_INITIALIZED|CLS_JAVA_HYBRID|CLS_JAVA_CLASS|CLS_HAS_CXX_STRUCTORS|CLS_HAS_LOAD_METHOD); + duplicate->instance_size = original->instance_size; + duplicate->ivars = original->ivars; + // methodLists handled below + duplicate->cache = (Cache)&_objc_empty_cache; + duplicate->protocols = original->protocols; + if (original->info & CLS_EXT) { + duplicate->info |= original->info & (CLS_EXT|CLS_NO_PROPERTY_ARRAY); + duplicate->ivar_layout = original->ivar_layout; + if (original->ext) { + duplicate->ext = (old_class_ext *)malloc(original->ext->size); + memcpy(duplicate->ext, original->ext, original->ext->size); + } else { + duplicate->ext = nil; + } + } + + // Method lists are deep-copied so they can be stomped. + originalMethods = (old_method **)class_copyMethodList(original, &count); + if (originalMethods) { + duplicateMethods = (old_method_list *) + calloc(sizeof(old_method_list) + + (count-1)*sizeof(old_method), 1); + duplicateMethods->obsolete = fixed_up_method_list; + duplicateMethods->method_count = count; + for (i = 0; i < count; i++) { + duplicateMethods->method_list[i] = *(originalMethods[i]); + } + duplicate->methodLists = (old_method_list **)duplicateMethods; + duplicate->info |= CLS_NO_METHOD_ARRAY; + free(originalMethods); + } + + mutex_locker_t lock(classLock); + NXHashInsert(class_hash, duplicate); + + return duplicate; +} + + +void objc_disposeClassPair(Class cls) +{ + if (!(cls->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING)) || + !(cls->ISA()->info & (CLS_CONSTRUCTED|CLS_CONSTRUCTING))) + { + // class not allocated with objc_allocateClassPair + // disposing still-unregistered class is OK! + _objc_inform("objc_disposeClassPair: class '%s' was not " + "allocated with objc_allocateClassPair!", cls->name); + return; + } + + if (ISMETA(cls)) { + _objc_inform("objc_disposeClassPair: class '%s' is a metaclass, " + "not a class!", cls->name); + return; + } + + mutex_locker_t lock(classLock); + NXHashRemove(class_hash, cls); + unload_class(cls->ISA()); + unload_class(cls); +} + + +/*********************************************************************** +* objc_constructInstance +* Creates an instance of `cls` at the location pointed to by `bytes`. +* `bytes` must point to at least class_getInstanceSize(cls) bytes of +* well-aligned zero-filled memory. +* The new object's isa is set. Any C++ constructors are called. +* Returns `bytes` if successful. Returns nil if `cls` or `bytes` is +* nil, or if C++ constructors fail. +**********************************************************************/ +id +objc_constructInstance(Class cls, void *bytes) +{ + if (!cls || !bytes) return nil; + + id obj = (id)bytes; + + obj->initIsa(cls); + + if (cls->hasCxxCtor()) { + return object_cxxConstructFromClass(obj, cls); + } else { + return obj; + } +} + + +/*********************************************************************** +* _class_createInstanceFromZone. Allocate an instance of the +* specified class with the specified number of bytes for indexed +* variables, in the specified zone. The isa field is set to the +* class, C++ default constructors are called, and all other fields are zeroed. 
+**********************************************************************/ +id +_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) +{ + void *bytes; + size_t size; + + // Can't create something for nothing + if (!cls) return nil; + + // Allocate and initialize + size = cls->alignedInstanceSize() + extraBytes; + + // CF requires all objects be at least 16 bytes. + if (size < 16) size = 16; + + if (zone) { + bytes = malloc_zone_calloc((malloc_zone_t *)zone, 1, size); + } else { + bytes = calloc(1, size); + } + + return objc_constructInstance(cls, bytes); +} + + +/*********************************************************************** +* _class_createInstance. Allocate an instance of the specified +* class with the specified number of bytes for indexed variables, in +* the default zone, using _class_createInstanceFromZone. +**********************************************************************/ +static id _class_createInstance(Class cls, size_t extraBytes) +{ + return _class_createInstanceFromZone (cls, extraBytes, nil); +} + + +static id _object_copyFromZone(id oldObj, size_t extraBytes, void *zone) +{ + id obj; + size_t size; + + if (!oldObj) return nil; + + obj = (*_zoneAlloc)(oldObj->ISA(), extraBytes, zone); + size = oldObj->ISA()->alignedInstanceSize() + extraBytes; + + // fixme need C++ copy constructor + memmove(obj, oldObj, size); + + fixupCopiedIvars(obj, oldObj); + + return obj; +} + + +/*********************************************************************** +* objc_destructInstance +* Destroys an instance without freeing memory. +* Calls C++ destructors. +* Removes associative references. +* Returns `obj`. Does nothing if `obj` is nil. +* CoreFoundation and other clients do call this under GC. +**********************************************************************/ +void *objc_destructInstance(id obj) +{ + if (obj) { + Class isa = obj->getIsa(); + + if (isa->hasCxxDtor()) { + object_cxxDestruct(obj); + } + + if (isa->instancesHaveAssociatedObjects()) { + _object_remove_assocations(obj); + } + + objc_clear_deallocating(obj); + } + + return obj; +} + +static id +_object_dispose(id anObject) +{ + if (anObject==nil) return nil; + + objc_destructInstance(anObject); + + anObject->initIsa(_objc_getFreedObjectClass ()); + + free(anObject); + return nil; +} + +static id _object_copy(id oldObj, size_t extraBytes) +{ + void *z = malloc_zone_from_ptr(oldObj); + return _object_copyFromZone(oldObj, extraBytes, + z ? 
z : malloc_default_zone()); +} + +static id _object_reallocFromZone(id anObject, size_t nBytes, void *zone) +{ + id newObject; + Class tmp; + + if (anObject == nil) + __objc_error(nil, "reallocating nil object"); + + if (anObject->ISA() == _objc_getFreedObjectClass ()) + __objc_error(anObject, "reallocating freed object"); + + if (nBytes < anObject->ISA()->alignedInstanceSize()) + __objc_error(anObject, "(%s, %zu) requested size too small", + object_getClassName(anObject), nBytes); + + // fixme need C++ copy constructor + // fixme GC copy + // Make sure not to modify space that has been declared free + tmp = anObject->ISA(); + anObject->initIsa(_objc_getFreedObjectClass ()); + newObject = (id)malloc_zone_realloc((malloc_zone_t *)zone, anObject, nBytes); + if (newObject) { + newObject->initIsa(tmp); + } else { + // realloc failed, anObject is still alive + anObject->initIsa(tmp); + } + return newObject; +} + + +static id _object_realloc(id anObject, size_t nBytes) +{ + void *z = malloc_zone_from_ptr(anObject); + return _object_reallocFromZone(anObject, + nBytes, + z ? z : malloc_default_zone()); +} + +id (*_alloc)(Class, size_t) = _class_createInstance; +id (*_copy)(id, size_t) = _object_copy; +id (*_realloc)(id, size_t) = _object_realloc; +id (*_dealloc)(id) = _object_dispose; +id (*_zoneAlloc)(Class, size_t, void *) = _class_createInstanceFromZone; +id (*_zoneCopy)(id, size_t, void *) = _object_copyFromZone; +id (*_zoneRealloc)(id, size_t, void *) = _object_reallocFromZone; +void (*_error)(id, const char *, va_list) = _objc_error; + + +id class_createInstance(Class cls, size_t extraBytes) +{ + return (*_alloc)(cls, extraBytes); +} + +id class_createInstanceFromZone(Class cls, size_t extraBytes, void *z) +{ + OBJC_WARN_DEPRECATED; + return (*_zoneAlloc)(cls, extraBytes, z); +} + +unsigned class_createInstances(Class cls, size_t extraBytes, + id *results, unsigned num_requested) +{ + if (_alloc == &_class_createInstance) { + return _class_createInstancesFromZone(cls, extraBytes, nil, + results, num_requested); + } else { + // _alloc in use, which isn't understood by the batch allocator + return 0; + } +} + +id object_copy(id obj, size_t extraBytes) +{ + return (*_copy)(obj, extraBytes); +} + +id object_copyFromZone(id obj, size_t extraBytes, void *z) +{ + OBJC_WARN_DEPRECATED; + return (*_zoneCopy)(obj, extraBytes, z); +} + +id object_dispose(id obj) +{ + return (*_dealloc)(obj); +} + +id object_realloc(id obj, size_t nBytes) +{ + OBJC_WARN_DEPRECATED; + return (*_realloc)(obj, nBytes); +} + +id object_reallocFromZone(id obj, size_t nBytes, void *z) +{ + OBJC_WARN_DEPRECATED; + return (*_zoneRealloc)(obj, nBytes, z); +} + + +/*********************************************************************** +* object_getIndexedIvars. 
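+*
+* The indexed ivars are the extraBytes requested from class_createInstance,
+* laid out immediately after the declared ivars. Illustrative sketch
+* (struct Extra is hypothetical):
+*
+*   id obj = class_createInstance(cls, sizeof(struct Extra));
+*   struct Extra *extra = (struct Extra *)object_getIndexedIvars(obj);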
+**********************************************************************/ +void *object_getIndexedIvars(id obj) +{ + // ivars are tacked onto the end of the object + if (!obj) return nil; + if (obj->isTaggedPointer()) return nil; + return ((char *) obj) + obj->ISA()->alignedInstanceSize(); +} + + +// ProKit SPI +Class class_setSuperclass(Class cls, Class newSuper) +{ + Class oldSuper = cls->superclass; + set_superclass(cls, newSuper, NO); + flush_caches(cls, YES); + return oldSuper; +} +#endif diff --git a/runtime/objc-class.h b/runtime/objc-class.h new file mode 100644 index 0000000..4599f08 --- /dev/null +++ b/runtime/objc-class.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/runtime/objc-class.mm b/runtime/objc-class.mm new file mode 100644 index 0000000..d16ebc2 --- /dev/null +++ b/runtime/objc-class.mm @@ -0,0 +1,1272 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*********************************************************************** +* objc-class.m +* Copyright 1988-1997, Apple Computer, Inc. +* Author: s. naroff +**********************************************************************/ + + +/*********************************************************************** + * Lazy method list arrays and method list locking (2004-10-19) + * + * cls->methodLists may be in one of three forms: + * 1. nil: The class has no methods. + * 2. non-nil, with CLS_NO_METHOD_ARRAY set: cls->methodLists points + * to a single method list, which is the class's only method list. + * 3. non-nil, with CLS_NO_METHOD_ARRAY clear: cls->methodLists points to + * an array of method list pointers. The end of the array's block + * is set to -1. If the actual number of method lists is smaller + * than that, the rest of the array is nil. + * + * Attaching categories and adding and removing classes may change + * the form of the class list. In addition, individual method lists + * may be reallocated when fixed up. + * + * Classes are initially read as #1 or #2. If a category is attached + * or other methods added, the class is changed to #3. Once in form #3, + * the class is never downgraded to #1 or #2, even if methods are removed. + * Classes added with objc_addClass are initially either #1 or #3. + * + * Accessing and manipulating a class's method lists are synchronized, + * to prevent races when one thread restructures the list. However, + * if the class is not yet in use (i.e. not in class_hash), then the + * thread loading the class may access its method lists without locking. 
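+ *
+ * For example, a class in form #3 with two method lists is laid out as
+ *
+ *   cls->methodLists -> [ list0 | list1 | nil | ... | (void*)-1 ]
+ *
+ * where unused slots are nil and the -1 sentinel marks the end of the
+ * allocated block.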
+ * + * The following functions acquire methodListLock: + * class_getInstanceMethod + * class_getClassMethod + * class_nextMethodList + * class_addMethods + * class_removeMethods + * class_respondsToMethod + * _class_lookupMethodAndLoadCache + * lookupMethodInClassAndLoadCache + * _objc_add_category_flush_caches + * + * The following functions don't acquire methodListLock because they + * only access method lists during class load and unload: + * _objc_register_category + * _resolve_categories_for_class (calls _objc_add_category) + * add_class_to_loadable_list + * _objc_addClass + * _objc_remove_classes_in_image + * + * The following functions use method lists without holding methodListLock. + * The caller must either hold methodListLock, or be loading the class. + * _getMethod (called by class_getInstanceMethod, class_getClassMethod, + * and class_respondsToMethod) + * _findMethodInClass (called by _class_lookupMethodAndLoadCache, + * lookupMethodInClassAndLoadCache, _getMethod) + * _findMethodInList (called by _findMethodInClass) + * nextMethodList (called by _findMethodInClass and class_nextMethodList + * fixupSelectorsInMethodList (called by nextMethodList) + * _objc_add_category (called by _objc_add_category_flush_caches, + * resolve_categories_for_class and _objc_register_category) + * _objc_insertMethods (called by class_addMethods and _objc_add_category) + * _objc_removeMethods (called by class_removeMethods) + * _objcTweakMethodListPointerForClass (called by _objc_insertMethods) + * get_base_method_list (called by add_class_to_loadable_list) + * lookupNamedMethodInMethodList (called by add_class_to_loadable_list) + ***********************************************************************/ + +/*********************************************************************** + * Thread-safety of class info bits (2004-10-19) + * + * Some class info bits are used to store mutable runtime state. + * Modifications of the info bits at particular times need to be + * synchronized to prevent races. + * + * Three thread-safe modification functions are provided: + * cls->setInfo() // atomically sets some bits + * cls->clearInfo() // atomically clears some bits + * cls->changeInfo() // atomically sets some bits and clears others + * These replace CLS_SETINFO() for the multithreaded cases. + * + * Three modification windows are defined: + * - compile time + * - class construction or image load (before +load) in one thread + * - multi-threaded messaging and method caches + * + * Info bit modification at compile time and class construction do not + * need to be locked, because only one thread is manipulating the class. + * Info bit modification during messaging needs to be locked, because + * there may be other threads simultaneously messaging or otherwise + * manipulating the class. 
+ * + * Modification windows for each flag: + * + * CLS_CLASS: compile-time and class load + * CLS_META: compile-time and class load + * CLS_INITIALIZED: +initialize + * CLS_POSING: messaging + * CLS_MAPPED: compile-time + * CLS_FLUSH_CACHE: class load and messaging + * CLS_GROW_CACHE: messaging + * CLS_NEED_BIND: unused + * CLS_METHOD_ARRAY: unused + * CLS_JAVA_HYBRID: JavaBridge only + * CLS_JAVA_CLASS: JavaBridge only + * CLS_INITIALIZING: messaging + * CLS_FROM_BUNDLE: class load + * CLS_HAS_CXX_STRUCTORS: compile-time and class load + * CLS_NO_METHOD_ARRAY: class load and messaging + * CLS_HAS_LOAD_METHOD: class load + * + * CLS_INITIALIZED and CLS_INITIALIZING have additional thread-safety + * constraints to support thread-safe +initialize. See "Thread safety + * during class initialization" for details. + * + * CLS_JAVA_HYBRID and CLS_JAVA_CLASS are set immediately after JavaBridge + * calls objc_addClass(). The JavaBridge does not use an atomic update, + * but the modification counts as "class construction" unless some other + * thread quickly finds the class via the class list. This race is + * small and unlikely in well-behaved code. + * + * Most info bits that may be modified during messaging are also never + * read without a lock. There is no general read lock for the info bits. + * CLS_INITIALIZED: classInitLock + * CLS_FLUSH_CACHE: cacheUpdateLock + * CLS_GROW_CACHE: cacheUpdateLock + * CLS_NO_METHOD_ARRAY: methodListLock + * CLS_INITIALIZING: classInitLock + ***********************************************************************/ + +/*********************************************************************** +* Imports. +**********************************************************************/ + +#include "objc-private.h" +#include "objc-abi.h" +#include + + +/* overriding the default object allocation and error handling routines */ + +OBJC_EXPORT id (*_alloc)(Class, size_t); +OBJC_EXPORT id (*_copy)(id, size_t); +OBJC_EXPORT id (*_realloc)(id, size_t); +OBJC_EXPORT id (*_dealloc)(id); +OBJC_EXPORT id (*_zoneAlloc)(Class, size_t, void *); +OBJC_EXPORT id (*_zoneRealloc)(id, size_t, void *); +OBJC_EXPORT id (*_zoneCopy)(id, size_t, void *); + + +/*********************************************************************** +* Information about multi-thread support: +* +* Since we do not lock many operations which walk the superclass, method +* and ivar chains, these chains must remain intact once a class is published +* by inserting it into the class hashtable. All modifications must be +* atomic so that someone walking these chains will always geta valid +* result. +***********************************************************************/ + + + +/*********************************************************************** +* object_getClass. +* Locking: None. If you add locking, tell gdb (rdar://7516456). +**********************************************************************/ +Class object_getClass(id obj) +{ + if (obj) return obj->getIsa(); + else return Nil; +} + + +/*********************************************************************** +* object_setClass. +**********************************************************************/ +Class object_setClass(id obj, Class cls) +{ + if (!obj) return nil; + + // Prevent a deadlock between the weak reference machinery + // and the +initialize machinery by ensuring that no + // weakly-referenced object has an un-+initialized isa. + // Unresolved future classes are not so protected. 
+ if (!cls->isFuture() && !cls->isInitialized()) {
+ _class_initialize(_class_getNonMetaClass(cls, nil));
+ }
+
+ return obj->changeIsa(cls);
+}
+
+
+/***********************************************************************
+* object_isClass.
+**********************************************************************/
+BOOL object_isClass(id obj)
+{
+ if (!obj) return NO;
+ return obj->isClass();
+}
+
+
+/***********************************************************************
+* object_getClassName.
+**********************************************************************/
+const char *object_getClassName(id obj)
+{
+ return class_getName(obj ? obj->getIsa() : nil);
+}
+
+
+/***********************************************************************
+ * object_getMethodImplementation.
+ **********************************************************************/
+IMP object_getMethodImplementation(id obj, SEL name)
+{
+ Class cls = (obj ? obj->getIsa() : nil);
+ return class_getMethodImplementation(cls, name);
+}
+
+
+/***********************************************************************
+ * object_getMethodImplementation_stret.
+ **********************************************************************/
+#if SUPPORT_STRET
+IMP object_getMethodImplementation_stret(id obj, SEL name)
+{
+ Class cls = (obj ? obj->getIsa() : nil);
+ return class_getMethodImplementation_stret(cls, name);
+}
+#endif
+
+
+static bool isScanned(ptrdiff_t ivar_offset, const uint8_t *layout)
+{
+ if (!layout) return NO;
+
+ ptrdiff_t index = 0, ivar_index = ivar_offset / sizeof(void*);
+ uint8_t byte;
+ while ((byte = *layout++)) {
+ unsigned skips = (byte >> 4);
+ unsigned scans = (byte & 0x0F);
+ index += skips;
+ if (index > ivar_index) return NO;
+ index += scans;
+ if (index > ivar_index) return YES;
+ }
+ return NO;
+}
+
+
+/***********************************************************************
+* _class_lookUpIvar
+* Given an object and an ivar in it, look up some data about that ivar:
+* - its offset
+* - its memory management behavior
+* The ivar is assumed to be word-aligned and of object type.
+**********************************************************************/
+static void
+_class_lookUpIvar(Class cls, Ivar ivar, ptrdiff_t& ivarOffset,
+ objc_ivar_memory_management_t& memoryManagement)
+{
+ ivarOffset = ivar_getOffset(ivar);
+
+ // Look for ARC variables and ARC-style weak.
+
+ // Preflight the hasAutomaticIvars check
+ // because _class_getClassForIvar() may need to take locks.
+ bool hasAutomaticIvars = NO;
+ for (Class c = cls; c; c = c->superclass) {
+ if (c->hasAutomaticIvars()) {
+ hasAutomaticIvars = YES;
+ break;
+ }
+ }
+
+ if (hasAutomaticIvars) {
+ Class ivarCls = _class_getClassForIvar(cls, ivar);
+ if (ivarCls->hasAutomaticIvars()) {
+ // ARC layout bitmaps encode the class's own ivars only.
+ // Use alignedInstanceStart() because unaligned bytes at the start
+ // of this class's ivars are not represented in the layout bitmap.
+ ptrdiff_t localOffset =
+ ivarOffset - ivarCls->alignedInstanceStart();
+
+ if (isScanned(localOffset, class_getIvarLayout(ivarCls))) {
+ memoryManagement = objc_ivar_memoryStrong;
+ return;
+ }
+
+ if (isScanned(localOffset, class_getWeakIvarLayout(ivarCls))) {
+ memoryManagement = objc_ivar_memoryWeak;
+ return;
+ }
+
+ // Unretained is only for true ARC classes.
+ if (ivarCls->isARC()) { + memoryManagement = objc_ivar_memoryUnretained; + return; + } + } + } + + memoryManagement = objc_ivar_memoryUnknown; +} + + +/*********************************************************************** +* _class_getIvarMemoryManagement +* SPI for KVO and others to decide what memory management to use +* when setting instance variables directly. +**********************************************************************/ +objc_ivar_memory_management_t +_class_getIvarMemoryManagement(Class cls, Ivar ivar) +{ + ptrdiff_t offset; + objc_ivar_memory_management_t memoryManagement; + _class_lookUpIvar(cls, ivar, offset, memoryManagement); + return memoryManagement; +} + + +static ALWAYS_INLINE +void _object_setIvar(id obj, Ivar ivar, id value, bool assumeStrong) +{ + if (!obj || !ivar || obj->isTaggedPointer()) return; + + ptrdiff_t offset; + objc_ivar_memory_management_t memoryManagement; + _class_lookUpIvar(obj->ISA(), ivar, offset, memoryManagement); + + if (memoryManagement == objc_ivar_memoryUnknown) { + if (assumeStrong) memoryManagement = objc_ivar_memoryStrong; + else memoryManagement = objc_ivar_memoryUnretained; + } + + id *location = (id *)((char *)obj + offset); + + switch (memoryManagement) { + case objc_ivar_memoryWeak: objc_storeWeak(location, value); break; + case objc_ivar_memoryStrong: objc_storeStrong(location, value); break; + case objc_ivar_memoryUnretained: *location = value; break; + case objc_ivar_memoryUnknown: _objc_fatal("impossible"); + } +} + +void object_setIvar(id obj, Ivar ivar, id value) +{ + return _object_setIvar(obj, ivar, value, false /*not strong default*/); +} + +void object_setIvarWithStrongDefault(id obj, Ivar ivar, id value) +{ + return _object_setIvar(obj, ivar, value, true /*strong default*/); +} + + +id object_getIvar(id obj, Ivar ivar) +{ + if (!obj || !ivar || obj->isTaggedPointer()) return nil; + + ptrdiff_t offset; + objc_ivar_memory_management_t memoryManagement; + _class_lookUpIvar(obj->ISA(), ivar, offset, memoryManagement); + + id *location = (id *)((char *)obj + offset); + + if (memoryManagement == objc_ivar_memoryWeak) { + return objc_loadWeak(location); + } else { + return *location; + } +} + + +static ALWAYS_INLINE +Ivar _object_setInstanceVariable(id obj, const char *name, void *value, + bool assumeStrong) +{ + Ivar ivar = nil; + + if (obj && name && !obj->isTaggedPointer()) { + if ((ivar = _class_getVariable(obj->ISA(), name))) { + _object_setIvar(obj, ivar, (id)value, assumeStrong); + } + } + return ivar; +} + +Ivar object_setInstanceVariable(id obj, const char *name, void *value) +{ + return _object_setInstanceVariable(obj, name, value, false); +} + +Ivar object_setInstanceVariableWithStrongDefault(id obj, const char *name, + void *value) +{ + return _object_setInstanceVariable(obj, name, value, true); +} + + +Ivar object_getInstanceVariable(id obj, const char *name, void **value) +{ + if (obj && name && !obj->isTaggedPointer()) { + Ivar ivar; + if ((ivar = class_getInstanceVariable(obj->ISA(), name))) { + if (value) *value = (void *)object_getIvar(obj, ivar); + return ivar; + } + } + if (value) *value = nil; + return nil; +} + + +/*********************************************************************** +* object_cxxDestructFromClass. +* Call C++ destructors on obj, starting with cls's +* dtor method (if any) followed by superclasses' dtors (if any), +* stopping at cls's dtor (if any). +* Uses methodListLock and cacheUpdateLock. The caller must hold neither. 
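+*
+* For example, a class with a non-trivial C++ ivar (hedged sketch; the
+* class is hypothetical) receives compiler-generated .cxx_construct and
+* .cxx_destruct methods, and this code runs the destructor chain:
+*
+*   @interface Wrapper : NSObject { std::vector<int> _values; }
+*   @end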
+**********************************************************************/ +static void object_cxxDestructFromClass(id obj, Class cls) +{ + void (*dtor)(id); + + // Call cls's dtor first, then superclasses's dtors. + + for ( ; cls; cls = cls->superclass) { + if (!cls->hasCxxDtor()) return; + dtor = (void(*)(id)) + lookupMethodInClassAndLoadCache(cls, SEL_cxx_destruct); + if (dtor != (void(*)(id))_objc_msgForward_impcache) { + if (PrintCxxCtors) { + _objc_inform("CXX: calling C++ destructors for class %s", + cls->nameForLogging()); + } + (*dtor)(obj); + } + } +} + + +/*********************************************************************** +* object_cxxDestruct. +* Call C++ destructors on obj, if any. +* Uses methodListLock and cacheUpdateLock. The caller must hold neither. +**********************************************************************/ +void object_cxxDestruct(id obj) +{ + if (!obj) return; + if (obj->isTaggedPointer()) return; + object_cxxDestructFromClass(obj, obj->ISA()); +} + + +/*********************************************************************** +* object_cxxConstructFromClass. +* Recursively call C++ constructors on obj, starting with base class's +* ctor method (if any) followed by subclasses' ctors (if any), stopping +* at cls's ctor (if any). +* Does not check cls->hasCxxCtor(). The caller should preflight that. +* Returns self if construction succeeded. +* Returns nil if some constructor threw an exception. The exception is +* caught and discarded. Any partial construction is destructed. +* Uses methodListLock and cacheUpdateLock. The caller must hold neither. +* +* .cxx_construct returns id. This really means: +* return self: construction succeeded +* return nil: construction failed because a C++ constructor threw an exception +**********************************************************************/ +id +object_cxxConstructFromClass(id obj, Class cls) +{ + assert(cls->hasCxxCtor()); // required for performance, not correctness + + id (*ctor)(id); + Class supercls; + + supercls = cls->superclass; + + // Call superclasses' ctors first, if any. + if (supercls && supercls->hasCxxCtor()) { + bool ok = object_cxxConstructFromClass(obj, supercls); + if (!ok) return nil; // some superclass's ctor failed - give up + } + + // Find this class's ctor, if any. + ctor = (id(*)(id))lookupMethodInClassAndLoadCache(cls, SEL_cxx_construct); + if (ctor == (id(*)(id))_objc_msgForward_impcache) return obj; // no ctor - ok + + // Call this class's ctor. + if (PrintCxxCtors) { + _objc_inform("CXX: calling C++ constructors for class %s", + cls->nameForLogging()); + } + if ((*ctor)(obj)) return obj; // ctor called and succeeded - ok + + // This class's ctor was called and failed. + // Call superclasses's dtors to clean up. + if (supercls) object_cxxDestructFromClass(obj, supercls); + return nil; +} + + +/*********************************************************************** +* fixupCopiedIvars +* Fix up ARC strong and ARC-style weak variables +* after oldObject was memcpy'd to newObject. +**********************************************************************/ +void fixupCopiedIvars(id newObject, id oldObject) +{ + for (Class cls = oldObject->ISA(); cls; cls = cls->superclass) { + if (cls->hasAutomaticIvars()) { + // Use alignedInstanceStart() because unaligned bytes at the start + // of this class's ivars are not represented in the layout bitmap. 
+ size_t instanceStart = cls->alignedInstanceStart(); + + const uint8_t *strongLayout = class_getIvarLayout(cls); + if (strongLayout) { + id *newPtr = (id *)((char*)newObject + instanceStart); + unsigned char byte; + while ((byte = *strongLayout++)) { + unsigned skips = (byte >> 4); + unsigned scans = (byte & 0x0F); + newPtr += skips; + while (scans--) { + // ensure strong references are properly retained. + id value = *newPtr++; + if (value) objc_retain(value); + } + } + } + + const uint8_t *weakLayout = class_getWeakIvarLayout(cls); + // fix up weak references if any. + if (weakLayout) { + id *newPtr = (id *)((char*)newObject + instanceStart), *oldPtr = (id *)((char*)oldObject + instanceStart); + unsigned char byte; + while ((byte = *weakLayout++)) { + unsigned skips = (byte >> 4); + unsigned weaks = (byte & 0x0F); + newPtr += skips, oldPtr += skips; + while (weaks--) { + objc_copyWeak(newPtr, oldPtr); + ++newPtr, ++oldPtr; + } + } + } + } + } +} + + +/*********************************************************************** +* _class_resolveClassMethod +* Call +resolveClassMethod, looking for a method to be added to class cls. +* cls should be a metaclass. +* Does not check if the method already exists. +**********************************************************************/ +static void _class_resolveClassMethod(Class cls, SEL sel, id inst) +{ + assert(cls->isMetaClass()); + + if (! lookUpImpOrNil(cls, SEL_resolveClassMethod, inst, + NO/*initialize*/, YES/*cache*/, NO/*resolver*/)) + { + // Resolver not implemented. + return; + } + + BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend; + bool resolved = msg(_class_getNonMetaClass(cls, inst), + SEL_resolveClassMethod, sel); + + // Cache the result (good or bad) so the resolver doesn't fire next time. + // +resolveClassMethod adds to self->ISA() a.k.a. cls + IMP imp = lookUpImpOrNil(cls, sel, inst, + NO/*initialize*/, YES/*cache*/, NO/*resolver*/); + + if (resolved && PrintResolving) { + if (imp) { + _objc_inform("RESOLVE: method %c[%s %s] " + "dynamically resolved to %p", + cls->isMetaClass() ? '+' : '-', + cls->nameForLogging(), sel_getName(sel), imp); + } + else { + // Method resolver didn't add anything? + _objc_inform("RESOLVE: +[%s resolveClassMethod:%s] returned YES" + ", but no new implementation of %c[%s %s] was found", + cls->nameForLogging(), sel_getName(sel), + cls->isMetaClass() ? '+' : '-', + cls->nameForLogging(), sel_getName(sel)); + } + } +} + + +/*********************************************************************** +* _class_resolveInstanceMethod +* Call +resolveInstanceMethod, looking for a method to be added to class cls. +* cls may be a metaclass or a non-meta class. +* Does not check if the method already exists. +**********************************************************************/ +static void _class_resolveInstanceMethod(Class cls, SEL sel, id inst) +{ + if (! lookUpImpOrNil(cls->ISA(), SEL_resolveInstanceMethod, cls, + NO/*initialize*/, YES/*cache*/, NO/*resolver*/)) + { + // Resolver not implemented. + return; + } + + BOOL (*msg)(Class, SEL, SEL) = (typeof(msg))objc_msgSend; + bool resolved = msg(cls, SEL_resolveInstanceMethod, sel); + + // Cache the result (good or bad) so the resolver doesn't fire next time. + // +resolveInstanceMethod adds to self a.k.a. cls + IMP imp = lookUpImpOrNil(cls, sel, inst, + NO/*initialize*/, YES/*cache*/, NO/*resolver*/); + + if (resolved && PrintResolving) { + if (imp) { + _objc_inform("RESOLVE: method %c[%s %s] " + "dynamically resolved to %p", + cls->isMetaClass() ? 
'+' : '-', + cls->nameForLogging(), sel_getName(sel), imp); + } + else { + // Method resolver didn't add anything? + _objc_inform("RESOLVE: +[%s resolveInstanceMethod:%s] returned YES" + ", but no new implementation of %c[%s %s] was found", + cls->nameForLogging(), sel_getName(sel), + cls->isMetaClass() ? '+' : '-', + cls->nameForLogging(), sel_getName(sel)); + } + } +} + + +/*********************************************************************** +* _class_resolveMethod +* Call +resolveClassMethod or +resolveInstanceMethod. +* Returns nothing; any result would be potentially out-of-date already. +* Does not check if the method already exists. +**********************************************************************/ +void _class_resolveMethod(Class cls, SEL sel, id inst) +{ + if (! cls->isMetaClass()) { + // try [cls resolveInstanceMethod:sel] + _class_resolveInstanceMethod(cls, sel, inst); + } + else { + // try [nonMetaClass resolveClassMethod:sel] + // and [cls resolveInstanceMethod:sel] + _class_resolveClassMethod(cls, sel, inst); + if (!lookUpImpOrNil(cls, sel, inst, + NO/*initialize*/, YES/*cache*/, NO/*resolver*/)) + { + _class_resolveInstanceMethod(cls, sel, inst); + } + } +} + + +/*********************************************************************** +* class_getClassMethod. Return the class method for the specified +* class and selector. +**********************************************************************/ +Method class_getClassMethod(Class cls, SEL sel) +{ + if (!cls || !sel) return nil; + + return class_getInstanceMethod(cls->getMeta(), sel); +} + + +/*********************************************************************** +* class_getInstanceVariable. Return the named instance variable. +**********************************************************************/ +Ivar class_getInstanceVariable(Class cls, const char *name) +{ + if (!cls || !name) return nil; + + return _class_getVariable(cls, name); +} + + +/*********************************************************************** +* class_getClassVariable. Return the named class variable. +**********************************************************************/ +Ivar class_getClassVariable(Class cls, const char *name) +{ + if (!cls) return nil; + + return class_getInstanceVariable(cls->ISA(), name); +} + + +/*********************************************************************** +* gdb_objc_class_changed +* Tell gdb that a class changed. Currently used for OBJC2 ivar layouts only +* Does nothing; gdb sets a breakpoint on it. +**********************************************************************/ +BREAKPOINT_FUNCTION( + void gdb_objc_class_changed(Class cls, unsigned long changes, const char *classname) +); + + +/*********************************************************************** +* class_respondsToSelector. +**********************************************************************/ +BOOL class_respondsToMethod(Class cls, SEL sel) +{ + OBJC_WARN_DEPRECATED; + + return class_respondsToSelector(cls, sel); +} + + +BOOL class_respondsToSelector(Class cls, SEL sel) +{ + return class_respondsToSelector_inst(cls, sel, nil); +} + + +// inst is an instance of cls or a subclass thereof, or nil if none is known. +// Non-nil inst is faster in some cases. See lookUpImpOrForward() for details. +bool class_respondsToSelector_inst(Class cls, SEL sel, id inst) +{ + IMP imp; + + if (!sel || !cls) return NO; + + // Avoids +initialize because it historically did so. + // We're not returning a callable IMP anyway. 
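+    // [Editorial sketch: this comment is not part of the original sources.]
+    // Because resolver is passed as YES below, this lookup may end up in
+    // _class_resolveMethod() above, so a class can still answer YES here by
+    // adding the method lazily. Illustrative user-side code, with
+    // `frobnicate` and `frobnicateIMP` purely hypothetical names:
+    //
+    //   + (BOOL)resolveInstanceMethod:(SEL)sel {
+    //       if (sel == @selector(frobnicate)) {
+    //           class_addMethod(self, sel, (IMP)frobnicateIMP, "v@:");
+    //           return YES;
+    //       }
+    //       return [super resolveInstanceMethod:sel];
+    //   }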
+ imp = lookUpImpOrNil(cls, sel, inst, + NO/*initialize*/, YES/*cache*/, YES/*resolver*/); + return bool(imp); +} + + +/*********************************************************************** +* class_getMethodImplementation. +* Returns the IMP that would be invoked if [obj sel] were sent, +* where obj is an instance of class cls. +**********************************************************************/ +IMP class_lookupMethod(Class cls, SEL sel) +{ + OBJC_WARN_DEPRECATED; + + // No one responds to zero! + if (!sel) { + __objc_error(cls, "invalid selector (null)"); + } + + return class_getMethodImplementation(cls, sel); +} + +IMP class_getMethodImplementation(Class cls, SEL sel) +{ + IMP imp; + + if (!cls || !sel) return nil; + + imp = lookUpImpOrNil(cls, sel, nil, + YES/*initialize*/, YES/*cache*/, YES/*resolver*/); + + // Translate forwarding function to C-callable external version + if (!imp) { + return _objc_msgForward; + } + + return imp; +} + +#if SUPPORT_STRET +IMP class_getMethodImplementation_stret(Class cls, SEL sel) +{ + IMP imp = class_getMethodImplementation(cls, sel); + + // Translate forwarding function to struct-returning version + if (imp == (IMP)&_objc_msgForward /* not _internal! */) { + return (IMP)&_objc_msgForward_stret; + } + return imp; +} +#endif + + +/*********************************************************************** +* instrumentObjcMessageSends +**********************************************************************/ +#if !SUPPORT_MESSAGE_LOGGING + +void instrumentObjcMessageSends(BOOL flag) +{ +} + +#else + +bool objcMsgLogEnabled = false; +static int objcMsgLogFD = -1; +static spinlock_t objcMsgLogLock; + +bool logMessageSend(bool isClassMethod, + const char *objectsClass, + const char *implementingClass, + SEL selector) +{ + char buf[ 1024 ]; + + // Create/open the log file + if (objcMsgLogFD == (-1)) + { + snprintf (buf, sizeof(buf), "/tmp/msgSends-%d", (int) getpid ()); + objcMsgLogFD = secure_open (buf, O_WRONLY | O_CREAT, geteuid()); + if (objcMsgLogFD < 0) { + // no log file - disable logging + objcMsgLogEnabled = false; + objcMsgLogFD = -1; + return true; + } + } + + // Make the log entry + snprintf(buf, sizeof(buf), "%c %s %s %s\n", + isClassMethod ? '+' : '-', + objectsClass, + implementingClass, + sel_getName(selector)); + + objcMsgLogLock.lock(); + write (objcMsgLogFD, buf, strlen(buf)); + objcMsgLogLock.unlock(); + + // Tell caller to not cache the method + return false; +} + +void instrumentObjcMessageSends(BOOL flag) +{ + bool enable = flag; + + // Shortcut NOP + if (objcMsgLogEnabled == enable) + return; + + // If enabling, flush all method caches so we get some traces + if (enable) + _objc_flush_caches(Nil); + + // Sync our log file + if (objcMsgLogFD != -1) + fsync (objcMsgLogFD); + + objcMsgLogEnabled = enable; +} + +// SUPPORT_MESSAGE_LOGGING +#endif + + +Class _calloc_class(size_t size) +{ + return (Class) calloc(1, size); +} + +Class class_getSuperclass(Class cls) +{ + if (!cls) return nil; + return cls->superclass; +} + +BOOL class_isMetaClass(Class cls) +{ + if (!cls) return NO; + return cls->isMetaClass(); +} + + +size_t class_getInstanceSize(Class cls) +{ + if (!cls) return 0; + return cls->alignedInstanceSize(); +} + + +/*********************************************************************** +* method_getNumberOfArguments. 
+**********************************************************************/ +unsigned int method_getNumberOfArguments(Method m) +{ + if (!m) return 0; + return encoding_getNumberOfArguments(method_getTypeEncoding(m)); +} + + +void method_getReturnType(Method m, char *dst, size_t dst_len) +{ + encoding_getReturnType(method_getTypeEncoding(m), dst, dst_len); +} + + +char * method_copyReturnType(Method m) +{ + return encoding_copyReturnType(method_getTypeEncoding(m)); +} + + +void method_getArgumentType(Method m, unsigned int index, + char *dst, size_t dst_len) +{ + encoding_getArgumentType(method_getTypeEncoding(m), + index, dst, dst_len); +} + + +char * method_copyArgumentType(Method m, unsigned int index) +{ + return encoding_copyArgumentType(method_getTypeEncoding(m), index); +} + + +/*********************************************************************** +* _objc_constructOrFree +* Call C++ constructors, and free() if they fail. +* bytes->isa must already be set. +* cls must have cxx constructors. +* Returns the object, or nil. +**********************************************************************/ +id +_objc_constructOrFree(id bytes, Class cls) +{ + assert(cls->hasCxxCtor()); // for performance, not correctness + + id obj = object_cxxConstructFromClass(bytes, cls); + if (!obj) free(bytes); + + return obj; +} + + +/*********************************************************************** +* _class_createInstancesFromZone +* Batch-allocating version of _class_createInstanceFromZone. +* Attempts to allocate num_requested objects, each with extraBytes. +* Returns the number of allocated objects (possibly zero), with +* the allocated pointers in *results. +**********************************************************************/ +unsigned +_class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, + id *results, unsigned num_requested) +{ + unsigned num_allocated; + if (!cls) return 0; + + size_t size = cls->instanceSize(extraBytes); + + num_allocated = + malloc_zone_batch_malloc((malloc_zone_t *)(zone ? zone : malloc_default_zone()), + size, (void**)results, num_requested); + for (unsigned i = 0; i < num_allocated; i++) { + bzero(results[i], size); + } + + // Construct each object, and delete any that fail construction. + + unsigned shift = 0; + bool ctor = cls->hasCxxCtor(); + for (unsigned i = 0; i < num_allocated; i++) { + id obj = results[i]; + obj->initIsa(cls); // fixme allow nonpointer + if (ctor) obj = _objc_constructOrFree(obj, cls); + + if (obj) { + results[i-shift] = obj; + } else { + shift++; + } + } + + return num_allocated - shift; +} + + +/*********************************************************************** +* inform_duplicate. Complain about duplicate class implementations. +**********************************************************************/ +void +inform_duplicate(const char *name, Class oldCls, Class newCls) +{ +#if TARGET_OS_WIN32 + (DebugDuplicateClasses ? _objc_fatal : _objc_inform) + ("Class %s is implemented in two different images.", name); +#else + const header_info *oldHeader = _headerForClass(oldCls); + const header_info *newHeader = _headerForClass(newCls); + const char *oldName = oldHeader ? oldHeader->fname() : "??"; + const char *newName = newHeader ? newHeader->fname() : "??"; + + (DebugDuplicateClasses ? _objc_fatal : _objc_inform) + ("Class %s is implemented in both %s (%p) and %s (%p). " + "One of the two will be used. 
Which one is undefined.", + name, oldName, oldCls, newName, newCls); +#endif +} + + +const char * +copyPropertyAttributeString(const objc_property_attribute_t *attrs, + unsigned int count) +{ + char *result; + unsigned int i; + if (count == 0) return strdup(""); + +#if DEBUG + // debug build: sanitize input + for (i = 0; i < count; i++) { + assert(attrs[i].name); + assert(strlen(attrs[i].name) > 0); + assert(! strchr(attrs[i].name, ',')); + assert(! strchr(attrs[i].name, '"')); + if (attrs[i].value) assert(! strchr(attrs[i].value, ',')); + } +#endif + + size_t len = 0; + for (i = 0; i < count; i++) { + if (attrs[i].value) { + size_t namelen = strlen(attrs[i].name); + if (namelen > 1) namelen += 2; // long names get quoted + len += namelen + strlen(attrs[i].value) + 1; + } + } + + result = (char *)malloc(len + 1); + char *s = result; + for (i = 0; i < count; i++) { + if (attrs[i].value) { + size_t namelen = strlen(attrs[i].name); + if (namelen > 1) { + s += sprintf(s, "\"%s\"%s,", attrs[i].name, attrs[i].value); + } else { + s += sprintf(s, "%s%s,", attrs[i].name, attrs[i].value); + } + } + } + + // remove trailing ',' if any + if (s > result) s[-1] = '\0'; + + return result; +} + +/* + Property attribute string format: + + - Comma-separated name-value pairs. + - Name and value may not contain , + - Name may not contain " + - Value may be empty + - Name is single char, value follows + - OR Name is double-quoted string of 2+ chars, value follows + + Grammar: + attribute-string: \0 + attribute-string: name-value-pair (',' name-value-pair)* + name-value-pair: unquoted-name optional-value + name-value-pair: quoted-name optional-value + unquoted-name: [^",] + quoted-name: '"' [^",]{2,} '"' + optional-value: [^,]* + +*/ +static unsigned int +iteratePropertyAttributes(const char *attrs, + bool (*fn)(unsigned int index, + void *ctx1, void *ctx2, + const char *name, size_t nlen, + const char *value, size_t vlen), + void *ctx1, void *ctx2) +{ + if (!attrs) return 0; + +#if DEBUG + const char *attrsend = attrs + strlen(attrs); +#endif + unsigned int attrcount = 0; + + while (*attrs) { + // Find the next comma-separated attribute + const char *start = attrs; + const char *end = start + strcspn(attrs, ","); + + // Move attrs past this attribute and the comma (if any) + attrs = *end ? 
end+1 : end; + + assert(attrs <= attrsend); + assert(start <= attrsend); + assert(end <= attrsend); + + // Skip empty attribute + if (start == end) continue; + + // Process one non-empty comma-free attribute [start,end) + const char *nameStart; + const char *nameEnd; + + assert(start < end); + assert(*start); + if (*start != '\"') { + // single-char short name + nameStart = start; + nameEnd = start+1; + start++; + } + else { + // double-quoted long name + nameStart = start+1; + nameEnd = nameStart + strcspn(nameStart, "\","); + start++; // leading quote + start += nameEnd - nameStart; // name + if (*start == '\"') start++; // trailing quote, if any + } + + // Process one possibly-empty comma-free attribute value [start,end) + const char *valueStart; + const char *valueEnd; + + assert(start <= end); + + valueStart = start; + valueEnd = end; + + bool more = (*fn)(attrcount, ctx1, ctx2, + nameStart, nameEnd-nameStart, + valueStart, valueEnd-valueStart); + attrcount++; + if (!more) break; + } + + return attrcount; +} + + +static bool +copyOneAttribute(unsigned int index, void *ctxa, void *ctxs, + const char *name, size_t nlen, const char *value, size_t vlen) +{ + objc_property_attribute_t **ap = (objc_property_attribute_t**)ctxa; + char **sp = (char **)ctxs; + + objc_property_attribute_t *a = *ap; + char *s = *sp; + + a->name = s; + memcpy(s, name, nlen); + s += nlen; + *s++ = '\0'; + + a->value = s; + memcpy(s, value, vlen); + s += vlen; + *s++ = '\0'; + + a++; + + *ap = a; + *sp = s; + + return YES; +} + + +objc_property_attribute_t * +copyPropertyAttributeList(const char *attrs, unsigned int *outCount) +{ + if (!attrs) { + if (outCount) *outCount = 0; + return nil; + } + + // Result size: + // number of commas plus 1 for the attributes (upper bound) + // plus another attribute for the attribute array terminator + // plus strlen(attrs) for name/value string data (upper bound) + // plus count*2 for the name/value string terminators (upper bound) + unsigned int attrcount = 1; + const char *s; + for (s = attrs; s && *s; s++) { + if (*s == ',') attrcount++; + } + + size_t size = + attrcount * sizeof(objc_property_attribute_t) + + sizeof(objc_property_attribute_t) + + strlen(attrs) + + attrcount * 2; + objc_property_attribute_t *result = (objc_property_attribute_t *) + calloc(size, 1); + + objc_property_attribute_t *ra = result; + char *rs = (char *)(ra+attrcount+1); + + attrcount = iteratePropertyAttributes(attrs, copyOneAttribute, &ra, &rs); + + assert((uint8_t *)(ra+1) <= (uint8_t *)result+size); + assert((uint8_t *)rs <= (uint8_t *)result+size); + + if (attrcount == 0) { + free(result); + result = nil; + } + + if (outCount) *outCount = attrcount; + return result; +} + + +static bool +findOneAttribute(unsigned int index, void *ctxa, void *ctxs, + const char *name, size_t nlen, const char *value, size_t vlen) +{ + const char *query = (char *)ctxa; + char **resultp = (char **)ctxs; + + if (strlen(query) == nlen && 0 == strncmp(name, query, nlen)) { + char *result = (char *)calloc(vlen+1, 1); + memcpy(result, value, vlen); + result[vlen] = '\0'; + *resultp = result; + return NO; + } + + return YES; +} + +char *copyPropertyAttributeValue(const char *attrs, const char *name) +{ + char *result = nil; + + iteratePropertyAttributes(attrs, findOneAttribute, (void*)name, &result); + + return result; +} diff --git a/runtime/objc-config.h b/runtime/objc-config.h new file mode 100644 index 0000000..979b467 --- /dev/null +++ b/runtime/objc-config.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 1999-2002, 
2005-2008 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_CONFIG_H_ +#define _OBJC_CONFIG_H_ + +#include + +// Avoid the !NDEBUG double negative. +#if !NDEBUG +# define DEBUG 1 +#else +# define DEBUG 0 +#endif + +// Define SUPPORT_GC_COMPAT=1 to enable compatibility where GC once was. +// OBJC_NO_GC and OBJC_NO_GC_API in objc-api.h mean something else. +#if !TARGET_OS_OSX +# define SUPPORT_GC_COMPAT 0 +#else +# define SUPPORT_GC_COMPAT 1 +#endif + +// Define SUPPORT_ZONES=1 to enable malloc zone support in NXHashTable. +#if !TARGET_OS_OSX +# define SUPPORT_ZONES 0 +#else +# define SUPPORT_ZONES 1 +#endif + +// Define SUPPORT_MOD=1 to use the mod operator in NXHashTable and objc-sel-set +#if defined(__arm__) +# define SUPPORT_MOD 0 +#else +# define SUPPORT_MOD 1 +#endif + +// Define SUPPORT_PREOPT=1 to enable dyld shared cache optimizations +#if TARGET_OS_WIN32 || TARGET_OS_SIMULATOR +# define SUPPORT_PREOPT 0 +#else +# define SUPPORT_PREOPT 1 +#endif + +// Define SUPPORT_TAGGED_POINTERS=1 to enable tagged pointer objects +// Be sure to edit tagged pointer SPI in objc-internal.h as well. +#if !(__OBJC2__ && __LP64__) +# define SUPPORT_TAGGED_POINTERS 0 +#else +# define SUPPORT_TAGGED_POINTERS 1 +#endif + +// Define SUPPORT_MSB_TAGGED_POINTERS to use the MSB +// as the tagged pointer marker instead of the LSB. +// Be sure to edit tagged pointer SPI in objc-internal.h as well. +#if !SUPPORT_TAGGED_POINTERS || !TARGET_OS_IPHONE +# define SUPPORT_MSB_TAGGED_POINTERS 0 +#else +# define SUPPORT_MSB_TAGGED_POINTERS 1 +#endif + +// Define SUPPORT_INDEXED_ISA=1 on platforms that store the class in the isa +// field as an index into a class table. +// Note, keep this in sync with any .s files which also define it. +// Be sure to edit objc-abi.h as well. +#if __ARM_ARCH_7K__ >= 2 +# define SUPPORT_INDEXED_ISA 1 +#else +# define SUPPORT_INDEXED_ISA 0 +#endif + +// Define SUPPORT_PACKED_ISA=1 on platforms that store the class in the isa +// field as a maskable pointer with other data around it. +#if (!__LP64__ || TARGET_OS_WIN32 || TARGET_OS_SIMULATOR) +# define SUPPORT_PACKED_ISA 0 +#else +# define SUPPORT_PACKED_ISA 1 +#endif + +// Define SUPPORT_NONPOINTER_ISA=1 on any platform that may store something +// in the isa field that is not a raw pointer. +#if !SUPPORT_INDEXED_ISA && !SUPPORT_PACKED_ISA +# define SUPPORT_NONPOINTER_ISA 0 +#else +# define SUPPORT_NONPOINTER_ISA 1 +#endif + +// Define SUPPORT_FIXUP=1 to repair calls sites for fixup dispatch. +// Fixup messaging itself is no longer supported. 
+// Be sure to edit objc-abi.h as well (objc_msgSend*_fixup) +#if !(defined(__x86_64__) && (TARGET_OS_OSX || TARGET_OS_SIMULATOR)) +# define SUPPORT_FIXUP 0 +#else +# define SUPPORT_FIXUP 1 +#endif + +// Define SUPPORT_ZEROCOST_EXCEPTIONS to use "zero-cost" exceptions for OBJC2. +// Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler) +#if !__OBJC2__ || (defined(__arm__) && __USING_SJLJ_EXCEPTIONS__) +# define SUPPORT_ZEROCOST_EXCEPTIONS 0 +#else +# define SUPPORT_ZEROCOST_EXCEPTIONS 1 +#endif + +// Define SUPPORT_ALT_HANDLERS if you're using zero-cost exceptions +// but also need to support AppKit's alt-handler scheme +// Be sure to edit objc-exception.h as well (objc_add/removeExceptionHandler) +#if !SUPPORT_ZEROCOST_EXCEPTIONS || TARGET_OS_IPHONE || TARGET_OS_EMBEDDED +# define SUPPORT_ALT_HANDLERS 0 +#else +# define SUPPORT_ALT_HANDLERS 1 +#endif + +// Define SUPPORT_RETURN_AUTORELEASE to optimize autoreleased return values +#if TARGET_OS_WIN32 +# define SUPPORT_RETURN_AUTORELEASE 0 +#else +# define SUPPORT_RETURN_AUTORELEASE 1 +#endif + +// Define SUPPORT_STRET on architectures that need separate struct-return ABI. +#if defined(__arm64__) +# define SUPPORT_STRET 0 +#else +# define SUPPORT_STRET 1 +#endif + +// Define SUPPORT_MESSAGE_LOGGING to enable NSObjCMessageLoggingEnabled +#if TARGET_OS_WIN32 || TARGET_OS_EMBEDDED +# define SUPPORT_MESSAGE_LOGGING 0 +#else +# define SUPPORT_MESSAGE_LOGGING 1 +#endif + +// Define SUPPORT_QOS_HACK to work around deadlocks due to QoS bugs. +#if !__OBJC2__ || TARGET_OS_WIN32 +# define SUPPORT_QOS_HACK 0 +#else +# define SUPPORT_QOS_HACK 1 +#endif + +// OBJC_INSTRUMENTED controls whether message dispatching is dynamically +// monitored. Monitoring introduces substantial overhead. +// NOTE: To define this condition, do so in the build command, NOT by +// uncommenting the line here. This is because objc-class.h heeds this +// condition, but objc-class.h can not #include this file (objc-config.h) +// because objc-class.h is public and objc-config.h is not. 
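+// [Editorial note, not in the original sources.] "In the build command"
+// means defining the macro for every translation unit and assembly file,
+// e.g. a hypothetical invocation like
+//   clang++ -DOBJC_INSTRUMENTED=1 -c objc-class.mm
+// rather than uncommenting the line below.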
+//#define OBJC_INSTRUMENTED + +#endif diff --git a/runtime/objc-env.h b/runtime/objc-env.h new file mode 100644 index 0000000..7eb0afa --- /dev/null +++ b/runtime/objc-env.h @@ -0,0 +1,42 @@ +// -*- truncate-lines: t; -*- + +// OPTION(var, env, help) + +OPTION( PrintImages, OBJC_PRINT_IMAGES, "log image and library names as they are loaded") +OPTION( PrintImageTimes, OBJC_PRINT_IMAGE_TIMES, "measure duration of image loading steps") +OPTION( PrintLoading, OBJC_PRINT_LOAD_METHODS, "log calls to class and category +load methods") +OPTION( PrintInitializing, OBJC_PRINT_INITIALIZE_METHODS, "log calls to class +initialize methods") +OPTION( PrintResolving, OBJC_PRINT_RESOLVED_METHODS, "log methods created by +resolveClassMethod: and +resolveInstanceMethod:") +OPTION( PrintConnecting, OBJC_PRINT_CLASS_SETUP, "log progress of class and category setup") +OPTION( PrintProtocols, OBJC_PRINT_PROTOCOL_SETUP, "log progress of protocol setup") +OPTION( PrintIvars, OBJC_PRINT_IVAR_SETUP, "log processing of non-fragile ivars") +OPTION( PrintVtables, OBJC_PRINT_VTABLE_SETUP, "log processing of class vtables") +OPTION( PrintVtableImages, OBJC_PRINT_VTABLE_IMAGES, "print vtable images showing overridden methods") +OPTION( PrintCaches, OBJC_PRINT_CACHE_SETUP, "log processing of method caches") +OPTION( PrintFuture, OBJC_PRINT_FUTURE_CLASSES, "log use of future classes for toll-free bridging") +OPTION( PrintPreopt, OBJC_PRINT_PREOPTIMIZATION, "log preoptimization courtesy of dyld shared cache") +OPTION( PrintCxxCtors, OBJC_PRINT_CXX_CTORS, "log calls to C++ ctors and dtors for instance variables") +OPTION( PrintExceptions, OBJC_PRINT_EXCEPTIONS, "log exception handling") +OPTION( PrintExceptionThrow, OBJC_PRINT_EXCEPTION_THROW, "log backtrace of every objc_exception_throw()") +OPTION( PrintAltHandlers, OBJC_PRINT_ALT_HANDLERS, "log processing of exception alt handlers") +OPTION( PrintReplacedMethods, OBJC_PRINT_REPLACED_METHODS, "log methods replaced by category implementations") +OPTION( PrintDeprecation, OBJC_PRINT_DEPRECATION_WARNINGS, "warn about calls to deprecated runtime functions") +OPTION( PrintPoolHiwat, OBJC_PRINT_POOL_HIGHWATER, "log high-water marks for autorelease pools") +OPTION( PrintCustomRR, OBJC_PRINT_CUSTOM_RR, "log classes with un-optimized custom retain/release methods") +OPTION( PrintCustomAWZ, OBJC_PRINT_CUSTOM_AWZ, "log classes with un-optimized custom allocWithZone methods") +OPTION( PrintRawIsa, OBJC_PRINT_RAW_ISA, "log classes that require raw pointer isa fields") + +OPTION( DebugUnload, OBJC_DEBUG_UNLOAD, "warn about poorly-behaving bundles when unloaded") +OPTION( DebugFragileSuperclasses, OBJC_DEBUG_FRAGILE_SUPERCLASSES, "warn about subclasses that may have been broken by subsequent changes to superclasses") +OPTION( DebugNilSync, OBJC_DEBUG_NIL_SYNC, "warn about @synchronized(nil), which does no synchronization") +OPTION( DebugNonFragileIvars, OBJC_DEBUG_NONFRAGILE_IVARS, "capriciously rearrange non-fragile ivars") +OPTION( DebugAltHandlers, OBJC_DEBUG_ALT_HANDLERS, "record more info about bad alt handler use") +OPTION( DebugMissingPools, OBJC_DEBUG_MISSING_POOLS, "warn about autorelease with no pool in place, which may be a leak") +OPTION( DebugPoolAllocation, OBJC_DEBUG_POOL_ALLOCATION, "halt when autorelease pools are popped out of order, and allow heap debuggers to track autorelease pools") +OPTION( DebugDuplicateClasses, OBJC_DEBUG_DUPLICATE_CLASSES, "halt when multiple classes with the same name are present") +OPTION( DebugDontCrash, OBJC_DEBUG_DONT_CRASH, "halt the 
process by exiting instead of crashing") + +OPTION( DisableVtables, OBJC_DISABLE_VTABLES, "disable vtable dispatch") +OPTION( DisablePreopt, OBJC_DISABLE_PREOPTIMIZATION, "disable preoptimization courtesy of dyld shared cache") +OPTION( DisableTaggedPointers, OBJC_DISABLE_TAGGED_POINTERS, "disable tagged pointer optimization of NSNumber et al.") +OPTION( DisableNonpointerIsa, OBJC_DISABLE_NONPOINTER_ISA, "disable non-pointer isa fields") diff --git a/runtime/objc-errors.mm b/runtime/objc-errors.mm new file mode 100644 index 0000000..4c426b0 --- /dev/null +++ b/runtime/objc-errors.mm @@ -0,0 +1,306 @@ +/* + * Copyright (c) 1999-2003, 2005-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * objc-errors.m + * Copyright 1988-2001, NeXT Software, Inc., Apple Computer, Inc. + */ + +#include "objc-private.h" + +#if TARGET_OS_WIN32 + +#include + +void _objc_inform_on_crash(const char *fmt, ...) +{ +} + +void _objc_inform(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + _vcprintf(fmt, args); + va_end(args); + _cprintf("\n"); +} + +void _objc_fatal(const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + _vcprintf(fmt, args); + va_end(args); + _cprintf("\n"); + + abort(); +} + +void __objc_error(id rcv, const char *fmt, ...) +{ + va_list args; + va_start(args, fmt); + _vcprintf(fmt, args); + va_end(args); + + abort(); +} + +void _objc_error(id rcv, const char *fmt, va_list args) +{ + _vcprintf(fmt, args); + + abort(); +} + +#else + +#include <_simple.h> + +OBJC_EXPORT void (*_error)(id, const char *, va_list); + +// Return true if c is a UTF8 continuation byte +static bool isUTF8Continuation(char c) +{ + return (c & 0xc0) == 0x80; // continuation byte is 0b10xxxxxx +} + +// Add "message" to any forthcoming crash log. +static mutex_t crashlog_lock; +static void _objc_crashlog(const char *message) +{ + char *newmsg; + +#if 0 + { + // for debugging at BOOT time. + extern char **_NSGetProgname(void); + FILE *crashlog = fopen("/_objc_crash.log", "a"); + setbuf(crashlog, NULL); + fprintf(crashlog, "[%s] %s\n", *_NSGetProgname(), message); + fclose(crashlog); + sync(); + } +#endif + + mutex_locker_t lock(crashlog_lock); + + char *oldmsg = (char *)CRGetCrashLogMessage(); + size_t oldlen; + const size_t limit = 8000; + + if (!oldmsg) { + newmsg = strdup(message); + } else if ((oldlen = strlen(oldmsg)) > limit) { + // limit total length by dropping old contents + char *truncmsg = oldmsg + oldlen - limit; + // advance past partial UTF-8 bytes + while (isUTF8Continuation(*truncmsg)) truncmsg++; + asprintf(&newmsg, "... 
%s\n%s", truncmsg, message); + } else { + asprintf(&newmsg, "%s\n%s", oldmsg, message); + } + + if (newmsg) { + // Strip trailing newline + char *c = &newmsg[strlen(newmsg)-1]; + if (*c == '\n') *c = '\0'; + + if (oldmsg) free(oldmsg); + CRSetCrashLogMessage(newmsg); + } +} + +// Returns true if logs should be sent to stderr as well as syslog. +// Copied from CFUtilities.c +static bool also_do_stderr(void) +{ + struct stat st; + int ret = fstat(STDERR_FILENO, &st); + if (ret < 0) return false; + mode_t m = st.st_mode & S_IFMT; + if (m == S_IFREG || m == S_IFSOCK || m == S_IFIFO || m == S_IFCHR) { + return true; + } + return false; +} + +// Print "message" to the console. +static void _objc_syslog(const char *message) +{ + _simple_asl_log(ASL_LEVEL_ERR, nil, message); + + if (also_do_stderr()) { + write(STDERR_FILENO, message, strlen(message)); + } +} + +/* + * _objc_error is the default *_error handler. + */ +#if !__OBJC2__ +// used by ExceptionHandling.framework +#endif +__attribute__((noreturn)) +void _objc_error(id self, const char *fmt, va_list ap) +{ + char *buf; + vasprintf(&buf, fmt, ap); + _objc_fatal("%s: %s", object_getClassName(self), buf); +} + +/* + * this routine handles errors that involve an object (or class). + */ +void __objc_error(id rcv, const char *fmt, ...) +{ + va_list vp; + + va_start(vp,fmt); +#if !__OBJC2__ + (*_error)(rcv, fmt, vp); +#endif + _objc_error (rcv, fmt, vp); /* In case (*_error)() returns. */ + va_end(vp); +} + +static __attribute__((noreturn)) +void _objc_fatalv(uint64_t reason, uint64_t flags, const char *fmt, va_list ap) +{ + char *buf1; + vasprintf(&buf1, fmt, ap); + + char *buf2; + asprintf(&buf2, "objc[%d]: %s\n", getpid(), buf1); + _objc_syslog(buf2); + + if (DebugDontCrash) { + char *buf3; + asprintf(&buf3, "objc[%d]: HALTED\n", getpid()); + _objc_syslog(buf3); + _Exit(1); + } + else { + abort_with_reason(OS_REASON_OBJC, reason, buf1, flags); + } +} + +void _objc_fatal_with_reason(uint64_t reason, uint64_t flags, + const char *fmt, ...) +{ + va_list ap; + va_start(ap, fmt); + _objc_fatalv(reason, flags, fmt, ap); +} + +void _objc_fatal(const char *fmt, ...) +{ + va_list ap; + va_start(ap,fmt); + _objc_fatalv(OBJC_EXIT_REASON_UNSPECIFIED, + OS_REASON_FLAG_ONE_TIME_FAILURE, + fmt, ap); +} + +/* + * this routine handles soft runtime errors...like not being able + * add a category to a class (because it wasn't linked in). + */ +void _objc_inform(const char *fmt, ...) +{ + va_list ap; + char *buf1; + char *buf2; + + va_start (ap,fmt); + vasprintf(&buf1, fmt, ap); + va_end (ap); + + asprintf(&buf2, "objc[%d]: %s\n", getpid(), buf1); + _objc_syslog(buf2); + + free(buf2); + free(buf1); +} + + +/* + * Like _objc_inform(), but prints the message only in any + * forthcoming crash log, not to the console. + */ +void _objc_inform_on_crash(const char *fmt, ...) +{ + va_list ap; + char *buf1; + char *buf2; + + va_start (ap,fmt); + vasprintf(&buf1, fmt, ap); + va_end (ap); + + asprintf(&buf2, "objc[%d]: %s\n", getpid(), buf1); + _objc_crashlog(buf2); + + free(buf2); + free(buf1); +} + + +/* + * Like calling both _objc_inform and _objc_inform_on_crash. + */ +void _objc_inform_now_and_on_crash(const char *fmt, ...) 
+{ + va_list ap; + char *buf1; + char *buf2; + + va_start (ap,fmt); + vasprintf(&buf1, fmt, ap); + va_end (ap); + + asprintf(&buf2, "objc[%d]: %s\n", getpid(), buf1); + _objc_crashlog(buf2); + _objc_syslog(buf2); + + free(buf2); + free(buf1); +} + +#endif + + +BREAKPOINT_FUNCTION( + void _objc_warn_deprecated(void) +); + +void _objc_inform_deprecated(const char *oldf, const char *newf) +{ + if (PrintDeprecation) { + if (newf) { + _objc_inform("The function %s is obsolete. Use %s instead. Set a breakpoint on _objc_warn_deprecated to find the culprit.", oldf, newf); + } else { + _objc_inform("The function %s is obsolete. Do not use it. Set a breakpoint on _objc_warn_deprecated to find the culprit.", oldf); + } + } + _objc_warn_deprecated(); +} diff --git a/runtime/objc-exception.h b/runtime/objc-exception.h new file mode 100644 index 0000000..c67b92a --- /dev/null +++ b/runtime/objc-exception.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2002-2003, 2006-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __OBJC_EXCEPTION_H_ +#define __OBJC_EXCEPTION_H_ + +#include +#include + +#if !__OBJC2__ + +// compiler reserves a setjmp buffer + 4 words as localExceptionData + +OBJC_EXPORT void objc_exception_throw(id exception) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_exception_try_enter(void *localExceptionData) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_exception_try_exit(void *localExceptionData) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT id objc_exception_extract(void *localExceptionData) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT int objc_exception_match(Class exceptionClass, id exception) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + + +typedef struct { + int version; + void (*throw_exc)(id); // version 0 + void (*try_enter)(void *); // version 0 + void (*try_exit)(void *); // version 0 + id (*extract)(void *); // version 0 + int (*match)(Class, id); // version 0 +} objc_exception_functions_t; + +// get table; version tells how many +OBJC_EXPORT void objc_exception_get_functions(objc_exception_functions_t *table) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + +// set table +OBJC_EXPORT void objc_exception_set_functions(objc_exception_functions_t *table) + __OSX_AVAILABLE(10.3) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + + +// !__OBJC2__ +#else +// __OBJC2__ + +typedef id (*objc_exception_preprocessor)(id exception); +typedef int (*objc_exception_matcher)(Class catch_type, id exception); +typedef void (*objc_uncaught_exception_handler)(id exception); +typedef void (*objc_exception_handler)(id unused, void *context); + +/** + * Throw a runtime exception. This function is inserted by the compiler + * where \c @throw would otherwise be. + * + * @param exception The exception to be thrown. + */ +OBJC_EXPORT void objc_exception_throw(id exception) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_exception_rethrow(void) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT id objc_begin_catch(void *exc_buf) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_end_catch(void) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT void objc_terminate(void) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); + +OBJC_EXPORT objc_exception_preprocessor objc_setExceptionPreprocessor(objc_exception_preprocessor fn) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT objc_exception_matcher objc_setExceptionMatcher(objc_exception_matcher fn) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); +OBJC_EXPORT objc_uncaught_exception_handler objc_setUncaughtExceptionHandler(objc_uncaught_exception_handler fn) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +// Not for iOS. 
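+// [Editorial sketch: this comment block is not part of the original header.]
+// Typical use of the installers declared above, from a hypothetical crash
+// reporter; the runtime invokes the installed callback from its
+// std::terminate hook when an Objective-C exception goes uncaught:
+//
+//   static void logUncaught(id exception) {
+//       // hypothetical: record object_getClassName(exception) somewhere
+//   }
+//   ...
+//   objc_uncaught_exception_handler prev =
+//       objc_setUncaughtExceptionHandler(&logUncaught);
+//
+// (The "Not for iOS." note above refers to the
+// objc_addExceptionHandler/objc_removeExceptionHandler pair declared next.)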
+OBJC_EXPORT uintptr_t objc_addExceptionHandler(objc_exception_handler fn, void *context) + __OSX_AVAILABLE(10.5) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT void objc_removeExceptionHandler(uintptr_t token) + __OSX_AVAILABLE(10.5) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + +// __OBJC2__ +#endif + +#endif // __OBJC_EXCEPTION_H_ + diff --git a/runtime/objc-exception.mm b/runtime/objc-exception.mm new file mode 100644 index 0000000..d6b1d83 --- /dev/null +++ b/runtime/objc-exception.mm @@ -0,0 +1,1396 @@ +/* + * Copyright (c) 2002-2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if !__OBJC2__ + +/*********************************************************************** +* 32-bit implementation +**********************************************************************/ + +#include "objc-private.h" +#include +#include +#include + +#include "objc-exception.h" + +static objc_exception_functions_t xtab; + +// forward declaration +static void set_default_handlers(); + + +/* + * Exported functions + */ + +// get table; version tells how many +void objc_exception_get_functions(objc_exception_functions_t *table) { + // only version 0 supported at this point + if (table && table->version == 0) + *table = xtab; +} + +// set table +void objc_exception_set_functions(objc_exception_functions_t *table) { + // only version 0 supported at this point + if (table && table->version == 0) + xtab = *table; +} + +/* + * The following functions are + * synthesized by the compiler upon encountering language constructs + */ + +void objc_exception_throw(id exception) { + if (!xtab.throw_exc) { + set_default_handlers(); + } + + if (PrintExceptionThrow) { + _objc_inform("EXCEPTIONS: throwing %p (%s)", + (void*)exception, object_getClassName(exception)); + void* callstack[500]; + int frameCount = backtrace(callstack, 500); + backtrace_symbols_fd(callstack, frameCount, fileno(stderr)); + } + + OBJC_RUNTIME_OBJC_EXCEPTION_THROW(exception); // dtrace probe to log throw activity. 
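+    // [Editorial note, not in the original sources.] xtab.throw_exc is
+    // either a handler installed through objc_exception_set_functions()
+    // (Foundation historically provides one) or default_throw() below.
+    // A successful throw transfers control to a handler via longjmp and
+    // does not return here, which is why falling through is fatal.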
+ xtab.throw_exc(exception); + _objc_fatal("objc_exception_throw failed"); +} + +void objc_exception_try_enter(void *localExceptionData) { + if (!xtab.throw_exc) { + set_default_handlers(); + } + xtab.try_enter(localExceptionData); +} + + +void objc_exception_try_exit(void *localExceptionData) { + if (!xtab.throw_exc) { + set_default_handlers(); + } + xtab.try_exit(localExceptionData); +} + + +id objc_exception_extract(void *localExceptionData) { + if (!xtab.throw_exc) { + set_default_handlers(); + } + return xtab.extract(localExceptionData); +} + + +int objc_exception_match(Class exceptionClass, id exception) { + if (!xtab.throw_exc) { + set_default_handlers(); + } + return xtab.match(exceptionClass, exception); +} + + +// quick and dirty exception handling code +// default implementation - mostly a toy for use outside/before Foundation +// provides its implementation +// Perhaps the default implementation should just complain loudly and quit + + +extern void _objc_inform(const char *fmt, ...); + +typedef struct { jmp_buf buf; void *pointers[4]; } LocalData_t; + +typedef struct _threadChain { + LocalData_t *topHandler; + objc_thread_t perThreadID; + struct _threadChain *next; +} + ThreadChainLink_t; + +static ThreadChainLink_t ThreadChainLink; + +static ThreadChainLink_t *getChainLink() { + // follow links until thread_self() found (someday) XXX + objc_thread_t self = thread_self(); + ThreadChainLink_t *walker = &ThreadChainLink; + while (walker->perThreadID != self) { + if (walker->next != nil) { + walker = walker->next; + continue; + } + // create a new one + // XXX not thread safe (!) + // XXX Also, we don't register to deallocate on thread death + walker->next = (ThreadChainLink_t *)malloc(sizeof(ThreadChainLink_t)); + walker = walker->next; + walker->next = nil; + walker->topHandler = nil; + walker->perThreadID = self; + } + return walker; +} + +static void default_try_enter(void *localExceptionData) { + LocalData_t *data = (LocalData_t *)localExceptionData; + ThreadChainLink_t *chainLink = getChainLink(); + data->pointers[1] = chainLink->topHandler; + chainLink->topHandler = data; + if (PrintExceptions) _objc_inform("EXCEPTIONS: entered try block %p\n", chainLink->topHandler); +} + +static void default_throw(id value) { + ThreadChainLink_t *chainLink = getChainLink(); + LocalData_t *led; + if (value == nil) { + if (PrintExceptions) _objc_inform("EXCEPTIONS: objc_exception_throw with nil value\n"); + return; + } + if (chainLink == nil) { + if (PrintExceptions) _objc_inform("EXCEPTIONS: No handler in place!\n"); + return; + } + if (PrintExceptions) _objc_inform("EXCEPTIONS: exception thrown, going to handler block %p\n", chainLink->topHandler); + led = chainLink->topHandler; + chainLink->topHandler = (LocalData_t *) + led->pointers[1]; // pop top handler + led->pointers[0] = value; // store exception that is thrown +#if TARGET_OS_WIN32 + longjmp(led->buf, 1); +#else + _longjmp(led->buf, 1); +#endif +} + +static void default_try_exit(void *led) { + ThreadChainLink_t *chainLink = getChainLink(); + if (!chainLink || led != chainLink->topHandler) { + if (PrintExceptions) _objc_inform("EXCEPTIONS: *** mismatched try block exit handlers\n"); + return; + } + if (PrintExceptions) _objc_inform("EXCEPTIONS: removing try block handler %p\n", chainLink->topHandler); + chainLink->topHandler = (LocalData_t *) + chainLink->topHandler->pointers[1]; // pop top handler +} + +static id default_extract(void *localExceptionData) { + LocalData_t *led = (LocalData_t *)localExceptionData; + return 
(id)led->pointers[0]; +} + +static int default_match(Class exceptionClass, id exception) { + //return [exception isKindOfClass:exceptionClass]; + Class cls; + for (cls = exception->getIsa(); nil != cls; cls = cls->superclass) + if (cls == exceptionClass) return 1; + return 0; +} + +static void set_default_handlers() { + objc_exception_functions_t default_functions = { + 0, default_throw, default_try_enter, default_try_exit, default_extract, default_match }; + + // should this always print? + if (PrintExceptions) _objc_inform("EXCEPTIONS: *** Setting default (non-Foundation) exception mechanism\n"); + objc_exception_set_functions(&default_functions); +} + + +void exception_init(void) +{ + // nothing to do +} + +void _destroyAltHandlerList(struct alt_handler_list *list) +{ + // nothing to do +} + + +// !__OBJC2__ +#else +// __OBJC2__ + +/*********************************************************************** +* 64-bit implementation. +**********************************************************************/ + +#include "objc-private.h" +#include +#include +#include + +// unwind library types and functions +// Mostly adapted from Itanium C++ ABI: Exception Handling +// http://www.codesourcery.com/cxx-abi/abi-eh.html + +struct _Unwind_Exception; +struct _Unwind_Context; + +typedef int _Unwind_Action; +enum : _Unwind_Action { + _UA_SEARCH_PHASE = 1, + _UA_CLEANUP_PHASE = 2, + _UA_HANDLER_FRAME = 4, + _UA_FORCE_UNWIND = 8 +}; + +typedef int _Unwind_Reason_Code; +enum : _Unwind_Reason_Code { + _URC_NO_REASON = 0, + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_FATAL_PHASE2_ERROR = 2, + _URC_FATAL_PHASE1_ERROR = 3, + _URC_NORMAL_STOP = 4, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8 +}; + +struct dwarf_eh_bases +{ + uintptr_t tbase; + uintptr_t dbase; + uintptr_t func; +}; + +OBJC_EXTERN uintptr_t _Unwind_GetIP (struct _Unwind_Context *); +OBJC_EXTERN uintptr_t _Unwind_GetCFA (struct _Unwind_Context *); +OBJC_EXTERN uintptr_t _Unwind_GetLanguageSpecificData(struct _Unwind_Context *); + + +// C++ runtime types and functions +// copied from cxxabi.h + +OBJC_EXTERN void *__cxa_allocate_exception(size_t thrown_size); +OBJC_EXTERN void __cxa_throw(void *exc, void *typeinfo, void (*destructor)(void *)) __attribute__((noreturn)); +OBJC_EXTERN void *__cxa_begin_catch(void *exc); +OBJC_EXTERN void __cxa_end_catch(void); +OBJC_EXTERN void __cxa_rethrow(void); +OBJC_EXTERN void *__cxa_current_exception_type(void); + +#if SUPPORT_ZEROCOST_EXCEPTIONS +# define CXX_PERSONALITY __gxx_personality_v0 +#else +# define CXX_PERSONALITY __gxx_personality_sj0 +#endif + +OBJC_EXTERN _Unwind_Reason_Code +CXX_PERSONALITY(int version, + _Unwind_Action actions, + uint64_t exceptionClass, + struct _Unwind_Exception *exceptionObject, + struct _Unwind_Context *context); + + +// objc's internal exception types and data + +struct objc_typeinfo { + // Position of vtable and name fields must match C++ typeinfo object + const void **vtable; // always objc_ehtype_vtable+2 + const char *name; // c++ typeinfo string + + Class cls_unremapped; +}; + +struct objc_exception { + id obj; + struct objc_typeinfo tinfo; +}; + + +static void _objc_exception_noop(void) { } +static bool _objc_exception_false(void) { return 0; } +// static bool _objc_exception_true(void) { return 1; } +static void _objc_exception_abort1(void) { + _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 1); +} +static void _objc_exception_abort2(void) { + _objc_fatal("unexpected call into objc 
exception typeinfo vtable %d", 2); +} +static void _objc_exception_abort3(void) { + _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 3); +} +static void _objc_exception_abort4(void) { + _objc_fatal("unexpected call into objc exception typeinfo vtable %d", 4); +} + +static bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo, + struct objc_typeinfo *throw_tinfo, + void **throw_obj_p, + unsigned outer); + +// forward declaration +OBJC_EXPORT struct objc_typeinfo OBJC_EHTYPE_id; + +OBJC_EXPORT +const void *objc_ehtype_vtable[] = { + nil, // typeinfo's vtable? - fixme + (void*)&OBJC_EHTYPE_id, // typeinfo's typeinfo - hack + (void*)_objc_exception_noop, // in-place destructor? + (void*)_objc_exception_noop, // destructor? + (void*)_objc_exception_false, // OLD __is_pointer_p + (void*)_objc_exception_false, // OLD __is_function_p + (void*)_objc_exception_do_catch, // OLD __do_catch, NEW can_catch + (void*)_objc_exception_false, // OLD __do_upcast, NEW search_above_dst + (void*)_objc_exception_false, // NEW search_below_dst + (void*)_objc_exception_abort1, // paranoia: blow up if libc++abi + (void*)_objc_exception_abort2, // adds something new + (void*)_objc_exception_abort3, + (void*)_objc_exception_abort4, +}; + +OBJC_EXPORT +struct objc_typeinfo OBJC_EHTYPE_id = { + objc_ehtype_vtable+2, + "id", + nil +}; + + + +/*********************************************************************** +* Foundation customization +**********************************************************************/ + +/*********************************************************************** +* _objc_default_exception_preprocessor +* Default exception preprocessor. Expected to be overridden by Foundation. +**********************************************************************/ +static id _objc_default_exception_preprocessor(id exception) +{ + return exception; +} +static objc_exception_preprocessor exception_preprocessor = _objc_default_exception_preprocessor; + + +/*********************************************************************** +* _objc_default_exception_matcher +* Default exception matcher. Expected to be overridden by Foundation. +**********************************************************************/ +static int _objc_default_exception_matcher(Class catch_cls, id exception) +{ + Class cls; + for (cls = exception->getIsa(); + cls != nil; + cls = cls->superclass) + { + if (cls == catch_cls) return 1; + } + + return 0; +} +static objc_exception_matcher exception_matcher = _objc_default_exception_matcher; + + +/*********************************************************************** +* _objc_default_uncaught_exception_handler +* Default uncaught exception handler. Expected to be overridden by Foundation. +**********************************************************************/ +static void _objc_default_uncaught_exception_handler(id exception) +{ +} +static objc_uncaught_exception_handler uncaught_handler = _objc_default_uncaught_exception_handler; + + +/*********************************************************************** +* objc_setExceptionPreprocessor +* Set a handler for preprocessing Objective-C exceptions. +* Returns the previous handler. 
+**********************************************************************/ +objc_exception_preprocessor +objc_setExceptionPreprocessor(objc_exception_preprocessor fn) +{ + objc_exception_preprocessor result = exception_preprocessor; + exception_preprocessor = fn; + return result; +} + + +/*********************************************************************** +* objc_setExceptionMatcher +* Set a handler for matching Objective-C exceptions. +* Returns the previous handler. +**********************************************************************/ +objc_exception_matcher +objc_setExceptionMatcher(objc_exception_matcher fn) +{ + objc_exception_matcher result = exception_matcher; + exception_matcher = fn; + return result; +} + + +/*********************************************************************** +* objc_setUncaughtExceptionHandler +* Set a handler for uncaught Objective-C exceptions. +* Returns the previous handler. +**********************************************************************/ +objc_uncaught_exception_handler +objc_setUncaughtExceptionHandler(objc_uncaught_exception_handler fn) +{ + objc_uncaught_exception_handler result = uncaught_handler; + uncaught_handler = fn; + return result; +} + + +/*********************************************************************** +* Exception personality +**********************************************************************/ + +static void call_alt_handlers(struct _Unwind_Context *ctx); + +_Unwind_Reason_Code +__objc_personality_v0(int version, + _Unwind_Action actions, + uint64_t exceptionClass, + struct _Unwind_Exception *exceptionObject, + struct _Unwind_Context *context) +{ + bool unwinding = ((actions & _UA_CLEANUP_PHASE) || + (actions & _UA_FORCE_UNWIND)); + + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: %s through frame [ip=%p sp=%p] " + "for exception %p", + unwinding ? "unwinding" : "searching", + (void*)(_Unwind_GetIP(context)-1), + (void*)_Unwind_GetCFA(context), exceptionObject); + } + + // If we're executing the unwind, call this frame's alt handlers, if any. + if (unwinding) { + call_alt_handlers(context); + } + + // Let C++ handle the unwind itself. + return CXX_PERSONALITY(version, actions, exceptionClass, + exceptionObject, context); +} + + +/*********************************************************************** +* Compiler ABI +**********************************************************************/ + +static void _objc_exception_destructor(void *exc_gen) +{ + // Release the retain from objc_exception_throw(). + + struct objc_exception *exc = (struct objc_exception *)exc_gen; + id obj = exc->obj; + + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: releasing completed exception %p (object %p, a %s)", + exc, obj, object_getClassName(obj)); + } + + [obj release]; +} + + +void objc_exception_throw(id obj) +{ + struct objc_exception *exc = (struct objc_exception *) + __cxa_allocate_exception(sizeof(struct objc_exception)); + + obj = (*exception_preprocessor)(obj); + + // Retain the exception object during unwinding + // because otherwise an autorelease pool pop can cause a crash + [obj retain]; + + exc->obj = obj; + exc->tinfo.vtable = objc_ehtype_vtable+2; + exc->tinfo.name = object_getClassName(obj); + exc->tinfo.cls_unremapped = obj ? 
obj->getIsa() : Nil; + + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: throwing %p (object %p, a %s)", + exc, (void*)obj, object_getClassName(obj)); + } + + if (PrintExceptionThrow) { + if (!PrintExceptions) + _objc_inform("EXCEPTIONS: throwing %p (object %p, a %s)", + exc, (void*)obj, object_getClassName(obj)); + void* callstack[500]; + int frameCount = backtrace(callstack, 500); + backtrace_symbols_fd(callstack, frameCount, fileno(stderr)); + } + + OBJC_RUNTIME_OBJC_EXCEPTION_THROW(obj); // dtrace probe to log throw activity + __cxa_throw(exc, &exc->tinfo, &_objc_exception_destructor); + __builtin_trap(); +} + + +void objc_exception_rethrow(void) +{ + // exception_preprocessor doesn't get another bite of the apple + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: rethrowing current exception"); + } + + OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW(); // dtrace probe to log throw activity. + __cxa_rethrow(); + __builtin_trap(); +} + + +id objc_begin_catch(void *exc_gen) +{ + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: handling exception %p at %p", + exc_gen, __builtin_return_address(0)); + } + // NOT actually an id in the catch(...) case! + return (id)__cxa_begin_catch(exc_gen); +} + + +void objc_end_catch(void) +{ + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: finishing handler"); + } + __cxa_end_catch(); +} + + +// `outer` is not passed by the new libcxxabi +static bool _objc_exception_do_catch(struct objc_typeinfo *catch_tinfo, + struct objc_typeinfo *throw_tinfo, + void **throw_obj_p, + unsigned outer UNAVAILABLE_ATTRIBUTE) +{ + id exception; + + if (throw_tinfo->vtable != objc_ehtype_vtable+2) { + // Only objc types can be caught here. + if (PrintExceptions) _objc_inform("EXCEPTIONS: skipping catch(?)"); + return false; + } + + // Adjust exception pointer. + // Old libcppabi: we lied about __is_pointer_p() so we have to do it here + // New libcxxabi: we have to do it here regardless + *throw_obj_p = **(void***)throw_obj_p; + + // `catch (id)` always catches objc types. + if (catch_tinfo == &OBJC_EHTYPE_id) { + if (PrintExceptions) _objc_inform("EXCEPTIONS: catch(id)"); + return true; + } + + exception = *(id *)throw_obj_p; + + Class handler_cls = _class_remap(catch_tinfo->cls_unremapped); + if (!handler_cls) { + // catch handler's class is weak-linked and missing. Not a match. + } + else if ((*exception_matcher)(handler_cls, exception)) { + if (PrintExceptions) _objc_inform("EXCEPTIONS: catch(%s)", + handler_cls->nameForLogging()); + return true; + } + + if (PrintExceptions) _objc_inform("EXCEPTIONS: skipping catch(%s)", + handler_cls->nameForLogging()); + + return false; +} + + +/*********************************************************************** +* _objc_terminate +* Custom std::terminate handler. +* +* The uncaught exception callback is implemented as a std::terminate handler. +* 1. Check if there's an active exception +* 2. If so, check if it's an Objective-C exception +* 3. If so, call our registered callback with the object. +* 4. Finally, call the previous terminate handler. +**********************************************************************/ +static void (*old_terminate)(void) = nil; +static void _objc_terminate(void) +{ + if (PrintExceptions) { + _objc_inform("EXCEPTIONS: terminating"); + } + + if (! __cxa_current_exception_type()) { + // No current exception. + (*old_terminate)(); + } + else { + // There is a current exception. Check if it's an objc exception. + @try { + __cxa_rethrow(); + } @catch (id e) { + // It's an objc object. 
Call Foundation's handler, if any. + (*uncaught_handler)((id)e); + (*old_terminate)(); + } @catch (...) { + // It's not an objc object. Continue to C++ terminate. + (*old_terminate)(); + } + } +} + + +/*********************************************************************** +* objc_terminate +* Calls std::terminate for clients who don't link to C++ themselves. +* Called by the compiler if an exception is thrown +* from a context where exceptions may not be thrown. +**********************************************************************/ +void objc_terminate(void) +{ + std::terminate(); +} + + +/*********************************************************************** +* alt handler support - zerocost implementation only +**********************************************************************/ + +#if !SUPPORT_ALT_HANDLERS + +void _destroyAltHandlerList(struct alt_handler_list *list) +{ +} + +static void call_alt_handlers(struct _Unwind_Context *ctx) +{ + // unsupported in sjlj environments +} + +#else + +#include +#include +#include + +// Dwarf eh data encodings +#define DW_EH_PE_omit 0xff // no data follows + +#define DW_EH_PE_absptr 0x00 +#define DW_EH_PE_uleb128 0x01 +#define DW_EH_PE_udata2 0x02 +#define DW_EH_PE_udata4 0x03 +#define DW_EH_PE_udata8 0x04 +#define DW_EH_PE_sleb128 0x09 +#define DW_EH_PE_sdata2 0x0A +#define DW_EH_PE_sdata4 0x0B +#define DW_EH_PE_sdata8 0x0C + +#define DW_EH_PE_pcrel 0x10 +#define DW_EH_PE_textrel 0x20 +#define DW_EH_PE_datarel 0x30 +#define DW_EH_PE_funcrel 0x40 +#define DW_EH_PE_aligned 0x50 // fixme + +#define DW_EH_PE_indirect 0x80 // gcc extension + + +/*********************************************************************** +* read_uleb +* Read a LEB-encoded unsigned integer from the address stored in *pp. +* Increments *pp past the bytes read. +* Adapted from DWARF Debugging Information Format 1.1, appendix 4 +**********************************************************************/ +static uintptr_t read_uleb(uintptr_t *pp) +{ + uintptr_t result = 0; + uintptr_t shift = 0; + unsigned char byte; + do { + byte = *(const unsigned char *)(*pp)++; + result |= (byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + return result; +} + + +/*********************************************************************** +* read_sleb +* Read a LEB-encoded signed integer from the address stored in *pp. +* Increments *pp past the bytes read. +* Adapted from DWARF Debugging Information Format 1.1, appendix 4 +**********************************************************************/ +static intptr_t read_sleb(uintptr_t *pp) +{ + uintptr_t result = 0; + uintptr_t shift = 0; + unsigned char byte; + do { + byte = *(const unsigned char *)(*pp)++; + result |= (byte & 0x7f) << shift; + shift += 7; + } while (byte & 0x80); + if ((shift < 8*sizeof(intptr_t)) && (byte & 0x40)) { + result |= ((intptr_t)-1) << shift; + } + return result; +} + + +/*********************************************************************** +* read_address +* Reads an encoded address from the address stored in *pp. +* Increments *pp past the bytes read. +* The data is interpreted according to the given dwarf encoding +* and base addresses. +**********************************************************************/ +static uintptr_t read_address(uintptr_t *pp, + const struct dwarf_eh_bases *bases, + unsigned char encoding) +{ + uintptr_t result = 0; + uintptr_t oldp = *pp; + + // fixme need DW_EH_PE_aligned? 
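+    // [Editorial note, not in the original sources.] Layout of the DWARF EH
+    // "encoding" byte handled below (see the DW_EH_PE_* defines above):
+    //   bits 0x0f - value format (absptr, uleb128, udata2/4/8, sdata2/4/8...)
+    //   bits 0x70 - base the value is relative to (pcrel, textrel, datarel,
+    //               funcrel)
+    //   bit  0x80 - indirect: the decoded value is the address of the real
+    //               value and must be dereferenced once.
+    // Example: 0x9B = DW_EH_PE_indirect|DW_EH_PE_pcrel|DW_EH_PE_sdata4, i.e.
+    // read a signed 32-bit offset, add the address it was read from, then
+    // load the pointer stored at the result.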
+ +#define READ(type) \ + result = *(type *)(*pp); \ + *pp += sizeof(type); + + if (encoding == DW_EH_PE_omit) return 0; + + switch (encoding & 0x0f) { + case DW_EH_PE_absptr: + READ(uintptr_t); + break; + case DW_EH_PE_uleb128: + result = read_uleb(pp); + break; + case DW_EH_PE_udata2: + READ(uint16_t); + break; + case DW_EH_PE_udata4: + READ(uint32_t); + break; +#if __LP64__ + case DW_EH_PE_udata8: + READ(uint64_t); + break; +#endif + case DW_EH_PE_sleb128: + result = read_sleb(pp); + break; + case DW_EH_PE_sdata2: + READ(int16_t); + break; + case DW_EH_PE_sdata4: + READ(int32_t); + break; +#if __LP64__ + case DW_EH_PE_sdata8: + READ(int64_t); + break; +#endif + default: + _objc_inform("unknown DWARF EH encoding 0x%x at %p", + encoding, (void *)*pp); + break; + } + +#undef READ + + if (result) { + switch (encoding & 0x70) { + case DW_EH_PE_pcrel: + // fixme correct? + result += (uintptr_t)oldp; + break; + case DW_EH_PE_textrel: + result += bases->tbase; + break; + case DW_EH_PE_datarel: + result += bases->dbase; + break; + case DW_EH_PE_funcrel: + result += bases->func; + break; + case DW_EH_PE_aligned: + _objc_inform("unknown DWARF EH encoding 0x%x at %p", + encoding, (void *)*pp); + break; + default: + // no adjustment + break; + } + + if (encoding & DW_EH_PE_indirect) { + result = *(uintptr_t *)result; + } + } + + return (uintptr_t)result; +} + + +struct frame_ips { + uintptr_t start; + uintptr_t end; +}; +struct frame_range { + uintptr_t ip_start; + uintptr_t ip_end; + uintptr_t cfa; + // precise ranges within ip_start..ip_end; nil or {0,0} terminated + frame_ips *ips; +}; + + +static bool isObjCExceptionCatcher(uintptr_t lsda, uintptr_t ip, + const struct dwarf_eh_bases* bases, + struct frame_range *frame) +{ + unsigned char LPStart_enc = *(const unsigned char *)lsda++; + + if (LPStart_enc != DW_EH_PE_omit) { + read_address(&lsda, bases, LPStart_enc); // LPStart + } + + unsigned char TType_enc = *(const unsigned char *)lsda++; + if (TType_enc != DW_EH_PE_omit) { + read_uleb(&lsda); // TType + } + + unsigned char call_site_enc = *(const unsigned char *)lsda++; + uintptr_t length = read_uleb(&lsda); + uintptr_t call_site_table = lsda; + uintptr_t call_site_table_end = call_site_table + length; + uintptr_t action_record_table = call_site_table_end; + + uintptr_t action_record = 0; + uintptr_t p = call_site_table; + + uintptr_t try_start; + uintptr_t try_end; + uintptr_t try_landing_pad; + + while (p < call_site_table_end) { + uintptr_t start = read_address(&p, bases, call_site_enc)+bases->func; + uintptr_t len = read_address(&p, bases, call_site_enc); + uintptr_t pad = read_address(&p, bases, call_site_enc); + uintptr_t action = read_uleb(&p); + + if (ip < start) { + // no more source ranges + return false; + } + else if (ip < start + len) { + // found the range + if (!pad) return false; // ...but it has no landing pad + // found the landing pad + action_record = action ? 
action_record_table + action - 1 : 0; + try_start = start; + try_end = start + len; + try_landing_pad = pad; + break; + } + } + + if (!action_record) return false; // no catch handlers + + // has handlers, destructors, and/or throws specifications + // Use this frame if it has any handlers + bool has_handler = false; + p = action_record; + intptr_t offset; + do { + intptr_t filter = read_sleb(&p); + uintptr_t temp = p; + offset = read_sleb(&temp); + p += offset; + + if (filter < 0) { + // throws specification - ignore + } else if (filter == 0) { + // destructor - ignore + } else /* filter >= 0 */ { + // catch handler - use this frame + has_handler = true; + break; + } + } while (offset); + + if (!has_handler) return false; + + // Count the number of source ranges with the same landing pad as our match + unsigned int range_count = 0; + p = call_site_table; + while (p < call_site_table_end) { + /*start*/ read_address(&p, bases, call_site_enc)/*+bases->func*/; + /*len*/ read_address(&p, bases, call_site_enc); + uintptr_t pad = read_address(&p, bases, call_site_enc); + /*action*/ read_uleb(&p); + + if (pad == try_landing_pad) { + range_count++; + } + } + + if (range_count == 1) { + // No other source ranges with the same landing pad. We're done here. + frame->ips = nil; + } + else { + // Record all ranges with the same landing pad as our match. + frame->ips = (frame_ips *) + malloc((range_count + 1) * sizeof(frame->ips[0])); + unsigned int r = 0; + p = call_site_table; + while (p < call_site_table_end) { + uintptr_t start = read_address(&p, bases, call_site_enc)+bases->func; + uintptr_t len = read_address(&p, bases, call_site_enc); + uintptr_t pad = read_address(&p, bases, call_site_enc); + /*action*/ read_uleb(&p); + + if (pad == try_landing_pad) { + if (start < try_start) try_start = start; + if (start+len > try_end) try_end = start+len; + frame->ips[r].start = start; + frame->ips[r].end = start+len; + r++; + } + } + + frame->ips[r].start = 0; + frame->ips[r].end = 0; + } + + frame->ip_start = try_start; + frame->ip_end = try_end; + + return true; +} + + +static struct frame_range findHandler(void) +{ + // walk stack looking for frame with objc catch handler + unw_context_t uc; + unw_cursor_t cursor; + unw_proc_info_t info; + unw_getcontext(&uc); + unw_init_local(&cursor, &uc); + while ( (unw_step(&cursor) > 0) && (unw_get_proc_info(&cursor, &info) == UNW_ESUCCESS) ) { + // must use objc personality handler + if ( info.handler != (uintptr_t)__objc_personality_v0 ) + continue; + // must have landing pad + if ( info.lsda == 0 ) + continue; + // must have landing pad that catches objc exceptions + struct dwarf_eh_bases bases; + bases.tbase = 0; // from unwind-dw2-fde-darwin.c:examine_objects() + bases.dbase = 0; // from unwind-dw2-fde-darwin.c:examine_objects() + bases.func = info.start_ip; + unw_word_t ip; + unw_get_reg(&cursor, UNW_REG_IP, &ip); + ip -= 1; + struct frame_range try_range = {0, 0, 0, 0}; + if ( isObjCExceptionCatcher(info.lsda, ip, &bases, &try_range) ) { + unw_word_t cfa; + unw_get_reg(&cursor, UNW_REG_SP, &cfa); + try_range.cfa = cfa; + return try_range; + } + } + + return (struct frame_range){0, 0, 0, 0}; +} + + +// This data structure assumes the number of +// active alt handlers per frame is small. + +// for OBJC_DEBUG_ALT_HANDLERS, record the call to objc_addExceptionHandler. 
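Before the alt-handler bookkeeping structures below, one note on the encoding used by the tables parsed above: the call-site and action records are stored in DWARF LEB128, which read_uleb() and read_sleb() decode. A minimal standalone sketch of that decoding (illustrative C only, not part of this patch; demo_read_uleb is a made-up name), using the classic worked example from the DWARF specification where the bytes 0xE5 0x8E 0x26 decode to 624485:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same algorithm as read_uleb() above: 7 payload bits per byte,
       least-significant group first, a set high bit means "more bytes follow". */
    static uintptr_t demo_read_uleb(const unsigned char **pp)
    {
        uintptr_t result = 0;
        unsigned shift = 0;
        unsigned char byte;
        do {
            byte = *(*pp)++;
            result |= (uintptr_t)(byte & 0x7f) << shift;
            shift += 7;
        } while (byte & 0x80);
        return result;
    }

    int main(void)
    {
        const unsigned char bytes[] = { 0xE5, 0x8E, 0x26 };
        const unsigned char *p = bytes;
        uintptr_t value = demo_read_uleb(&p);
        printf("decoded %lu using %ld bytes\n",
               (unsigned long)value, (long)(p - bytes));
        assert(value == 624485);
        return 0;
    }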
+#define BACKTRACE_COUNT 46 +#define THREADNAME_COUNT 64 +struct alt_handler_debug { + uintptr_t token; + int backtraceSize; + void *backtrace[BACKTRACE_COUNT]; + char thread[THREADNAME_COUNT]; + char queue[THREADNAME_COUNT]; +}; + +struct alt_handler_data { + struct frame_range frame; + objc_exception_handler fn; + void *context; + struct alt_handler_debug *debug; +}; + +struct alt_handler_list { + unsigned int allocated; + unsigned int used; + struct alt_handler_data *handlers; + struct alt_handler_list *next_DEBUGONLY; +}; + +static mutex_t DebugLock; +static struct alt_handler_list *DebugLists; +static uintptr_t DebugCounter; + +__attribute__((noinline, noreturn)) +void alt_handler_error(uintptr_t token); + +static struct alt_handler_list * +fetch_handler_list(bool create) +{ + _objc_pthread_data *data = _objc_fetch_pthread_data(create); + if (!data) return nil; + + struct alt_handler_list *list = data->handlerList; + if (!list) { + if (!create) return nil; + list = (struct alt_handler_list *)calloc(1, sizeof(*list)); + data->handlerList = list; + + if (DebugAltHandlers) { + // Save this list so the debug code can find it from other threads + mutex_locker_t lock(DebugLock); + list->next_DEBUGONLY = DebugLists; + DebugLists = list; + } + } + + return list; +} + + +void _destroyAltHandlerList(struct alt_handler_list *list) +{ + if (list) { + if (DebugAltHandlers) { + // Detach from the list-of-lists. + mutex_locker_t lock(DebugLock); + struct alt_handler_list **listp = &DebugLists; + while (*listp && *listp != list) listp = &(*listp)->next_DEBUGONLY; + if (*listp) *listp = (*listp)->next_DEBUGONLY; + } + + if (list->handlers) { + for (unsigned int i = 0; i < list->allocated; i++) { + if (list->handlers[i].frame.ips) { + free(list->handlers[i].frame.ips); + } + } + free(list->handlers); + } + free(list); + } +} + + +uintptr_t objc_addExceptionHandler(objc_exception_handler fn, void *context) +{ + // Find the closest enclosing frame with objc catch handlers + struct frame_range target_frame = findHandler(); + if (!target_frame.ip_start) { + // No suitable enclosing handler found. + return 0; + } + + // Record this alt handler for the discovered frame. + struct alt_handler_list *list = fetch_handler_list(YES); + unsigned int i = 0; + + if (list->used == list->allocated) { + list->allocated = list->allocated*2 ?: 4; + list->handlers = (struct alt_handler_data *) + realloc(list->handlers, + list->allocated * sizeof(list->handlers[0])); + bzero(&list->handlers[list->used], (list->allocated - list->used) * sizeof(list->handlers[0])); + i = list->used; + } + else { + for (i = 0; i < list->allocated; i++) { + if (list->handlers[i].frame.ip_start == 0 && + list->handlers[i].frame.ip_end == 0 && + list->handlers[i].frame.cfa == 0) + { + break; + } + } + if (i == list->allocated) { + _objc_fatal("alt handlers in objc runtime are buggy!"); + } + } + + struct alt_handler_data *data = &list->handlers[i]; + + data->frame = target_frame; + data->fn = fn; + data->context = context; + list->used++; + + uintptr_t token = i+1; + + if (DebugAltHandlers) { + // Record backtrace in case this handler is misused later. 
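+        // In this mode the token handed back to the caller is a unique nonzero counter value rather than a slot index; objc_removeExceptionHandler() below searches for it by value.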
+ mutex_locker_t lock(DebugLock); + + token = DebugCounter++; + if (token == 0) token = DebugCounter++; + + if (!data->debug) { + data->debug = (struct alt_handler_debug *) + calloc(sizeof(*data->debug), 1); + } else { + bzero(data->debug, sizeof(*data->debug)); + } + + pthread_getname_np(pthread_self(), data->debug->thread, THREADNAME_COUNT); + strlcpy(data->debug->queue, + dispatch_queue_get_label(dispatch_get_current_queue()), + THREADNAME_COUNT); + data->debug->backtraceSize = + backtrace(data->debug->backtrace, BACKTRACE_COUNT); + data->debug->token = token; + } + + if (PrintAltHandlers) { + _objc_inform("ALT HANDLERS: installing alt handler #%lu %p(%p) on " + "frame [ip=%p..%p sp=%p]", (unsigned long)token, + data->fn, data->context, (void *)data->frame.ip_start, + (void *)data->frame.ip_end, (void *)data->frame.cfa); + if (data->frame.ips) { + unsigned int r = 0; + while (1) { + uintptr_t start = data->frame.ips[r].start; + uintptr_t end = data->frame.ips[r].end; + r++; + if (start == 0 && end == 0) break; + _objc_inform("ALT HANDLERS: ip=%p..%p", + (void*)start, (void*)end); + } + } + } + + if (list->used > 1000) { + static int warned = 0; + if (!warned) { + _objc_inform("ALT HANDLERS: *** over 1000 alt handlers installed; " + "this is probably a bug"); + warned = 1; + } + } + + return token; +} + + +void objc_removeExceptionHandler(uintptr_t token) +{ + if (!token) { + // objc_addExceptionHandler failed + return; + } + + struct alt_handler_list *list = fetch_handler_list(NO); + if (!list || !list->handlers) { + // no alt handlers active + alt_handler_error(token); + } + + uintptr_t i = token-1; + + if (DebugAltHandlers) { + // search for the token instead of using token-1 + for (i = 0; i < list->allocated; i++) { + struct alt_handler_data *data = &list->handlers[i]; + if (data->debug && data->debug->token == token) break; + } + } + + if (i >= list->allocated) { + // token out of range + alt_handler_error(token); + } + + struct alt_handler_data *data = &list->handlers[i]; + + if (data->frame.ip_start == 0 && data->frame.ip_end == 0 && data->frame.cfa == 0) { + // token in range, but invalid + alt_handler_error(token); + } + + if (PrintAltHandlers) { + _objc_inform("ALT HANDLERS: removing alt handler #%lu %p(%p) on " + "frame [ip=%p..%p sp=%p]", (unsigned long)token, + data->fn, data->context, (void *)data->frame.ip_start, + (void *)data->frame.ip_end, (void *)data->frame.cfa); + } + + if (data->debug) free(data->debug); + if (data->frame.ips) free(data->frame.ips); + bzero(data, sizeof(*data)); + list->used--; +} + + +BREAKPOINT_FUNCTION( +void objc_alt_handler_error(void)); + +__attribute__((noinline, noreturn)) +void alt_handler_error(uintptr_t token) +{ + _objc_inform + ("objc_removeExceptionHandler() called with unknown alt handler; " + "this is probably a bug in multithreaded AppKit use. " + "Set environment variable OBJC_DEBUG_ALT_HANDLERS=YES " + "or break in objc_alt_handler_error() to debug."); + + if (DebugAltHandlers) { + DebugLock.lock(); + + // Search other threads' alt handler lists for this handler. 
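+        // fetch_handler_list() chained every thread's list onto DebugLists when OBJC_DEBUG_ALT_HANDLERS was set, so the offending record can be found even if it was installed by another thread.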
+ struct alt_handler_list *list; + for (list = DebugLists; list; list = list->next_DEBUGONLY) { + unsigned h; + for (h = 0; h < list->allocated; h++) { + struct alt_handler_data *data = &list->handlers[h]; + if (data->debug && data->debug->token == token) { + // found it + int i; + + // Build a string from the recorded backtrace + char *symbolString; + char **symbols = + backtrace_symbols(data->debug->backtrace, + data->debug->backtraceSize); + size_t len = 1; + for (i = 0; i < data->debug->backtraceSize; i++){ + len += 4 + strlen(symbols[i]) + 1; + } + symbolString = (char *)calloc(len, 1); + for (i = 0; i < data->debug->backtraceSize; i++){ + strcat(symbolString, " "); + strcat(symbolString, symbols[i]); + strcat(symbolString, "\n"); + } + + free(symbols); + + _objc_inform_now_and_on_crash + ("The matching objc_addExceptionHandler() was called " + "by:\nThread '%s': Dispatch queue: '%s': \n%s", + data->debug->thread, data->debug->queue, symbolString); + + goto done; + } + } + } + done: + DebugLock.unlock(); + } + + + objc_alt_handler_error(); + + _objc_fatal + ("objc_removeExceptionHandler() called with unknown alt handler; " + "this is probably a bug in multithreaded AppKit use. "); +} + +// called in order registered, to match 32-bit _NSAddAltHandler2 +// fixme reverse registration order matches c++ destructors better +static void call_alt_handlers(struct _Unwind_Context *ctx) +{ + uintptr_t ip = _Unwind_GetIP(ctx) - 1; + uintptr_t cfa = _Unwind_GetCFA(ctx); + unsigned int i; + + struct alt_handler_list *list = fetch_handler_list(NO); + if (!list || list->used == 0) return; + + for (i = 0; i < list->allocated; i++) { + struct alt_handler_data *data = &list->handlers[i]; + if (ip >= data->frame.ip_start && ip < data->frame.ip_end && data->frame.cfa == cfa) + { + if (data->frame.ips) { + unsigned int r = 0; + bool found; + while (1) { + uintptr_t start = data->frame.ips[r].start; + uintptr_t end = data->frame.ips[r].end; + r++; + if (start == 0 && end == 0) { + found = false; + break; + } + if (ip >= start && ip < end) { + found = true; + break; + } + } + if (!found) continue; + } + + // Copy and clear before the callback, in case the + // callback manipulates the alt handler list. + struct alt_handler_data copy = *data; + bzero(data, sizeof(*data)); + list->used--; + if (PrintExceptions || PrintAltHandlers) { + _objc_inform("EXCEPTIONS: calling alt handler %p(%p) from " + "frame [ip=%p..%p sp=%p]", copy.fn, copy.context, + (void *)copy.frame.ip_start, + (void *)copy.frame.ip_end, + (void *)copy.frame.cfa); + } + if (copy.fn) (*copy.fn)(nil, copy.context); + if (copy.frame.ips) free(copy.frame.ips); + } + } +} + +// SUPPORT_ALT_HANDLERS +#endif + + +/*********************************************************************** +* exception_init +* Initialize libobjc's exception handling system. +* Called by map_images(). +**********************************************************************/ +void exception_init(void) +{ + old_terminate = std::set_terminate(&_objc_terminate); +} + + +// __OBJC2__ +#endif diff --git a/runtime/objc-file-old.h b/runtime/objc-file-old.h new file mode 100644 index 0000000..3feb82b --- /dev/null +++ b/runtime/objc-file-old.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_FILE_OLD_H +#define _OBJC_FILE_OLD_H + +#if !__OBJC2__ + +#include "objc-os.h" + +struct objc_module; +struct old_protocol; +struct old_class; + +__BEGIN_DECLS + +extern struct objc_module *_getObjcModules(const header_info *hi, size_t *nmodules); +extern SEL *_getObjcSelectorRefs(const header_info *hi, size_t *nmess); +extern struct old_protocol **_getObjcProtocols(const header_info *hi, size_t *nprotos); +extern Class *_getObjcClassRefs(const header_info *hi, size_t *nclasses); +extern const char *_getObjcClassNames(const header_info *hi, size_t *size); + +using Initializer = void(*)(void); +extern Initializer* getLibobjcInitializers(const headerType *mhdr, size_t *count); + +__END_DECLS + +#endif + +#endif diff --git a/runtime/objc-file-old.mm b/runtime/objc-file-old.mm new file mode 100644 index 0000000..8e170d4 --- /dev/null +++ b/runtime/objc-file-old.mm @@ -0,0 +1,165 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +// Copyright 1988-1996 NeXT Software, Inc. 
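The accessors declared in objc-file-old.h above, and the implementation that begins here, are thin wrappers over Mach-O section lookups via getsectiondata(). As a rough standalone illustration of that underlying call only (hypothetical tool code, not part of this patch), a program can walk the images dyld has loaded and report which carry Objective-C image info. Note that the ObjC2 accessors later in this patch also probe __DATA_CONST and __DATA_DIRTY; this simple __DATA-only lookup would miss those.

    #include <mach-o/dyld.h>
    #include <mach-o/getsect.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        for (uint32_t i = 0; i < _dyld_image_count(); i++) {
            const struct mach_header *mh = _dyld_get_image_header(i);
            unsigned long size = 0;
    #if __LP64__
            uint8_t *sect = getsectiondata((const struct mach_header_64 *)mh,
                                           "__DATA", "__objc_imageinfo", &size);
    #else
            uint8_t *sect = getsectiondata(mh, "__DATA", "__objc_imageinfo", &size);
    #endif
            if (sect) {
                printf("%s: __objc_imageinfo is %lu bytes\n",
                       _dyld_get_image_name(i), size);
            }
        }
        return 0;
    }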
+ +#if !__OBJC2__ + +#include "objc-private.h" +#include "objc-runtime-old.h" +#include "objc-file-old.h" + +#if TARGET_OS_WIN32 + +/* +Module +_getObjcModules(const header_info *hi, size_t *nmodules) +{ + if (nmodules) *nmodules = hi->moduleCount; + return hi->modules; +} +*/ +SEL * +_getObjcSelectorRefs(const header_info *hi, size_t *nmess) +{ + if (nmess) *nmess = hi->selrefCount; + return hi->selrefs; +} + +struct old_protocol ** +_getObjcProtocols(const header_info *hi, size_t *nprotos) +{ + if (nprotos) *nprotos = hi->protocolCount; + return hi->protocols; +} + +Class* +_getObjcClassRefs(const header_info *hi, size_t *nclasses) +{ + if (nclasses) *nclasses = hi->clsrefCount; + return (Class*)hi->clsrefs; +} + +// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638 +const char * +_getObjcClassNames(const header_info *hi, size_t *size) +{ + if (size) *size = 0; + return NULL; +} + +#else + +#define GETSECT(name, type, segname, sectname) \ + type *name(const headerType *mhdr, size_t *outCount) \ + { \ + unsigned long byteCount = 0; \ + type *data = (type *) \ + getsectiondata(mhdr, segname, sectname, &byteCount); \ + *outCount = byteCount / sizeof(type); \ + return data; \ + } \ + type *name(const header_info *hi, size_t *outCount) \ + { \ + return name(hi->mhdr(), outCount); \ + } + +GETSECT(_getObjcModules, objc_module, "__OBJC", "__module_info"); +GETSECT(_getObjcSelectorRefs, SEL, "__OBJC", "__message_refs"); +GETSECT(_getObjcClassRefs, Class, "__OBJC", "__cls_refs"); +GETSECT(_getObjcClassNames, const char, "__OBJC", "__class_names"); +// __OBJC,__class_names section only emitted by CodeWarrior rdar://4951638 +GETSECT(getLibobjcInitializers, Initializer, "__DATA", "__objc_init_func"); + + +objc_image_info * +_getObjcImageInfo(const headerType *mhdr, size_t *outBytes) +{ + unsigned long byteCount = 0; + objc_image_info *info = (objc_image_info *) + getsectiondata(mhdr, SEG_OBJC, "__image_info", &byteCount); + *outBytes = byteCount; + return info; +} + + +struct old_protocol ** +_getObjcProtocols(const header_info *hi, size_t *nprotos) +{ + unsigned long size = 0; + struct old_protocol *protos = (struct old_protocol *) + getsectiondata(hi->mhdr(), SEG_OBJC, "__protocol", &size); + *nprotos = size / sizeof(struct old_protocol); + + if (!hi->proto_refs && *nprotos) { + size_t i; + header_info *whi = (header_info *)hi; + whi->proto_refs = (struct old_protocol **) + malloc(*nprotos * sizeof(*hi->proto_refs)); + for (i = 0; i < *nprotos; i++) { + hi->proto_refs[i] = protos+i; + } + } + + return hi->proto_refs; +} + + +static const segmentType * +getsegbynamefromheader(const headerType *head, const char *segname) +{ + const segmentType *sgp; + unsigned long i; + + sgp = (const segmentType *) (head + 1); + for (i = 0; i < head->ncmds; i++){ + if (sgp->cmd == SEGMENT_CMD) { + if (strncmp(sgp->segname, segname, sizeof(sgp->segname)) == 0) { + return sgp; + } + } + sgp = (const segmentType *)((char *)sgp + sgp->cmdsize); + } + return NULL; +} + +bool +_hasObjcContents(const header_info *hi) +{ + // Look for an __OBJC,* section other than __OBJC,__image_info + const segmentType *seg = getsegbynamefromheader(hi->mhdr(), "__OBJC"); + const sectionType *sect; + uint32_t i; + for (i = 0; i < seg->nsects; i++) { + sect = ((const sectionType *)(seg+1))+i; + if (0 != strncmp(sect->sectname, "__image_info", 12)) { + return YES; + } + } + + return NO; +} + + +#endif + +#endif diff --git a/runtime/objc-file.h b/runtime/objc-file.h new file mode 100644 index 0000000..5e78b61 --- 
/dev/null +++ b/runtime/objc-file.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_FILE_NEW_H +#define _OBJC_FILE_NEW_H + +#if __OBJC2__ + +#include "objc-runtime-new.h" + +// classref_t is not fixed up at launch; use remapClass() to convert + +extern SEL *_getObjc2SelectorRefs(const header_info *hi, size_t *count); +extern message_ref_t *_getObjc2MessageRefs(const header_info *hi, size_t *count); +extern Class*_getObjc2ClassRefs(const header_info *hi, size_t *count); +extern Class*_getObjc2SuperRefs(const header_info *hi, size_t *count); +extern classref_t *_getObjc2ClassList(const header_info *hi, size_t *count); +extern classref_t *_getObjc2NonlazyClassList(const header_info *hi, size_t *count); +extern category_t **_getObjc2CategoryList(const header_info *hi, size_t *count); +extern category_t **_getObjc2NonlazyCategoryList(const header_info *hi, size_t *count); +extern protocol_t **_getObjc2ProtocolList(const header_info *hi, size_t *count); +extern protocol_t **_getObjc2ProtocolRefs(const header_info *hi, size_t *count); +using Initializer = void(*)(void); +extern Initializer* getLibobjcInitializers(const header_info *hi, size_t *count); + +extern classref_t *_getObjc2NonlazyClassList(const headerType *mhdr, size_t *count); +extern category_t **_getObjc2NonlazyCategoryList(const headerType *mhdr, size_t *count); +extern Initializer* getLibobjcInitializers(const headerType *mhdr, size_t *count); + +#endif + +#endif diff --git a/runtime/objc-file.mm b/runtime/objc-file.mm new file mode 100644 index 0000000..c9ec260 --- /dev/null +++ b/runtime/objc-file.mm @@ -0,0 +1,127 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if __OBJC2__ + +#include "objc-private.h" +#include "objc-file.h" + + +// Look for a __DATA or __DATA_CONST or __DATA_DIRTY section +// with the given name that stores an array of T. +template +T* getDataSection(const headerType *mhdr, const char *sectname, + size_t *outBytes, size_t *outCount) +{ + unsigned long byteCount = 0; + T* data = (T*)getsectiondata(mhdr, "__DATA", sectname, &byteCount); + if (!data) { + data = (T*)getsectiondata(mhdr, "__DATA_CONST", sectname, &byteCount); + } + if (!data) { + data = (T*)getsectiondata(mhdr, "__DATA_DIRTY", sectname, &byteCount); + } + if (outBytes) *outBytes = byteCount; + if (outCount) *outCount = byteCount / sizeof(T); + return data; +} + +#define GETSECT(name, type, sectname) \ + type *name(const headerType *mhdr, size_t *outCount) { \ + return getDataSection(mhdr, sectname, nil, outCount); \ + } \ + type *name(const header_info *hi, size_t *outCount) { \ + return getDataSection(hi->mhdr(), sectname, nil, outCount); \ + } + +// function name content type section name +GETSECT(_getObjc2SelectorRefs, SEL, "__objc_selrefs"); +GETSECT(_getObjc2MessageRefs, message_ref_t, "__objc_msgrefs"); +GETSECT(_getObjc2ClassRefs, Class, "__objc_classrefs"); +GETSECT(_getObjc2SuperRefs, Class, "__objc_superrefs"); +GETSECT(_getObjc2ClassList, classref_t, "__objc_classlist"); +GETSECT(_getObjc2NonlazyClassList, classref_t, "__objc_nlclslist"); +GETSECT(_getObjc2CategoryList, category_t *, "__objc_catlist"); +GETSECT(_getObjc2NonlazyCategoryList, category_t *, "__objc_nlcatlist"); +GETSECT(_getObjc2ProtocolList, protocol_t *, "__objc_protolist"); +GETSECT(_getObjc2ProtocolRefs, protocol_t *, "__objc_protorefs"); +GETSECT(getLibobjcInitializers, Initializer, "__objc_init_func"); + + +objc_image_info * +_getObjcImageInfo(const headerType *mhdr, size_t *outBytes) +{ + return getDataSection(mhdr, "__objc_imageinfo", + outBytes, nil); +} + + +static const segmentType * +getsegbynamefromheader(const headerType *mhdr, const char *segname) +{ + const segmentType *seg = (const segmentType *) (mhdr + 1); + for (unsigned long i = 0; i < mhdr->ncmds; i++){ + if (seg->cmd == SEGMENT_CMD && segnameEquals(seg->segname, segname)) { + return seg; + } + seg = (const segmentType *)((char *)seg + seg->cmdsize); + } + return nil; +} + +// Look for an __objc* section other than __objc_imageinfo +static bool segmentHasObjcContents(const segmentType *seg) +{ + if (seg) { + for (uint32_t i = 0; i < seg->nsects; i++) { + const sectionType *sect = ((const sectionType *)(seg+1))+i; + if (sectnameStartsWith(sect->sectname, "__objc_") && + !sectnameEquals(sect->sectname, "__objc_imageinfo")) + { + return true; + } + } + } + + return false; +} + +// Look for an __objc* section other than __objc_imageinfo +bool +_hasObjcContents(const header_info *hi) +{ + const segmentType *data = + getsegbynamefromheader(hi->mhdr(), "__DATA"); + const segmentType *data_const = + getsegbynamefromheader(hi->mhdr(), "__DATA_CONST"); + const segmentType *data_dirty = + getsegbynamefromheader(hi->mhdr(), "__DATA_DIRTY"); + + return segmentHasObjcContents(data) + || segmentHasObjcContents(data_const) + || segmentHasObjcContents(data_dirty); +} + + +// OBJC2 +#endif diff --git a/runtime/objc-gdb.h b/runtime/objc-gdb.h new file mode 100644 index 0000000..0c01590 --- /dev/null +++ b/runtime/objc-gdb.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2008 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_GDB_H +#define _OBJC_GDB_H + +/* + * WARNING DANGER HAZARD BEWARE EEK + * + * Everything in this file is for debugger and developer tool use only. + * These will change in arbitrary OS updates and in unpredictable ways. + * When your program breaks, you get to keep both pieces. + */ + +#ifdef __APPLE_API_PRIVATE + +#define _OBJC_PRIVATE_H_ +#include +#include +#include + +__BEGIN_DECLS + + +/*********************************************************************** +* Class pointer preflighting +**********************************************************************/ + +// Return cls if it's a valid class, or crash. +OBJC_EXPORT Class gdb_class_getClass(Class cls) +#if __OBJC2__ + OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0); +#else + OBJC_AVAILABLE(10.7, 3.1, 9.0, 1.0); +#endif + +// Same as gdb_class_getClass(object_getClass(cls)). +OBJC_EXPORT Class gdb_object_getClass(id obj) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + + +/*********************************************************************** +* Class lists for heap. +**********************************************************************/ + +#if __OBJC2__ + +// Maps class name to Class, for in-use classes only. NXStrValueMapPrototype. +OBJC_EXPORT NXMapTable *gdb_objc_realized_classes + OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0); + +#else + +// Hashes Classes, for all known classes. Custom prototype. +OBJC_EXPORT NXHashTable *_objc_debug_class_hash + __OSX_AVAILABLE(10.2) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + +#endif + + +/*********************************************************************** +* Non-pointer isa +**********************************************************************/ + +#if __OBJC2__ + +// Extract isa pointer from an isa field. +// (Class)(isa & mask) == class pointer +OBJC_EXPORT const uintptr_t objc_debug_isa_class_mask + OBJC_AVAILABLE(10.10, 7.0, 9.0, 1.0); + +// Extract magic cookie from an isa field. +// (isa & magic_mask) == magic_value +OBJC_EXPORT const uintptr_t objc_debug_isa_magic_mask + OBJC_AVAILABLE(10.10, 7.0, 9.0, 1.0); +OBJC_EXPORT const uintptr_t objc_debug_isa_magic_value + OBJC_AVAILABLE(10.10, 7.0, 9.0, 1.0); + +// Use indexed ISAs for targets which store index of the class in the ISA. +// This index can be used to index the array of classes. +OBJC_EXPORT const uintptr_t objc_debug_indexed_isa_magic_mask; +OBJC_EXPORT const uintptr_t objc_debug_indexed_isa_magic_value; + +// Then these are used to extract the index from the ISA. 
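The indexed-isa index mask and shift are declared just below. Before those, a short sketch of how a debugger-style tool might combine the packed-isa masks exported above to recover a class pointer from a raw isa word (hypothetical code, not part of this header; class_from_raw_isa is a made-up name, and it assumes a 64-bit process linked against libobjc where these symbols are present):

    #include <objc/runtime.h>
    #include <stdint.h>
    #include <stdio.h>

    extern const uintptr_t objc_debug_isa_class_mask;
    extern const uintptr_t objc_debug_isa_magic_mask;
    extern const uintptr_t objc_debug_isa_magic_value;

    /* If the word carries the non-pointer-isa magic pattern, mask out the
       class bits; otherwise treat it as a plain class pointer. */
    static Class class_from_raw_isa(uintptr_t bits)
    {
        if ((bits & objc_debug_isa_magic_mask) == objc_debug_isa_magic_value) {
            return (Class)(bits & objc_debug_isa_class_mask);
        }
        return (Class)bits;
    }

    int main(void)
    {
        Class cls = objc_getClass("NSObject");
        uintptr_t raw = *(uintptr_t *)(void *)cls;  /* first word is the isa field */
        printf("isa of NSObject resolves to %s\n",
               class_getName(class_from_raw_isa(raw)));
        return 0;
    }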
+OBJC_EXPORT const uintptr_t objc_debug_indexed_isa_index_mask; +OBJC_EXPORT const uintptr_t objc_debug_indexed_isa_index_shift; + +// And then we can use that index to get the class from this array. Note +// the size is provided so that clients can ensure the index they get is in +// bounds and not read off the end of the array. +OBJC_EXPORT Class objc_indexed_classes[]; + +// When we don't have enough bits to store a class*, we can instead store an +// index in to this array. Classes are added here when they are realized. +// Note, an index of 0 is illegal. +OBJC_EXPORT uintptr_t objc_indexed_classes_count; + +// Absolute symbols for some of the above values are in objc-abi.h. + +#endif + + +/*********************************************************************** +* Tagged pointer decoding +**********************************************************************/ +#if __OBJC2__ + +// Basic tagged pointers (7 classes, 60-bit payload). + +// if (obj & mask) obj is a tagged pointer object +OBJC_EXPORT uintptr_t objc_debug_taggedpointer_mask + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +// tag_slot = (obj >> slot_shift) & slot_mask +OBJC_EXPORT unsigned int objc_debug_taggedpointer_slot_shift + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); +OBJC_EXPORT uintptr_t objc_debug_taggedpointer_slot_mask + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +// class = classes[tag_slot] +OBJC_EXPORT Class objc_debug_taggedpointer_classes[] + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +// payload = (obj << payload_lshift) >> payload_rshift +// Payload signedness is determined by the signedness of the right-shift. +OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_lshift + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); +OBJC_EXPORT unsigned int objc_debug_taggedpointer_payload_rshift + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + + +// Extended tagged pointers (255 classes, 52-bit payload). + +// If you interrogate an extended tagged pointer using the basic +// tagged pointer scheme alone, it will appear to have an isa +// that is either nil or class __NSUnrecognizedTaggedPointer. + +// if (ext_mask != 0 && (obj & ext_mask) == ext_mask) +// obj is a ext tagged pointer object +OBJC_EXPORT uintptr_t objc_debug_taggedpointer_ext_mask + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +// ext_tag_slot = (obj >> ext_slot_shift) & ext_slot_mask +OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_slot_shift + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT uintptr_t objc_debug_taggedpointer_ext_slot_mask + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +// class = ext_classes[ext_tag_slot] +OBJC_EXPORT Class objc_debug_taggedpointer_ext_classes[] + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +// payload = (obj << ext_payload_lshift) >> ext_payload_rshift +// Payload signedness is determined by the signedness of the right-shift. +OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_lshift + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); +OBJC_EXPORT unsigned int objc_debug_taggedpointer_ext_payload_rshift + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +#endif + + +/*********************************************************************** +* Breakpoints in objc_msgSend for debugger stepping. +* The array is a {0,0} terminated list of addresses. +* Each address is one of the following: +* OBJC_MESSENGER_START: Address is the start of a messenger function. +* OBJC_MESSENGER_END_FAST: Address is a jump insn that calls an IMP. +* OBJC_MESSENGER_END_SLOW: Address is some insn in the slow lookup path. 
+* OBJC_MESSENGER_END_NIL: Address is a return insn for messages to nil. +* +* Every path from OBJC_MESSENGER_START should reach some OBJC_MESSENGER_END. +* At all ENDs, the stack and parameter register state is the same as START. +* +* In some cases, the END_FAST case jumps to something other than the +* method's implementation. In those cases the jump's destination will +* be another function that is marked OBJC_MESSENGER_START. +**********************************************************************/ +#if __OBJC2__ + +#define OBJC_MESSENGER_START 1 +#define OBJC_MESSENGER_END_FAST 2 +#define OBJC_MESSENGER_END_SLOW 3 +#define OBJC_MESSENGER_END_NIL 4 + +struct objc_messenger_breakpoint { + uintptr_t address; + uintptr_t kind; +}; + +OBJC_EXPORT struct objc_messenger_breakpoint +gdb_objc_messenger_breakpoints[] + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +#endif + + +__END_DECLS + +#endif + +#endif diff --git a/runtime/objc-initialize.h b/runtime/objc-initialize.h new file mode 100644 index 0000000..9ec99b5 --- /dev/null +++ b/runtime/objc-initialize.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2005-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_INITIALIZE_H +#define _OBJC_INITIALIZE_H + +#include "objc-private.h" + +__BEGIN_DECLS + +struct _objc_initializing_classes; + +extern void _class_initialize(Class cls); + +extern void _destroyInitializingClassList(struct _objc_initializing_classes *list); + +extern bool _thisThreadIsInitializingClass(Class cls); + +__END_DECLS + +#endif diff --git a/runtime/objc-initialize.mm b/runtime/objc-initialize.mm new file mode 100644 index 0000000..0857305 --- /dev/null +++ b/runtime/objc-initialize.mm @@ -0,0 +1,474 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 
+ * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-initialize.m +* +initialize support +**********************************************************************/ + +/*********************************************************************** + * Thread-safety during class initialization (GrP 2001-9-24) + * + * Initial state: CLS_INITIALIZING and CLS_INITIALIZED both clear. + * During initialization: CLS_INITIALIZING is set + * After initialization: CLS_INITIALIZING clear and CLS_INITIALIZED set. + * CLS_INITIALIZING and CLS_INITIALIZED are never set at the same time. + * CLS_INITIALIZED is never cleared once set. + * + * Only one thread is allowed to actually initialize a class and send + * +initialize. Enforced by allowing only one thread to set CLS_INITIALIZING. + * + * Additionally, threads trying to send messages to a class must wait for + * +initialize to finish. During initialization of a class, that class's + * method cache is kept empty. objc_msgSend will revert to + * class_lookupMethodAndLoadCache, which checks CLS_INITIALIZED before + * messaging. If CLS_INITIALIZED is clear but CLS_INITIALIZING is set, + * the thread must block, unless it is the thread that started + * initializing the class in the first place. + * + * Each thread keeps a list of classes it's initializing. + * The global classInitLock is used to synchronize changes to CLS_INITIALIZED + * and CLS_INITIALIZING: the transition to CLS_INITIALIZING must be + * an atomic test-and-set with respect to itself and the transition + * to CLS_INITIALIZED. + * The global classInitWaitCond is used to block threads waiting for an + * initialization to complete. The classInitLock synchronizes + * condition checking and the condition variable. + **********************************************************************/ + +/*********************************************************************** + * +initialize deadlock case when a class is marked initializing while + * its superclass is initialized. Solved by completely initializing + * superclasses before beginning to initialize a class. + * + * OmniWeb class hierarchy: + * OBObject + * | ` OBPostLoader + * OFObject + * / \ + * OWAddressEntry OWController + * | + * OWConsoleController + * + * Thread 1 (evil testing thread): + * initialize OWAddressEntry + * super init OFObject + * super init OBObject + * [OBObject initialize] runs OBPostLoader, which inits lots of classes... + * initialize OWConsoleController + * super init OWController - wait for Thread 2 to finish OWController init + * + * Thread 2 (normal OmniWeb thread): + * initialize OWController + * super init OFObject - wait for Thread 1 to finish OFObject init + * + * deadlock! + * + * Solution: fully initialize super classes before beginning to initialize + * a subclass. Then the initializing+initialized part of the class hierarchy + * will be a contiguous subtree starting at the root, so other threads + * can't jump into the middle between two initializing classes, and we won't + * get stuck while a superclass waits for its subclass which waits for the + * superclass. + **********************************************************************/ + +#include "objc-private.h" +#include "message.h" +#include "objc-initialize.h" + +/* classInitLock protects CLS_INITIALIZED and CLS_INITIALIZING, and + * is signalled when any class is done initializing. 
+ * Threads that are waiting for a class to finish initializing wait on this. */ +static monitor_t classInitLock; + + +/*********************************************************************** +* struct _objc_initializing_classes +* Per-thread list of classes currently being initialized by that thread. +* During initialization, that thread is allowed to send messages to that +* class, but other threads have to wait. +* The list is a simple array of metaclasses (the metaclass stores +* the initialization state). +**********************************************************************/ +typedef struct _objc_initializing_classes { + int classesAllocated; + Class *metaclasses; +} _objc_initializing_classes; + + +/*********************************************************************** +* _fetchInitializingClassList +* Return the list of classes being initialized by this thread. +* If create == YES, create the list when no classes are being initialized by this thread. +* If create == NO, return nil when no classes are being initialized by this thread. +**********************************************************************/ +static _objc_initializing_classes *_fetchInitializingClassList(bool create) +{ + _objc_pthread_data *data; + _objc_initializing_classes *list; + Class *classes; + + data = _objc_fetch_pthread_data(create); + if (data == nil) return nil; + + list = data->initializingClasses; + if (list == nil) { + if (!create) { + return nil; + } else { + list = (_objc_initializing_classes *) + calloc(1, sizeof(_objc_initializing_classes)); + data->initializingClasses = list; + } + } + + classes = list->metaclasses; + if (classes == nil) { + // If _objc_initializing_classes exists, allocate metaclass array, + // even if create == NO. + // Allow 4 simultaneous class inits on this thread before realloc. + list->classesAllocated = 4; + classes = (Class *) + calloc(list->classesAllocated, sizeof(Class)); + list->metaclasses = classes; + } + return list; +} + + +/*********************************************************************** +* _destroyInitializingClassList +* Deallocate memory used by the given initialization list. +* Any part of the list may be nil. +* Called from _objc_pthread_destroyspecific(). +**********************************************************************/ + +void _destroyInitializingClassList(struct _objc_initializing_classes *list) +{ + if (list != nil) { + if (list->metaclasses != nil) { + free(list->metaclasses); + } + free(list); + } +} + + +/*********************************************************************** +* _thisThreadIsInitializingClass +* Return TRUE if this thread is currently initializing the given class. +**********************************************************************/ +bool _thisThreadIsInitializingClass(Class cls) +{ + int i; + + _objc_initializing_classes *list = _fetchInitializingClassList(NO); + if (list) { + cls = cls->getMeta(); + for (i = 0; i < list->classesAllocated; i++) { + if (cls == list->metaclasses[i]) return YES; + } + } + + // no list or not found in list + return NO; +} + + +/*********************************************************************** +* _setThisThreadIsInitializingClass +* Record that this thread is currently initializing the given class. +* This thread will be allowed to send messages to the class, but +* other threads will have to wait. 
+**********************************************************************/ +static void _setThisThreadIsInitializingClass(Class cls) +{ + int i; + _objc_initializing_classes *list = _fetchInitializingClassList(YES); + cls = cls->getMeta(); + + // paranoia: explicitly disallow duplicates + for (i = 0; i < list->classesAllocated; i++) { + if (cls == list->metaclasses[i]) { + _objc_fatal("thread is already initializing this class!"); + return; // already the initializer + } + } + + for (i = 0; i < list->classesAllocated; i++) { + if (! list->metaclasses[i]) { + list->metaclasses[i] = cls; + return; + } + } + + // class list is full - reallocate + list->classesAllocated = list->classesAllocated * 2 + 1; + list->metaclasses = (Class *) + realloc(list->metaclasses, + list->classesAllocated * sizeof(Class)); + // zero out the new entries + list->metaclasses[i++] = cls; + for ( ; i < list->classesAllocated; i++) { + list->metaclasses[i] = nil; + } +} + + +/*********************************************************************** +* _setThisThreadIsNotInitializingClass +* Record that this thread is no longer initializing the given class. +**********************************************************************/ +static void _setThisThreadIsNotInitializingClass(Class cls) +{ + int i; + + _objc_initializing_classes *list = _fetchInitializingClassList(NO); + if (list) { + cls = cls->getMeta(); + for (i = 0; i < list->classesAllocated; i++) { + if (cls == list->metaclasses[i]) { + list->metaclasses[i] = nil; + return; + } + } + } + + // no list or not found in list + _objc_fatal("thread is not initializing this class!"); +} + + +typedef struct PendingInitialize { + Class subclass; + struct PendingInitialize *next; +} PendingInitialize; + +static NXMapTable *pendingInitializeMap; + +/*********************************************************************** +* _finishInitializing +* cls has completed its +initialize method, and so has its superclass. +* Mark cls as initialized as well, then mark any of cls's subclasses +* that have already finished their own +initialize methods. +**********************************************************************/ +static void _finishInitializing(Class cls, Class supercls) +{ + PendingInitialize *pending; + + classInitLock.assertLocked(); + assert(!supercls || supercls->isInitialized()); + + if (PrintInitializing) { + _objc_inform("INITIALIZE: %s is fully +initialized", + cls->nameForLogging()); + } + + // mark this class as fully +initialized + cls->setInitialized(); + classInitLock.notifyAll(); + _setThisThreadIsNotInitializingClass(cls); + + // mark any subclasses that were merely waiting for this class + if (!pendingInitializeMap) return; + pending = (PendingInitialize *)NXMapGet(pendingInitializeMap, cls); + if (!pending) return; + + NXMapRemove(pendingInitializeMap, cls); + + // Destroy the pending table if it's now empty, to save memory. + if (NXCountMapTable(pendingInitializeMap) == 0) { + NXFreeMapTable(pendingInitializeMap); + pendingInitializeMap = nil; + } + + while (pending) { + PendingInitialize *next = pending->next; + if (pending->subclass) _finishInitializing(pending->subclass, cls); + free(pending); + pending = next; + } +} + + +/*********************************************************************** +* _finishInitializingAfter +* cls has completed its +initialize method, but its superclass has not. +* Wait until supercls finishes before marking cls as initialized. 
+**********************************************************************/ +static void _finishInitializingAfter(Class cls, Class supercls) +{ + PendingInitialize *pending; + + classInitLock.assertLocked(); + + if (PrintInitializing) { + _objc_inform("INITIALIZE: %s waiting for superclass +[%s initialize]", + cls->nameForLogging(), supercls->nameForLogging()); + } + + if (!pendingInitializeMap) { + pendingInitializeMap = + NXCreateMapTable(NXPtrValueMapPrototype, 10); + // fixme pre-size this table for CF/NSObject +initialize + } + + pending = (PendingInitialize *)malloc(sizeof(*pending)); + pending->subclass = cls; + pending->next = (PendingInitialize *) + NXMapGet(pendingInitializeMap, supercls); + NXMapInsert(pendingInitializeMap, supercls, pending); +} + + +// Provide helpful messages in stack traces. +OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) +void waitForInitializeToComplete(Class cls) + asm("_WAITING_FOR_ANOTHER_THREAD_TO_FINISH_CALLING_+initialize"); +OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) +void callInitialize(Class cls) + asm("_CALLING_SOME_+initialize_METHOD"); + + +void waitForInitializeToComplete(Class cls) +{ + monitor_locker_t lock(classInitLock); + while (!cls->isInitialized()) { + classInitLock.wait(); + } + asm(""); +} + + +void callInitialize(Class cls) +{ + ((void(*)(Class, SEL))objc_msgSend)(cls, SEL_initialize); + asm(""); +} + + +/*********************************************************************** +* class_initialize. Send the '+initialize' message on demand to any +* uninitialized class. Force initialization of superclasses first. +**********************************************************************/ +void _class_initialize(Class cls) +{ + assert(!cls->isMetaClass()); + + Class supercls; + bool reallyInitialize = NO; + + // Make sure super is done initializing BEFORE beginning to initialize cls. + // See note about deadlock above. + supercls = cls->superclass; + if (supercls && !supercls->isInitialized()) { + _class_initialize(supercls); + } + + // Try to atomically set CLS_INITIALIZING. + { + monitor_locker_t lock(classInitLock); + if (!cls->isInitialized() && !cls->isInitializing()) { + cls->setInitializing(); + reallyInitialize = YES; + } + } + + if (reallyInitialize) { + // We successfully set the CLS_INITIALIZING bit. Initialize the class. + + // Record that we're initializing this class so we can message it. + _setThisThreadIsInitializingClass(cls); + + // Send the +initialize message. + // Note that +initialize is sent to the superclass (again) if + // this class doesn't implement +initialize. 2157218 + if (PrintInitializing) { + _objc_inform("INITIALIZE: calling +[%s initialize]", + cls->nameForLogging()); + } + + // Exceptions: A +initialize call that throws an exception + // is deemed to be a complete and successful +initialize. + @try { + callInitialize(cls); + + if (PrintInitializing) { + _objc_inform("INITIALIZE: finished +[%s initialize]", + cls->nameForLogging()); + } + } + @catch (...) { + if (PrintInitializing) { + _objc_inform("INITIALIZE: +[%s initialize] threw an exception", + cls->nameForLogging()); + } + @throw; + } + @finally { + // Done initializing. + // If the superclass is also done initializing, then update + // the info bits and notify waiting threads. + // If not, update them later. (This can happen if this +initialize + // was itself triggered from inside a superclass +initialize.) 
+ monitor_locker_t lock(classInitLock); + if (!supercls || supercls->isInitialized()) { + _finishInitializing(cls, supercls); + } else { + _finishInitializingAfter(cls, supercls); + } + } + return; + } + + else if (cls->isInitializing()) { + // We couldn't set INITIALIZING because INITIALIZING was already set. + // If this thread set it earlier, continue normally. + // If some other thread set it, block until initialize is done. + // It's ok if INITIALIZING changes to INITIALIZED while we're here, + // because we safely check for INITIALIZED inside the lock + // before blocking. + if (_thisThreadIsInitializingClass(cls)) { + return; + } else { + waitForInitializeToComplete(cls); + return; + } + } + + else if (cls->isInitialized()) { + // Set CLS_INITIALIZING failed because someone else already + // initialized the class. Continue normally. + // NOTE this check must come AFTER the ISINITIALIZING case. + // Otherwise: Another thread is initializing this class. ISINITIALIZED + // is false. Skip this clause. Then the other thread finishes + // initialization and sets INITIALIZING=no and INITIALIZED=yes. + // Skip the ISINITIALIZING clause. Die horribly. + return; + } + + else { + // We shouldn't be here. + _objc_fatal("thread-safe class init in objc runtime is buggy!"); + } +} diff --git a/runtime/objc-internal.h b/runtime/objc-internal.h new file mode 100644 index 0000000..5bcb28c --- /dev/null +++ b/runtime/objc-internal.h @@ -0,0 +1,735 @@ +/* + * Copyright (c) 2009 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + + +#ifndef _OBJC_INTERNAL_H +#define _OBJC_INTERNAL_H + +/* + * WARNING DANGER HAZARD BEWARE EEK + * + * Everything in this file is for Apple Internal use only. + * These will change in arbitrary OS updates and in unpredictable ways. + * When your program breaks, you get to keep both pieces. + */ + +/* + * objc-internal.h: Private SPI for use by other system frameworks. + */ + +#include +#include +#include +#include +#include + +__BEGIN_DECLS + +// Termination reasons in the OS_REASON_OBJC namespace. +#define OBJC_EXIT_REASON_UNSPECIFIED 1 +#define OBJC_EXIT_REASON_GC_NOT_SUPPORTED 2 + +// This is the allocation size required for each of the class and the metaclass +// with objc_initializeClassPair() and objc_readClassPair(). +// The runtime's class structure will never grow beyond this. +#define OBJC_MAX_CLASS_SIZE (32*sizeof(void*)) + +// In-place construction of an Objective-C class. +// cls and metacls must each be OBJC_MAX_CLASS_SIZE bytes. +// Returns nil if a class with the same name already exists. +// Returns nil if the superclass is under construction. 
+// Call objc_registerClassPair() when you are done.
+OBJC_EXPORT Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class metacls)
+    OBJC_AVAILABLE(10.6, 3.0, 9.0, 1.0);
+
+// Class and metaclass construction from a compiler-generated memory image.
+// cls and cls->isa must each be OBJC_MAX_CLASS_SIZE bytes.
+// Extra bytes not used by the metadata must be zero.
+// info is the same objc_image_info that would be emitted by a static compiler.
+// Returns nil if a class with the same name already exists.
+// Returns nil if the superclass is nil and the class is not marked as a root.
+// Returns nil if the superclass is under construction.
+// Do not call objc_registerClassPair().
+#if __OBJC2__
+struct objc_image_info;
+OBJC_EXPORT Class objc_readClassPair(Class cls,
+                                     const struct objc_image_info *info)
+    OBJC_AVAILABLE(10.10, 8.0, 9.0, 1.0);
+#endif
+
+// Batch object allocation using malloc_zone_batch_malloc().
+OBJC_EXPORT unsigned class_createInstances(Class cls, size_t extraBytes,
+                                           id *results, unsigned num_requested)
+    OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0)
+    OBJC_ARC_UNAVAILABLE;
+
+// Get the isa pointer written into objects just before being freed.
+OBJC_EXPORT Class _objc_getFreedObjectClass(void)
+    OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
+
+// env NSObjCMessageLoggingEnabled
+OBJC_EXPORT void instrumentObjcMessageSends(BOOL flag)
+    OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0);
+
+// Initializer called by libSystem
+OBJC_EXPORT void _objc_init(void)
+#if __OBJC2__
+    OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0);
+#else
+    OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0);
+#endif
+
+// Return YES if GC is on and `object` is a GC allocation.
+OBJC_EXPORT BOOL objc_isAuto(id object)
+    __OSX_DEPRECATED(10.4, 10.8, "it always returns NO")
+    __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE;
+
+// GC startup callback from Foundation
+OBJC_EXPORT malloc_zone_t *objc_collect_init(int (*callback)(void))
+    __OSX_DEPRECATED(10.4, 10.8, "it does nothing")
+    __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE;
+
+// Plainly-implemented GC barriers. Rosetta used to use these.
+OBJC_EXPORT id objc_assign_strongCast_generic(id value, id *dest)
+    UNAVAILABLE_ATTRIBUTE;
+OBJC_EXPORT id objc_assign_global_generic(id value, id *dest)
+    UNAVAILABLE_ATTRIBUTE;
+OBJC_EXPORT id objc_assign_threadlocal_generic(id value, id *dest)
+    UNAVAILABLE_ATTRIBUTE;
+OBJC_EXPORT id objc_assign_ivar_generic(id value, id dest, ptrdiff_t offset)
+    UNAVAILABLE_ATTRIBUTE;
+
+// GC preflight for an app executable.
+// 1: some slice requires GC
+// 0: no slice requires GC
+// -1: I/O or file format error
+OBJC_EXPORT int objc_appRequiresGC(int fd)
+    __OSX_AVAILABLE(10.11)
+    __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE;
+
+// Install missing-class callback. Used by the late unlamented ZeroLink.
+OBJC_EXPORT void _objc_setClassLoader(BOOL (*newClassLoader)(const char *)) OBJC2_UNAVAILABLE;
+
+// Install handler for allocation failures.
+// Handler may abort, or throw, or provide an object to return.
+OBJC_EXPORT void _objc_setBadAllocHandler(id (*newHandler)(Class isa)) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); + +// This can go away when AppKit stops calling it (rdar://7811851) +#if __OBJC2__ +OBJC_EXPORT void objc_setMultithreaded (BOOL flag) + __OSX_DEPRECATED(10.0, 10.5, "multithreading is always available") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +#endif + +// Used by ExceptionHandling.framework +#if !__OBJC2__ +OBJC_EXPORT void _objc_error(id rcv, const char *fmt, va_list args) + __attribute__((noreturn)) + __OSX_DEPRECATED(10.0, 10.5, "use other logging facilities instead") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + +#endif + + +// Tagged pointer objects. + +#if __LP64__ +#define OBJC_HAVE_TAGGED_POINTERS 1 +#endif + +#if OBJC_HAVE_TAGGED_POINTERS + +// Tagged pointer layout and usage is subject to change on different OS versions. + +// Tag indexes 0..<7 have a 60-bit payload. +// Tag index 7 is reserved. +// Tag indexes 8..<264 have a 52-bit payload. +// Tag index 264 is reserved. + +#if __has_feature(objc_fixed_enum) || __cplusplus >= 201103L +enum objc_tag_index_t : uint16_t +#else +typedef uint16_t objc_tag_index_t; +enum +#endif +{ + OBJC_TAG_NSAtom = 0, + OBJC_TAG_1 = 1, + OBJC_TAG_NSString = 2, + OBJC_TAG_NSNumber = 3, + OBJC_TAG_NSIndexPath = 4, + OBJC_TAG_NSManagedObjectID = 5, + OBJC_TAG_NSDate = 6, + OBJC_TAG_RESERVED_7 = 7, + + OBJC_TAG_First60BitPayload = 0, + OBJC_TAG_Last60BitPayload = 6, + OBJC_TAG_First52BitPayload = 8, + OBJC_TAG_Last52BitPayload = 263, + + OBJC_TAG_RESERVED_264 = 264 +}; +#if __has_feature(objc_fixed_enum) && !defined(__cplusplus) +typedef enum objc_tag_index_t objc_tag_index_t; +#endif + + +// Returns true if tagged pointers are enabled. +// The other functions below must not be called if tagged pointers are disabled. +static inline bool +_objc_taggedPointersEnabled(void); + +// Register a class for a tagged pointer tag. +// Aborts if the tag is invalid or already in use. +OBJC_EXPORT void _objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +// Returns the registered class for the given tag. +// Returns nil if the tag is valid but has no registered class. +// Aborts if the tag is invalid. +OBJC_EXPORT Class _objc_getClassForTag(objc_tag_index_t tag) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +// Create a tagged pointer object with the given tag and payload. +// Assumes the tag is valid. +// Assumes tagged pointers are enabled. +// The payload will be silently truncated to fit. +static inline void * +_objc_makeTaggedPointer(objc_tag_index_t tag, uintptr_t payload); + +// Return true if ptr is a tagged pointer object. +// Does not check the validity of ptr's class. +static inline bool +_objc_isTaggedPointer(const void *ptr); + +// Extract the tag value from the given tagged pointer object. +// Assumes ptr is a valid tagged pointer object. +// Does not check the validity of ptr's tag. +static inline objc_tag_index_t +_objc_getTaggedPointerTag(const void *ptr); + +// Extract the payload from the given tagged pointer object. +// Assumes ptr is a valid tagged pointer object. +// The payload value is zero-extended. +static inline uintptr_t +_objc_getTaggedPointerValue(const void *ptr); + +// Extract the payload from the given tagged pointer object. +// Assumes ptr is a valid tagged pointer object. +// The payload value is sign-extended. +static inline intptr_t +_objc_getTaggedPointerSignedValue(const void *ptr); + +// Don't use the values below. 
Use the declarations above. + +#if TARGET_OS_OSX && __x86_64__ + // 64-bit Mac - tag bit is LSB +# define OBJC_MSB_TAGGED_POINTERS 0 +#else + // Everything else - tag bit is MSB +# define OBJC_MSB_TAGGED_POINTERS 1 +#endif + +#define _OBJC_TAG_INDEX_MASK 0x7 +// array slot includes the tag bit itself +#define _OBJC_TAG_SLOT_COUNT 16 +#define _OBJC_TAG_SLOT_MASK 0xf + +#define _OBJC_TAG_EXT_INDEX_MASK 0xff +// array slot has no extra bits +#define _OBJC_TAG_EXT_SLOT_COUNT 256 +#define _OBJC_TAG_EXT_SLOT_MASK 0xff + +#if OBJC_MSB_TAGGED_POINTERS +# define _OBJC_TAG_MASK (1ULL<<63) +# define _OBJC_TAG_INDEX_SHIFT 60 +# define _OBJC_TAG_SLOT_SHIFT 60 +# define _OBJC_TAG_PAYLOAD_LSHIFT 4 +# define _OBJC_TAG_PAYLOAD_RSHIFT 4 +# define _OBJC_TAG_EXT_MASK (0xfULL<<60) +# define _OBJC_TAG_EXT_INDEX_SHIFT 52 +# define _OBJC_TAG_EXT_SLOT_SHIFT 52 +# define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 12 +# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12 +#else +# define _OBJC_TAG_MASK 1 +# define _OBJC_TAG_INDEX_SHIFT 1 +# define _OBJC_TAG_SLOT_SHIFT 0 +# define _OBJC_TAG_PAYLOAD_LSHIFT 0 +# define _OBJC_TAG_PAYLOAD_RSHIFT 4 +# define _OBJC_TAG_EXT_MASK 0xfULL +# define _OBJC_TAG_EXT_INDEX_SHIFT 4 +# define _OBJC_TAG_EXT_SLOT_SHIFT 4 +# define _OBJC_TAG_EXT_PAYLOAD_LSHIFT 0 +# define _OBJC_TAG_EXT_PAYLOAD_RSHIFT 12 +#endif + +static inline bool +_objc_taggedPointersEnabled(void) +{ + extern uintptr_t objc_debug_taggedpointer_mask; + return (objc_debug_taggedpointer_mask != 0); +} + +static inline void * +_objc_makeTaggedPointer(objc_tag_index_t tag, uintptr_t value) +{ + // PAYLOAD_LSHIFT and PAYLOAD_RSHIFT are the payload extraction shifts. + // They are reversed here for payload insertion. + + // assert(_objc_taggedPointersEnabled()); + if (tag <= OBJC_TAG_Last60BitPayload) { + // assert(((value << _OBJC_TAG_PAYLOAD_RSHIFT) >> _OBJC_TAG_PAYLOAD_LSHIFT) == value); + return (void*) + (_OBJC_TAG_MASK | + ((uintptr_t)tag << _OBJC_TAG_INDEX_SHIFT) | + ((value << _OBJC_TAG_PAYLOAD_RSHIFT) >> _OBJC_TAG_PAYLOAD_LSHIFT)); + } else { + // assert(tag >= OBJC_TAG_First52BitPayload); + // assert(tag <= OBJC_TAG_Last52BitPayload); + // assert(((value << _OBJC_TAG_EXT_PAYLOAD_RSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_LSHIFT) == value); + return (void*) + (_OBJC_TAG_EXT_MASK | + ((uintptr_t)(tag - OBJC_TAG_First52BitPayload) << _OBJC_TAG_EXT_INDEX_SHIFT) | + ((value << _OBJC_TAG_EXT_PAYLOAD_RSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_LSHIFT)); + } +} + +static inline bool +_objc_isTaggedPointer(const void *ptr) +{ + return ((intptr_t)ptr & _OBJC_TAG_MASK) == _OBJC_TAG_MASK; +} + +static inline objc_tag_index_t +_objc_getTaggedPointerTag(const void *ptr) +{ + // assert(_objc_isTaggedPointer(ptr)); + uintptr_t basicTag = ((uintptr_t)ptr >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; + uintptr_t extTag = ((uintptr_t)ptr >> _OBJC_TAG_EXT_INDEX_SHIFT) & _OBJC_TAG_EXT_INDEX_MASK; + if (basicTag == _OBJC_TAG_INDEX_MASK) { + return (objc_tag_index_t)(extTag + OBJC_TAG_First52BitPayload); + } else { + return (objc_tag_index_t)basicTag; + } +} + +static inline uintptr_t +_objc_getTaggedPointerValue(const void *ptr) +{ + // assert(_objc_isTaggedPointer(ptr)); + uintptr_t basicTag = ((uintptr_t)ptr >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; + if (basicTag == _OBJC_TAG_INDEX_MASK) { + return ((uintptr_t)ptr << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT; + } else { + return ((uintptr_t)ptr << _OBJC_TAG_PAYLOAD_LSHIFT) >> _OBJC_TAG_PAYLOAD_RSHIFT; + } +} + +static inline intptr_t +_objc_getTaggedPointerSignedValue(const void *ptr) +{ + // 
assert(_objc_isTaggedPointer(ptr)); + uintptr_t basicTag = ((uintptr_t)ptr >> _OBJC_TAG_INDEX_SHIFT) & _OBJC_TAG_INDEX_MASK; + if (basicTag == _OBJC_TAG_INDEX_MASK) { + return ((intptr_t)ptr << _OBJC_TAG_EXT_PAYLOAD_LSHIFT) >> _OBJC_TAG_EXT_PAYLOAD_RSHIFT; + } else { + return ((intptr_t)ptr << _OBJC_TAG_PAYLOAD_LSHIFT) >> _OBJC_TAG_PAYLOAD_RSHIFT; + } +} + +// OBJC_HAVE_TAGGED_POINTERS +#endif + + +/** + * Returns the method implementation of an object. + * + * @param obj An Objective-C object. + * @param name An Objective-C selector. + * + * @return The IMP corresponding to the instance method implemented by + * the class of \e obj. + * + * @note Equivalent to: + * + * class_getMethodImplementation(object_getClass(obj), name); + */ +OBJC_EXPORT IMP object_getMethodImplementation(id obj, SEL name) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +OBJC_EXPORT IMP object_getMethodImplementation_stret(id obj, SEL name) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; + + +// Instance-specific instance variable layout. + +OBJC_EXPORT void _class_setIvarLayoutAccessor(Class cls_gen, const uint8_t* (*accessor) (id object)) + __OSX_AVAILABLE(10.7) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; +OBJC_EXPORT const uint8_t *_object_getIvarLayout(Class cls_gen, id object) + __OSX_AVAILABLE(10.7) + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE; + +/* + "Unknown" includes non-object ivars and non-ARC non-__weak ivars + "Strong" includes ARC __strong ivars + "Weak" includes ARC and new MRC __weak ivars + "Unretained" includes ARC __unsafe_unretained and old GC+MRC __weak ivars +*/ +typedef enum { + objc_ivar_memoryUnknown, // unknown / unknown + objc_ivar_memoryStrong, // direct access / objc_storeStrong + objc_ivar_memoryWeak, // objc_loadWeak[Retained] / objc_storeWeak + objc_ivar_memoryUnretained // direct access / direct access +} objc_ivar_memory_management_t; + +OBJC_EXPORT objc_ivar_memory_management_t _class_getIvarMemoryManagement(Class cls, Ivar ivar) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +OBJC_EXPORT BOOL _class_isFutureClass(Class cls) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + + +// API to only be called by root classes like NSObject or NSProxy + +OBJC_EXPORT +id +_objc_rootRetain(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +_objc_rootRelease(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +bool +_objc_rootReleaseWasZero(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +bool +_objc_rootTryRetain(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +bool +_objc_rootIsDeallocating(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +_objc_rootAutorelease(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +uintptr_t +_objc_rootRetainCount(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +_objc_rootInit(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +_objc_rootAllocWithZone(Class cls, malloc_zone_t *zone) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +_objc_rootAlloc(Class cls) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +_objc_rootDealloc(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +_objc_rootFinalize(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +malloc_zone_t * +_objc_rootZone(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +uintptr_t +_objc_rootHash(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void * +objc_autoreleasePoolPush(void) + 
OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +objc_autoreleasePoolPop(void *context) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +OBJC_EXPORT id objc_alloc(Class cls) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +OBJC_EXPORT id objc_allocWithZone(Class cls) + OBJC_AVAILABLE(10.9, 7.0, 9.0, 1.0); + +OBJC_EXPORT id objc_retain(id obj) + __asm__("_objc_retain") + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT void objc_release(id obj) + __asm__("_objc_release") + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT id objc_autorelease(id obj) + __asm__("_objc_autorelease") + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// Prepare a value at +1 for return through a +0 autoreleasing convention. +OBJC_EXPORT +id +objc_autoreleaseReturnValue(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// Prepare a value at +0 for return through a +0 autoreleasing convention. +OBJC_EXPORT +id +objc_retainAutoreleaseReturnValue(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// Accept a value returned through a +0 autoreleasing convention for use at +1. +OBJC_EXPORT +id +objc_retainAutoreleasedReturnValue(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// Accept a value returned through a +0 autoreleasing convention for use at +0. +OBJC_EXPORT +id +objc_unsafeClaimAutoreleasedReturnValue(id obj) + OBJC_AVAILABLE(10.11, 9.0, 9.0, 1.0); + +OBJC_EXPORT +void +objc_storeStrong(id *location, id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +objc_retainAutorelease(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// obsolete. +OBJC_EXPORT id objc_retain_autorelease(id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +objc_loadWeakRetained(id *location) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +id +objc_initWeak(id *location, id val) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// Like objc_storeWeak, but stores nil if the new object is deallocating +// or the new object's class does not support weak references. +// Returns the value stored (either the new object or nil). +OBJC_EXPORT +id +objc_storeWeakOrNil(id *location, id obj) + OBJC_AVAILABLE(10.11, 9.0, 9.0, 1.0); + +// Like objc_initWeak, but stores nil if the new object is deallocating +// or the new object's class does not support weak references. +// Returns the value stored (either the new object or nil). 
+OBJC_EXPORT +id +objc_initWeakOrNil(id *location, id val) + OBJC_AVAILABLE(10.11, 9.0, 9.0, 1.0); + +OBJC_EXPORT +void +objc_destroyWeak(id *location) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +objc_copyWeak(id *to, id *from) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +objc_moveWeak(id *to, id *from) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +OBJC_EXPORT +void +_objc_autoreleasePoolPrint(void) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT BOOL objc_should_deallocate(id object) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT void objc_clear_deallocating(id object) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +// to make CF link for now + +OBJC_EXPORT +void * +_objc_autoreleasePoolPush(void) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +OBJC_EXPORT +void +_objc_autoreleasePoolPop(void *context) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +// Extra @encode data for XPC, or NULL +OBJC_EXPORT const char *_protocol_getMethodTypeEncoding(Protocol *p, SEL sel, BOOL isRequiredMethod, BOOL isInstanceMethod) + OBJC_AVAILABLE(10.8, 6.0, 9.0, 1.0); + + +// API to only be called by classes that provide their own reference count storage + +OBJC_EXPORT +void +_objc_deallocOnMainThreadHelper(void *context) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +// On async versus sync deallocation and the _dealloc2main flag +// +// Theory: +// +// If order matters, then code must always: [self dealloc]. +// If order doesn't matter, then always async should be safe. +// +// Practice: +// +// The _dealloc2main bit is set for GUI objects that may be retained by other +// threads. Once deallocation begins on the main thread, doing more async +// deallocation will at best cause extra UI latency and at worst cause +// use-after-free bugs in unretained delegate style patterns. Yes, this is +// extremely fragile. Yes, in the long run, developers should switch to weak +// references. +// +// Note is NOT safe to do any equality check against the result of +// dispatch_get_current_queue(). The main thread can and does drain more than +// one dispatch queue. That is why we call pthread_main_np(). +// + +typedef enum { + _OBJC_RESURRECT_OBJECT = -1, /* _logicBlock has called -retain, and scheduled a -release for later. */ + _OBJC_DEALLOC_OBJECT_NOW = 1, /* call [self dealloc] immediately. */ + _OBJC_DEALLOC_OBJECT_LATER = 2 /* call [self dealloc] on the main queue. */ +} _objc_object_disposition_t; + +#define _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC_BLOCK(_rc_ivar, _logicBlock) \ + -(id)retain { \ + /* this will fail to compile if _rc_ivar is an unsigned type */ \ + int _retain_count_ivar_must_not_be_unsigned[0L - (__typeof__(_rc_ivar))-1] __attribute__((unused)); \ + __typeof__(_rc_ivar) _prev = __sync_fetch_and_add(&_rc_ivar, 2); \ + if (_prev < -2) { /* specifically allow resurrection from logical 0. */ \ + __builtin_trap(); /* BUG: retain of over-released ref */ \ + } \ + return self; \ + } \ + -(oneway void)release { \ + __typeof__(_rc_ivar) _prev = __sync_fetch_and_sub(&_rc_ivar, 2); \ + if (_prev > 0) { \ + return; \ + } else if (_prev < 0) { \ + __builtin_trap(); /* BUG: over-release */ \ + } \ + _objc_object_disposition_t fate = _logicBlock(self); \ + if (fate == _OBJC_RESURRECT_OBJECT) { \ + return; \ + } \ + /* mark the object as deallocating. 
*/ \ + if (!__sync_bool_compare_and_swap(&_rc_ivar, -2, 1)) { \ + __builtin_trap(); /* BUG: dangling ref did a retain */ \ + } \ + if (fate == _OBJC_DEALLOC_OBJECT_NOW) { \ + [self dealloc]; \ + } else if (fate == _OBJC_DEALLOC_OBJECT_LATER) { \ + dispatch_barrier_async_f(dispatch_get_main_queue(), self, \ + _objc_deallocOnMainThreadHelper); \ + } else { \ + __builtin_trap(); /* BUG: bogus fate value */ \ + } \ + } \ + -(NSUInteger)retainCount { \ + return (_rc_ivar + 2) >> 1; \ + } \ + -(BOOL)_tryRetain { \ + __typeof__(_rc_ivar) _prev; \ + do { \ + _prev = _rc_ivar; \ + if (_prev & 1) { \ + return 0; \ + } else if (_prev == -2) { \ + return 0; \ + } else if (_prev < -2) { \ + __builtin_trap(); /* BUG: over-release elsewhere */ \ + } \ + } while ( ! __sync_bool_compare_and_swap(&_rc_ivar, _prev, _prev + 2)); \ + return 1; \ + } \ + -(BOOL)_isDeallocating { \ + if (_rc_ivar == -2) { \ + return 1; \ + } else if (_rc_ivar < -2) { \ + __builtin_trap(); /* BUG: over-release elsewhere */ \ + } \ + return _rc_ivar & 1; \ + } + +#define _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, _dealloc2main) \ + _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC_BLOCK(_rc_ivar, (^(id _self_ __attribute__((unused))) { \ + if (_dealloc2main && !pthread_main_np()) { \ + return _OBJC_DEALLOC_OBJECT_LATER; \ + } else { \ + return _OBJC_DEALLOC_OBJECT_NOW; \ + } \ + })) + +#define _OBJC_SUPPORTED_INLINE_REFCNT(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 0) +#define _OBJC_SUPPORTED_INLINE_REFCNT_WITH_DEALLOC2MAIN(_rc_ivar) _OBJC_SUPPORTED_INLINE_REFCNT_LOGIC(_rc_ivar, 1) + +__END_DECLS + +#endif diff --git a/runtime/objc-layout.mm b/runtime/objc-layout.mm new file mode 100644 index 0000000..d125f08 --- /dev/null +++ b/runtime/objc-layout.mm @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2004-2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include +#include + +#include "objc-private.h" + +/********************************************************************** +* Object Layouts. +* +* Layouts are used by the garbage collector to identify references from +* the object to other objects. +* +* Layout information is in the form of a '\0' terminated byte string. +* Each byte contains a word skip count in the high nibble and a +* consecutive references count in the low nibble. Counts that exceed 15 are +* continued in the succeeding byte with a zero in the opposite nibble. +* Objects that should be scanned conservatively will have a NULL layout. +* Objects that have no references have a empty byte string. 
+* +* Example; +* +* For a class with pointers at offsets 4,12, 16, 32-128 +* the layout is { 0x11, 0x12, 0x3f, 0x0a, 0x00 } or +* skip 1 - 1 reference (4) +* skip 1 - 2 references (12, 16) +* skip 3 - 15 references (32-88) +* no skip - 10 references (92-128) +* end +* +**********************************************************************/ + + +/********************************************************************** +* compress_layout +* Allocates and returns a compressed string matching the given layout bitmap. +**********************************************************************/ +static unsigned char * +compress_layout(const uint8_t *bits, size_t bitmap_bits, bool weak) +{ + bool all_set = YES; + bool none_set = YES; + unsigned char *result; + + // overallocate a lot; reallocate at correct size later + unsigned char * const layout = (unsigned char *) + calloc(bitmap_bits + 1, 1); + unsigned char *l = layout; + + size_t i = 0; + while (i < bitmap_bits) { + size_t skip = 0; + size_t scan = 0; + + // Count one range each of skip and scan. + while (i < bitmap_bits) { + uint8_t bit = (uint8_t)((bits[i/8] >> (i % 8)) & 1); + if (bit) break; + i++; + skip++; + } + while (i < bitmap_bits) { + uint8_t bit = (uint8_t)((bits[i/8] >> (i % 8)) & 1); + if (!bit) break; + i++; + scan++; + none_set = NO; + } + + // Record skip and scan + if (skip) all_set = NO; + if (scan) none_set = NO; + while (skip > 0xf) { + *l++ = 0xf0; + skip -= 0xf; + } + if (skip || scan) { + *l = (uint8_t)(skip << 4); // NOT incremented - merges with scan + while (scan > 0xf) { + *l++ |= 0x0f; // May merge with short skip; must calloc + scan -= 0xf; + } + *l++ |= scan; // NOT checked for zero - always increments + // May merge with short skip; must calloc + } + } + + // insert terminating byte + *l++ = '\0'; + + // return result + if (none_set && weak) { + result = NULL; // NULL weak layout means none-weak + } else if (all_set && !weak) { + result = NULL; // NULL ivar layout means all-scanned + } else { + result = (unsigned char *)strdup((char *)layout); + } + free(layout); + return result; +} + + +static void set_bits(layout_bitmap bits, size_t which, size_t count) +{ + // fixme optimize for byte/word at a time + size_t bit; + for (bit = which; bit < which + count && bit < bits.bitCount; bit++) { + bits.bits[bit/8] |= 1 << (bit % 8); + } + if (bit == bits.bitCount && bit < which + count) { + // couldn't fit full type in bitmap + _objc_fatal("layout bitmap too short"); + } +} + +static void clear_bits(layout_bitmap bits, size_t which, size_t count) +{ + // fixme optimize for byte/word at a time + size_t bit; + for (bit = which; bit < which + count && bit < bits.bitCount; bit++) { + bits.bits[bit/8] &= ~(1 << (bit % 8)); + } + if (bit == bits.bitCount && bit < which + count) { + // couldn't fit full type in bitmap + _objc_fatal("layout bitmap too short"); + } +} + +static void move_bits(layout_bitmap bits, size_t src, size_t dst, + size_t count) +{ + // fixme optimize for byte/word at a time + + if (dst == src) { + return; + } + else if (dst > src) { + // Copy backwards in case of overlap + size_t pos = count; + while (pos--) { + size_t srcbit = src + pos; + size_t dstbit = dst + pos; + if (bits.bits[srcbit/8] & (1 << (srcbit % 8))) { + bits.bits[dstbit/8] |= 1 << (dstbit % 8); + } else { + bits.bits[dstbit/8] &= ~(1 << (dstbit % 8)); + } + } + } + else { + // Copy forwards in case of overlap + size_t pos; + for (pos = 0; pos < count; pos++) { + size_t srcbit = src + pos; + size_t dstbit = dst + pos; + if (bits.bits[srcbit/8] 
& (1 << (srcbit % 8))) { + bits.bits[dstbit/8] |= 1 << (dstbit % 8); + } else { + bits.bits[dstbit/8] &= ~(1 << (dstbit % 8)); + } + } + } +} + +// emacs autoindent hack - it doesn't like the loop in set_bits/clear_bits +#if 0 +} } +#endif + + +static void decompress_layout(const unsigned char *layout_string, layout_bitmap bits) +{ + unsigned char c; + size_t bit = 0; + while ((c = *layout_string++)) { + unsigned char skip = (c & 0xf0) >> 4; + unsigned char scan = (c & 0x0f); + bit += skip; + set_bits(bits, bit, scan); + bit += scan; + } +} + + +/*********************************************************************** +* layout_bitmap_create +* Allocate a layout bitmap. +* The new bitmap spans the given instance size bytes. +* The start of the bitmap is filled from the given layout string (which +* spans an instance size of layoutStringSize); the rest is zero-filled. +* The returned bitmap must be freed with layout_bitmap_free(). +**********************************************************************/ +layout_bitmap +layout_bitmap_create(const unsigned char *layout_string, + size_t layoutStringInstanceSize, + size_t instanceSize, bool weak) +{ + layout_bitmap result; + size_t words = instanceSize / sizeof(id); + + result.weak = weak; + result.bitCount = words; + result.bitsAllocated = words; + result.bits = (uint8_t *)calloc((words+7)/8, 1); + + if (!layout_string) { + if (!weak) { + // NULL ivar layout means all-scanned + // (but only up to layoutStringSize instance size) + set_bits(result, 0, layoutStringInstanceSize/sizeof(id)); + } else { + // NULL weak layout means none-weak. + } + } else { + decompress_layout(layout_string, result); + } + + return result; +} + + +/*********************************************************************** + * layout_bitmap_create_empty + * Allocate a layout bitmap. + * The new bitmap spans the given instance size bytes. + * The bitmap is empty, to represent an object whose ivars are completely unscanned. + * The returned bitmap must be freed with layout_bitmap_free(). 
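+ *
+ * A minimal usage sketch (the instance size and ivar offset here are
+ * hypothetical, chosen only for illustration):
+ *
+ *   layout_bitmap bm = layout_bitmap_create_empty(8 * sizeof(id), NO);
+ *   layout_bitmap_set_ivar(bm, "@", 2 * sizeof(id));   // word 2 holds an id
+ *   const unsigned char *s = layout_string_create(bm); // nibble-encoded string
+ *   layout_bitmap_free(bm);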
+ **********************************************************************/ +layout_bitmap +layout_bitmap_create_empty(size_t instanceSize, bool weak) +{ + layout_bitmap result; + size_t words = instanceSize / sizeof(id); + + result.weak = weak; + result.bitCount = words; + result.bitsAllocated = words; + result.bits = (uint8_t *)calloc((words+7)/8, 1); + + return result; +} + +void +layout_bitmap_free(layout_bitmap bits) +{ + if (bits.bits) free(bits.bits); +} + +const unsigned char * +layout_string_create(layout_bitmap bits) +{ + const unsigned char *result = + compress_layout(bits.bits, bits.bitCount, bits.weak); + +#if DEBUG + // paranoia: cycle to bitmap and back to string again, and compare + layout_bitmap check = layout_bitmap_create(result, bits.bitCount*sizeof(id), + bits.bitCount*sizeof(id), bits.weak); + unsigned char *result2 = + compress_layout(check.bits, check.bitCount, check.weak); + if (result != result2 && 0 != strcmp((char*)result, (char *)result2)) { + layout_bitmap_print(bits); + layout_bitmap_print(check); + _objc_fatal("libobjc bug: mishandled layout bitmap"); + } + free(result2); + layout_bitmap_free(check); +#endif + + return result; +} + + +void +layout_bitmap_set_ivar(layout_bitmap bits, const char *type, size_t offset) +{ + // fixme only handles some types + size_t bit = offset / sizeof(id); + + if (!type) return; + if (type[0] == '@' || 0 == strcmp(type, "^@")) { + // id + // id * + // Block ("@?") + set_bits(bits, bit, 1); + } + else if (type[0] == '[') { + // id[] + char *t; + unsigned long count = strtoul(type+1, &t, 10); + if (t && t[0] == '@') { + set_bits(bits, bit, count); + } + } + else if (strchr(type, '@')) { + _objc_inform("warning: failing to set GC layout for '%s'\n", type); + } +} + + + +/*********************************************************************** +* layout_bitmap_grow +* Expand a layout bitmap to span newCount bits. +* The new bits are undefined. +**********************************************************************/ +void +layout_bitmap_grow(layout_bitmap *bits, size_t newCount) +{ + if (bits->bitCount >= newCount) return; + bits->bitCount = newCount; + if (bits->bitsAllocated < newCount) { + size_t newAllocated = bits->bitsAllocated * 2; + if (newAllocated < newCount) newAllocated = newCount; + bits->bits = (uint8_t *) + realloc(bits->bits, (newAllocated+7) / 8); + bits->bitsAllocated = newAllocated; + } + assert(bits->bitsAllocated >= bits->bitCount); + assert(bits->bitsAllocated >= newCount); +} + + +/*********************************************************************** +* layout_bitmap_slide +* Slide the end of a layout bitmap farther from the start. +* Slides bits [oldPos, bits.bitCount) to [newPos, bits.bitCount+newPos-oldPos) +* Bits [oldPos, newPos) are zero-filled. +* The bitmap is expanded and bitCount updated if necessary. +* newPos >= oldPos. +**********************************************************************/ +void +layout_bitmap_slide(layout_bitmap *bits, size_t oldPos, size_t newPos) +{ + size_t shift; + size_t count; + + if (oldPos == newPos) return; + if (oldPos > newPos) _objc_fatal("layout bitmap sliding backwards"); + + shift = newPos - oldPos; + count = bits->bitCount - oldPos; + layout_bitmap_grow(bits, bits->bitCount + shift); + move_bits(*bits, oldPos, newPos, count); // slide + clear_bits(*bits, oldPos, shift); // zero-fill +} + + +/*********************************************************************** +* layout_bitmap_slide_anywhere +* Slide the end of a layout bitmap relative to the start. 
+* Like layout_bitmap_slide, but can slide backwards too. +* The end of the bitmap is truncated. +**********************************************************************/ +void +layout_bitmap_slide_anywhere(layout_bitmap *bits, size_t oldPos, size_t newPos) +{ + size_t shift; + size_t count; + + if (oldPos == newPos) return; + + if (oldPos < newPos) { + layout_bitmap_slide(bits, oldPos, newPos); + return; + } + + shift = oldPos - newPos; + count = bits->bitCount - oldPos; + move_bits(*bits, oldPos, newPos, count); // slide + bits->bitCount -= shift; +} + + +/*********************************************************************** +* layout_bitmap_splat +* Pastes the contents of bitmap src to the start of bitmap dst. +* dst bits between the end of src and oldSrcInstanceSize are zeroed. +* dst must be at least as long as src. +* Returns YES if any of dst's bits were changed. +**********************************************************************/ +bool +layout_bitmap_splat(layout_bitmap dst, layout_bitmap src, + size_t oldSrcInstanceSize) +{ + bool changed; + size_t oldSrcBitCount; + size_t bit; + + if (dst.bitCount < src.bitCount) _objc_fatal("layout bitmap too short"); + + changed = NO; + oldSrcBitCount = oldSrcInstanceSize / sizeof(id); + + // fixme optimize for byte/word at a time + for (bit = 0; bit < oldSrcBitCount; bit++) { + int dstset = dst.bits[bit/8] & (1 << (bit % 8)); + int srcset = (bit < src.bitCount) + ? src.bits[bit/8] & (1 << (bit % 8)) + : 0; + if (dstset != srcset) { + changed = YES; + if (srcset) { + dst.bits[bit/8] |= 1 << (bit % 8); + } else { + dst.bits[bit/8] &= ~(1 << (bit % 8)); + } + } + } + + return changed; +} + + +/*********************************************************************** +* layout_bitmap_or +* Set dst=dst|src. +* dst must be at least as long as src. +* Returns YES if any of dst's bits were changed. +**********************************************************************/ +bool +layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg) +{ + bool changed = NO; + size_t bit; + + if (dst.bitCount < src.bitCount) { + _objc_fatal("layout_bitmap_or: layout bitmap too short%s%s", + msg ? ": " : "", msg ? msg : ""); + } + + // fixme optimize for byte/word at a time + for (bit = 0; bit < src.bitCount; bit++) { + int dstset = dst.bits[bit/8] & (1 << (bit % 8)); + int srcset = src.bits[bit/8] & (1 << (bit % 8)); + if (srcset && !dstset) { + changed = YES; + dst.bits[bit/8] |= 1 << (bit % 8); + } + } + + return changed; +} + + +/*********************************************************************** +* layout_bitmap_clear +* Set dst=dst&~src. +* dst must be at least as long as src. +* Returns YES if any of dst's bits were changed. +**********************************************************************/ +bool +layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg) +{ + bool changed = NO; + size_t bit; + + if (dst.bitCount < src.bitCount) { + _objc_fatal("layout_bitmap_clear: layout bitmap too short%s%s", + msg ? ": " : "", msg ? 
msg : "");
+    }
+
+    // fixme optimize for byte/word at a time
+    for (bit = 0; bit < src.bitCount; bit++) {
+        int dstset = dst.bits[bit/8] & (1 << (bit % 8));
+        int srcset = src.bits[bit/8] & (1 << (bit % 8));
+        if (srcset && dstset) {
+            changed = YES;
+            dst.bits[bit/8] &= ~(1 << (bit % 8));
+        }
+    }
+
+    return changed;
+}
+
+
+void
+layout_bitmap_print(layout_bitmap bits)
+{
+    size_t i;
+    printf("%zu: ", bits.bitCount);
+    for (i = 0; i < bits.bitCount; i++) {
+        int set = bits.bits[i/8] & (1 << (i % 8));
+        printf("%c", set ? '#' : '.');
+    }
+    printf("\n");
+}
+
+#if 0
+// The code below may be useful when interpreting ivar types more precisely.
+
+/**********************************************************************
+* mark_offset_for_layout
+*
+* Marks the appropriate bit in the bits array corresponding to the
+* offset of a reference. If we are scanning a nested pointer structure,
+* the bits array will be NULL and this function does nothing.
+*
+**********************************************************************/
+static void mark_offset_for_layout(long offset, long bits_size, unsigned char *bits) {
+    // references are ignored if bits is NULL
+    if (bits) {
+        long slot = offset / sizeof(long);
+
+        // determine byte index using (offset / 8 bits per byte)
+        long i_byte = slot >> 3;
+
+        // if the byte index is valid
+        if (i_byte < bits_size) {
+            // set the (offset / 8 bits per byte)th bit
+            bits[i_byte] |= 1 << (slot & 7);
+        } else {
+            // offset not within instance size
+            _objc_inform ("layout - offset exceeds instance size");
+        }
+    }
+}
+
+/**********************************************************************
+* skip_ivar_type_name
+*
+* Skip over the name of a field/class in an ivar type string. Names
+* are in the form of a double-quoted string. Returns the remaining
+* string.
+*
+**********************************************************************/
+static char *skip_ivar_type_name(char *type) {
+    // current character
+    char ch;
+
+    // if there is an open quote
+    if (*type == '\"') {
+        // skip quote
+        type++;
+
+        // while no closing quote
+        while ((ch = *type) != '\"') {
+            // if end of string return end of string
+            if (!ch) return type;
+
+            // skip character
+            type++;
+        }
+
+        // skip closing quote
+        type++;
+    }
+
+    // return remaining string
+    return type;
+}
+
+
+/**********************************************************************
+* skip_ivar_struct_name
+*
+* Skip over the name of a struct in an ivar type string. Names
+* may be followed by an equals sign. Returns the remaining string.
+*
+**********************************************************************/
+static char *skip_ivar_struct_name(char *type) {
+    // get first character
+    char ch = *type;
+
+    if (ch == _C_UNDEF) {
+        // skip undefined name
+        type++;
+    } else if ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '_') {
+        // if alphabetic
+
+        // scan alphanumerics
+        do {
+            // next character
+            ch = *++type;
+        } while ((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '_' || (ch >= '0' && ch <= '9'));
+    } else {
+        // no struct name present
+        return type;
+    }
+
+    // skip equals sign
+    if (*type == '=') type++;
+
+    return type;
+}
+
+
+/**********************************************************************
+* scan_basic_ivar_type
+*
+* Determines the size and alignment of a basic ivar type. If the basic
+* type is a possible reference to another garbage collected type then
+* is_reference is set to true (false otherwise). Returns the remaining
+* string.
+* +**********************************************************************/ +static char *scan_ivar_type_for_layout(char *type, long offset, long bits_size, unsigned char *bits, long *next_offset); +static char *scan_basic_ivar_type(char *type, long *size, long *alignment, bool *is_reference) { + // assume it is a non-reference type + *is_reference = NO; + + // get the first character (advancing string) + const char *full_type = type; + char ch = *type++; + + // GCC 4 uses for const type*. + if (ch == _C_CONST) ch = *type++; + + // act on first character + switch (ch) { + case _C_ID: { + // ID type + + // skip over optional class name + type = skip_ivar_type_name(type); + + // size and alignment of an id type + *size = sizeof(id); + *alignment = __alignof(id); + + // is a reference type + *is_reference = YES; + break; + } + case _C_PTR: { + // C pointer type + + // skip underlying type + long ignored_offset; + type = scan_ivar_type_for_layout(type, 0, 0, NULL, &ignored_offset); + + // size and alignment of a generic pointer type + *size = sizeof(void *); + *alignment = __alignof(void *); + + // is a reference type + *is_reference = YES; + break; + } + case _C_CHARPTR: { + // C string + + // size and alignment of a char pointer type + *size = sizeof(char *); + *alignment = __alignof(char *); + + // is a reference type + *is_reference = YES; + break; + } + case _C_CLASS: + case _C_SEL: { + // classes and selectors are ignored for now + *size = sizeof(void *); + *alignment = __alignof(void *); + break; + } + case _C_CHR: + case _C_UCHR: { + // char and unsigned char + *size = sizeof(char); + *alignment = __alignof(char); + break; + } + case _C_SHT: + case _C_USHT: { + // short and unsigned short + *size = sizeof(short); + *alignment = __alignof(short); + break; + } + case _C_ATOM: + case _C_INT: + case _C_UINT: { + // int and unsigned int + *size = sizeof(int); + *alignment = __alignof(int); + break; + } + case _C_LNG: + case _C_ULNG: { + // long and unsigned long + *size = sizeof(long); + *alignment = __alignof(long); + break; + } + case _C_LNG_LNG: + case _C_ULNG_LNG: { + // long long and unsigned long long + *size = sizeof(long long); + *alignment = __alignof(long long); + break; + } + case _C_VECTOR: { + // vector + *size = 16; + *alignment = 16; + break; + } + case _C_FLT: { + // float + *size = sizeof(float); + *alignment = __alignof(float); + break; + } + case _C_DBL: { + // double + *size = sizeof(double); + *alignment = __alignof(double); + break; + } + case _C_BFLD: { + // bit field + + // get number of bits in bit field (advance type string) + long lng = strtol(type, &type, 10); + + // while next type is a bit field + while (*type == _C_BFLD) { + // skip over _C_BFLD + type++; + + // get next bit field length + long next_lng = strtol(type, &type, 10); + + // if spans next word then align to next word + if ((lng & ~31) != ((lng + next_lng) & ~31)) lng = (lng + 31) & ~31; + + // increment running length + lng += next_lng; + + // skip over potential field name + type = skip_ivar_type_name(type); + } + + // determine number of bytes bits represent + *size = (lng + 7) / 8; + + // byte alignment + *alignment = __alignof(char); + break; + } + case _C_BOOL: { + // double + *size = sizeof(BOOL); + *alignment = __alignof(BOOL); + break; + } + case _C_VOID: { + // skip void types + *size = 0; + *alignment = __alignof(char); + break; + } + case _C_UNDEF: { + *size = 0; + *alignment = __alignof(char); + break; + } + default: { + // unhandled type + _objc_fatal("unrecognized character \'%c\' in 
ivar type: \"%s\"", ch, full_type); + } + } + + return type; +} + + +/********************************************************************** +* scan_ivar_type_for_layout +* +* Scan an ivar type string looking for references. The offset indicates +* where the ivar begins. bits is a byte array of size bits_size used to +* contain the references bit map. next_offset is the offset beyond the +* ivar. Returns the remaining string. +* +**********************************************************************/ +static char *scan_ivar_type_for_layout(char *type, long offset, long bits_size, unsigned char *bits, long *next_offset) { + long size; // size of a basic type + long alignment; // alignment of the basic type + bool is_reference; // true if the type indicates a reference to a garbage collected object + + // get the first character + char ch = *type; + + // GCC 4 uses for const type*. + if (ch == _C_CONST) ch = *++type; + + // act on first character + switch (ch) { + case _C_ARY_B: { + // array type + + // get the array length + long lng = strtol(type + 1, &type, 10); + + // next type will be where to advance the type string once the array is processed + char *next_type = type; + + // repeat the next type x lng + if (!lng) { + next_type = scan_ivar_type_for_layout(type, 0, 0, NULL, &offset); + } else { + while (lng--) { + // repeatedly scan the same type + next_type = scan_ivar_type_for_layout(type, offset, bits_size, bits, &offset); + } + } + + // advance the type now + type = next_type; + + // after the end of the array + *next_offset = offset; + + // advance over closing bracket + if (*type == _C_ARY_E) type++; + else _objc_inform("missing \'%c\' in ivar type.", _C_ARY_E); + + break; + } + case _C_UNION_B: { + // union type + + // skip over possible union name + type = skip_ivar_struct_name(type + 1); + + // need to accumulate the maximum element offset + long max_offset = 0; + + // while not closing paren + while ((ch = *type) && ch != _C_UNION_E) { + // skip over potential field name + type = skip_ivar_type_name(type); + + // scan type + long union_offset; + type = scan_ivar_type_for_layout(type, offset, bits_size, bits, &union_offset); + + // adjust the maximum element offset + if (max_offset < union_offset) max_offset = union_offset; + } + + // after the largest element + *next_offset = max_offset; + + // advance over closing paren + if (ch == _C_UNION_E) { + type++; + } else { + _objc_inform("missing \'%c\' in ivar type", _C_UNION_E); + } + + break; + } + case _C_STRUCT_B: { + // struct type + + // skip over possible struct name + type = skip_ivar_struct_name(type + 1); + + // while not closing brace + while ((ch = *type) && ch != _C_STRUCT_E) { + // skip over potential field name + type = skip_ivar_type_name(type); + + // scan type + type = scan_ivar_type_for_layout(type, offset, bits_size, bits, &offset); + } + + // after the end of the struct + *next_offset = offset; + + // advance over closing brace + if (ch == _C_STRUCT_E) type++; + else _objc_inform("missing \'%c\' in ivar type", _C_STRUCT_E); + + break; + } + default: { + // basic type + + // scan type + type = scan_basic_ivar_type(type, &size, &alignment, &is_reference); + + // create alignment mask + alignment--; + + // align offset + offset = (offset + alignment) & ~alignment; + + // if is a reference then mark in the bit map + if (is_reference) mark_offset_for_layout(offset, bits_size, bits); + + // after the basic type + *next_offset = offset + size; + break; + } + } + + // return remainder of type string + return type; +} + 
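+
+// A minimal round-trip sketch for the nibble encoding described at the top of
+// this file. It is compiled out along with the scanner above; the class shape
+// (pointers at offsets 4, 12, 16 and 32-128, assuming a 32-bit word size) is
+// the hypothetical example from the header comment, not a real class.
+static void layout_encoding_sketch(void)
+{
+    // skip 1/scan 1, skip 1/scan 2, skip 3/scan 15, skip 0/scan 10, end
+    static const unsigned char example[] = { 0x11, 0x12, 0x3f, 0x0a, 0x00 };
+
+    // Rebuild the bitmap from the layout string (33 words cover the example).
+    layout_bitmap bm = layout_bitmap_create(example, 33 * sizeof(id),
+                                            33 * sizeof(id), NO);
+    layout_bitmap_print(bm);    // '#' marks scanned words, '.' marks skipped
+
+    // Compressing the bitmap again should reproduce the same byte string.
+    const unsigned char *round_trip = layout_string_create(bm);
+    layout_bitmap_free(bm);
+    if (round_trip) free((void *)round_trip);
+}
+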
+#endif diff --git a/runtime/objc-load.h b/runtime/objc-load.h new file mode 100644 index 0000000..8ec1fa2 --- /dev/null +++ b/runtime/objc-load.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 1999-2001, 2005-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * objc-load.h + * Copyright 1988-1996, NeXT Software, Inc. + */ + +#ifndef _OBJC_LOAD_H_ +#define _OBJC_LOAD_H_ + +#include + +#include + +/* dynamically loading Mach-O object files that contain Objective-C code */ + +OBJC_EXPORT long objc_loadModules ( + char *modlist[], + void *errStream, + void (*class_callback) (Class, Category), + /*headerType*/ struct mach_header **hdr_addr, + char *debug_file +) OBJC2_UNAVAILABLE; +OBJC_EXPORT int objc_loadModule ( + char * moduleName, + void (*class_callback) (Class, Category), + int * errorCode +) OBJC2_UNAVAILABLE; +OBJC_EXPORT long objc_unloadModules( + void *errorStream, /* input (optional) */ + void (*unloadCallback)(Class, Category) /* input (optional) */ +) OBJC2_UNAVAILABLE; + +#endif /* _OBJC_LOAD_H_ */ diff --git a/runtime/objc-load.mm b/runtime/objc-load.mm new file mode 100644 index 0000000..3e00a03 --- /dev/null +++ b/runtime/objc-load.mm @@ -0,0 +1,167 @@ +/* + * Copyright (c) 1999-2001, 2004-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * objc-load.m + * Copyright 1988-1996, NeXT Software, Inc. + * Author: s. naroff + * + */ + +#include "objc-private.h" +#include "objc-load.h" + +#if !__OBJC2__ && !TARGET_OS_WIN32 + +extern void (*callbackFunction)( Class, Category ); + + +/********************************************************************************** +* objc_loadModule. +* +* NOTE: Loading isn't really thread safe. 
If a load message recursively calls +* objc_loadModules() both sets will be loaded correctly, but if the original +* caller calls objc_unloadModules() it will probably unload the wrong modules. +* If a load message calls objc_unloadModules(), then it will unload +* the modules currently being loaded, which will probably cause a crash. +* +* Error handling is still somewhat crude. If we encounter errors while +* linking up classes or categories, we will not recover correctly. +* +* I removed attempts to lock the class hashtable, since this introduced +* deadlock which was hard to remove. The only way you can get into trouble +* is if one thread loads a module while another thread tries to access the +* loaded classes (using objc_lookUpClass) before the load is complete. +**********************************************************************************/ +int objc_loadModule(char *moduleName, void (*class_callback) (Class, Category), int *errorCode) +{ + int successFlag = 1; + int locErrorCode; + NSObjectFileImage objectFileImage; + NSObjectFileImageReturnCode code; + + // So we don't have to check this everywhere + if (errorCode == NULL) + errorCode = &locErrorCode; + + if (moduleName == NULL) + { + *errorCode = NSObjectFileImageInappropriateFile; + return 0; + } + + if (_dyld_present () == 0) + { + *errorCode = NSObjectFileImageFailure; + return 0; + } + + callbackFunction = class_callback; + code = NSCreateObjectFileImageFromFile (moduleName, &objectFileImage); + if (code != NSObjectFileImageSuccess) + { + *errorCode = code; + return 0; + } + + if (NSLinkModule(objectFileImage, moduleName, NSLINKMODULE_OPTION_RETURN_ON_ERROR) == NULL) { + NSLinkEditErrors error; + int errorNum; + const char *fileName, *errorString; + NSLinkEditError(&error, &errorNum, &fileName, &errorString); + // These errors may overlap with other errors that objc_loadModule returns in other failure cases. + *errorCode = error; + return 0; + } + callbackFunction = NULL; + + + return successFlag; +} + +/********************************************************************************** +* objc_loadModules. +**********************************************************************************/ +/* Lock for dynamic loading and unloading. */ +// static OBJC_DECLARE_LOCK (loadLock); + + +long objc_loadModules (char * modlist[], + void * errStream, + void (*class_callback) (Class, Category), + headerType ** hdr_addr, + char * debug_file) +{ + char ** modules; + int code; + int itWorked; + + if (modlist == 0) + return 0; + + for (modules = &modlist[0]; *modules != 0; modules++) + { + itWorked = objc_loadModule (*modules, class_callback, &code); + if (itWorked == 0) + { + //if (errStream) + // NXPrintf ((NXStream *) errStream, "objc_loadModules(%s) code = %d\n", *modules, code); + return 1; + } + + if (hdr_addr) + *(hdr_addr++) = 0; + } + + return 0; +} + +/********************************************************************************** +* objc_unloadModules. +* +* NOTE: Unloading isn't really thread safe. If an unload message calls +* objc_loadModules() or objc_unloadModules(), then the current call +* to objc_unloadModules() will probably unload the wrong stuff. 
+**********************************************************************************/ + +long objc_unloadModules (void * errStream, + void (*unload_callback) (Class, Category)) +{ + headerType * header_addr = 0; + int errflag = 0; + + // TODO: to make unloading work, should get the current header + + if (header_addr) + { + ; // TODO: unload the current header + } + else + { + errflag = 1; + } + + return errflag; +} + +#endif diff --git a/runtime/objc-loadmethod.h b/runtime/objc-loadmethod.h new file mode 100644 index 0000000..f1f2b9c --- /dev/null +++ b/runtime/objc-loadmethod.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2004-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-loadmethod.h +* Support for +load methods. +**********************************************************************/ + +#ifndef _OBJC_LOADMETHOD_H +#define _OBJC_LOADMETHOD_H + +#include "objc-private.h" + +__BEGIN_DECLS + +extern void add_class_to_loadable_list(Class cls); +extern void add_category_to_loadable_list(Category cat); +extern void remove_class_from_loadable_list(Class cls); +extern void remove_category_from_loadable_list(Category cat); + +extern void call_load_methods(void); + +__END_DECLS + +#endif diff --git a/runtime/objc-loadmethod.mm b/runtime/objc-loadmethod.mm new file mode 100644 index 0000000..55ef00b --- /dev/null +++ b/runtime/objc-loadmethod.mm @@ -0,0 +1,367 @@ +/* + * Copyright (c) 2004-2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-loadmethod.m +* Support for +load methods. 
+**********************************************************************/ + +#include "objc-loadmethod.h" +#include "objc-private.h" + +typedef void(*load_method_t)(id, SEL); + +struct loadable_class { + Class cls; // may be nil + IMP method; +}; + +struct loadable_category { + Category cat; // may be nil + IMP method; +}; + + +// List of classes that need +load called (pending superclass +load) +// This list always has superclasses first because of the way it is constructed +static struct loadable_class *loadable_classes = nil; +static int loadable_classes_used = 0; +static int loadable_classes_allocated = 0; + +// List of categories that need +load called (pending parent class +load) +static struct loadable_category *loadable_categories = nil; +static int loadable_categories_used = 0; +static int loadable_categories_allocated = 0; + + +/*********************************************************************** +* add_class_to_loadable_list +* Class cls has just become connected. Schedule it for +load if +* it implements a +load method. +**********************************************************************/ +void add_class_to_loadable_list(Class cls) +{ + IMP method; + + loadMethodLock.assertLocked(); + + method = cls->getLoadMethod(); + if (!method) return; // Don't bother if cls has no +load method + + if (PrintLoading) { + _objc_inform("LOAD: class '%s' scheduled for +load", + cls->nameForLogging()); + } + + if (loadable_classes_used == loadable_classes_allocated) { + loadable_classes_allocated = loadable_classes_allocated*2 + 16; + loadable_classes = (struct loadable_class *) + realloc(loadable_classes, + loadable_classes_allocated * + sizeof(struct loadable_class)); + } + + loadable_classes[loadable_classes_used].cls = cls; + loadable_classes[loadable_classes_used].method = method; + loadable_classes_used++; +} + + +/*********************************************************************** +* add_category_to_loadable_list +* Category cat's parent class exists and the category has been attached +* to its class. Schedule this category for +load after its parent class +* becomes connected and has its own +load method called. +**********************************************************************/ +void add_category_to_loadable_list(Category cat) +{ + IMP method; + + loadMethodLock.assertLocked(); + + method = _category_getLoadMethod(cat); + + // Don't bother if cat has no +load method + if (!method) return; + + if (PrintLoading) { + _objc_inform("LOAD: category '%s(%s)' scheduled for +load", + _category_getClassName(cat), _category_getName(cat)); + } + + if (loadable_categories_used == loadable_categories_allocated) { + loadable_categories_allocated = loadable_categories_allocated*2 + 16; + loadable_categories = (struct loadable_category *) + realloc(loadable_categories, + loadable_categories_allocated * + sizeof(struct loadable_category)); + } + + loadable_categories[loadable_categories_used].cat = cat; + loadable_categories[loadable_categories_used].method = method; + loadable_categories_used++; +} + + +/*********************************************************************** +* remove_class_from_loadable_list +* Class cls may have been loadable before, but it is now no longer +* loadable (because its image is being unmapped). 
+**********************************************************************/ +void remove_class_from_loadable_list(Class cls) +{ + loadMethodLock.assertLocked(); + + if (loadable_classes) { + int i; + for (i = 0; i < loadable_classes_used; i++) { + if (loadable_classes[i].cls == cls) { + loadable_classes[i].cls = nil; + if (PrintLoading) { + _objc_inform("LOAD: class '%s' unscheduled for +load", + cls->nameForLogging()); + } + return; + } + } + } +} + + +/*********************************************************************** +* remove_category_from_loadable_list +* Category cat may have been loadable before, but it is now no longer +* loadable (because its image is being unmapped). +**********************************************************************/ +void remove_category_from_loadable_list(Category cat) +{ + loadMethodLock.assertLocked(); + + if (loadable_categories) { + int i; + for (i = 0; i < loadable_categories_used; i++) { + if (loadable_categories[i].cat == cat) { + loadable_categories[i].cat = nil; + if (PrintLoading) { + _objc_inform("LOAD: category '%s(%s)' unscheduled for +load", + _category_getClassName(cat), + _category_getName(cat)); + } + return; + } + } + } +} + + +/*********************************************************************** +* call_class_loads +* Call all pending class +load methods. +* If new classes become loadable, +load is NOT called for them. +* +* Called only by call_load_methods(). +**********************************************************************/ +static void call_class_loads(void) +{ + int i; + + // Detach current loadable list. + struct loadable_class *classes = loadable_classes; + int used = loadable_classes_used; + loadable_classes = nil; + loadable_classes_allocated = 0; + loadable_classes_used = 0; + + // Call all +loads for the detached list. + for (i = 0; i < used; i++) { + Class cls = classes[i].cls; + load_method_t load_method = (load_method_t)classes[i].method; + if (!cls) continue; + + if (PrintLoading) { + _objc_inform("LOAD: +[%s load]\n", cls->nameForLogging()); + } + (*load_method)(cls, SEL_load); + } + + // Destroy the detached list. + if (classes) free(classes); +} + + +/*********************************************************************** +* call_category_loads +* Call some pending category +load methods. +* The parent class of the +load-implementing categories has all of +* its categories attached, in case some are lazily waiting for +initalize. +* Don't call +load unless the parent class is connected. +* If new categories become loadable, +load is NOT called, and they +* are added to the end of the loadable list, and we return TRUE. +* Return FALSE if no new categories became loadable. +* +* Called only by call_load_methods(). +**********************************************************************/ +static bool call_category_loads(void) +{ + int i, shift; + bool new_categories_added = NO; + + // Detach current loadable list. + struct loadable_category *cats = loadable_categories; + int used = loadable_categories_used; + int allocated = loadable_categories_allocated; + loadable_categories = nil; + loadable_categories_allocated = 0; + loadable_categories_used = 0; + + // Call all +loads for the detached list. 
+ for (i = 0; i < used; i++) { + Category cat = cats[i].cat; + load_method_t load_method = (load_method_t)cats[i].method; + Class cls; + if (!cat) continue; + + cls = _category_getClass(cat); + if (cls && cls->isLoadable()) { + if (PrintLoading) { + _objc_inform("LOAD: +[%s(%s) load]\n", + cls->nameForLogging(), + _category_getName(cat)); + } + (*load_method)(cls, SEL_load); + cats[i].cat = nil; + } + } + + // Compact detached list (order-preserving) + shift = 0; + for (i = 0; i < used; i++) { + if (cats[i].cat) { + cats[i-shift] = cats[i]; + } else { + shift++; + } + } + used -= shift; + + // Copy any new +load candidates from the new list to the detached list. + new_categories_added = (loadable_categories_used > 0); + for (i = 0; i < loadable_categories_used; i++) { + if (used == allocated) { + allocated = allocated*2 + 16; + cats = (struct loadable_category *) + realloc(cats, allocated * + sizeof(struct loadable_category)); + } + cats[used++] = loadable_categories[i]; + } + + // Destroy the new list. + if (loadable_categories) free(loadable_categories); + + // Reattach the (now augmented) detached list. + // But if there's nothing left to load, destroy the list. + if (used) { + loadable_categories = cats; + loadable_categories_used = used; + loadable_categories_allocated = allocated; + } else { + if (cats) free(cats); + loadable_categories = nil; + loadable_categories_used = 0; + loadable_categories_allocated = 0; + } + + if (PrintLoading) { + if (loadable_categories_used != 0) { + _objc_inform("LOAD: %d categories still waiting for +load\n", + loadable_categories_used); + } + } + + return new_categories_added; +} + + +/*********************************************************************** +* call_load_methods +* Call all pending class and category +load methods. +* Class +load methods are called superclass-first. +* Category +load methods are not called until after the parent class's +load. +* +* This method must be RE-ENTRANT, because a +load could trigger +* more image mapping. In addition, the superclass-first ordering +* must be preserved in the face of re-entrant calls. Therefore, +* only the OUTERMOST call of this function will do anything, and +* that call will handle all loadable classes, even those generated +* while it was running. +* +* The sequence below preserves +load ordering in the face of +* image loading during a +load, and make sure that no +* +load method is forgotten because it was added during +* a +load call. +* Sequence: +* 1. Repeatedly call class +loads until there aren't any more +* 2. Call category +loads ONCE. +* 3. Run more +loads if: +* (a) there are more classes to load, OR +* (b) there are some potential category +loads that have +* still never been attempted. +* Category +loads are only run once to ensure "parent class first" +* ordering, even if a category +load triggers a new loadable class +* and a new loadable category attached to that class. +* +* Locking: loadMethodLock must be held by the caller +* All other locks must not be held. +**********************************************************************/ +void call_load_methods(void) +{ + static bool loading = NO; + bool more_categories; + + loadMethodLock.assertLocked(); + + // Re-entrant calls do nothing; the outermost call will finish the job. + if (loading) return; + loading = YES; + + void *pool = objc_autoreleasePoolPush(); + + do { + // 1. Repeatedly call class +loads until there aren't any more + while (loadable_classes_used > 0) { + call_class_loads(); + } + + // 2. 
Call category +loads ONCE + more_categories = call_category_loads(); + + // 3. Run more +loads if there are classes OR more untried categories + } while (loadable_classes_used > 0 || more_categories); + + objc_autoreleasePoolPop(pool); + + loading = NO; +} + + diff --git a/runtime/objc-lockdebug.h b/runtime/objc-lockdebug.h new file mode 100644 index 0000000..071064d --- /dev/null +++ b/runtime/objc-lockdebug.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2015 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +extern void lockdebug_mutex_lock(mutex_tt *lock); +extern void lockdebug_mutex_try_lock(mutex_tt *lock); +extern void lockdebug_mutex_unlock(mutex_tt *lock); +extern void lockdebug_mutex_assert_locked(mutex_tt *lock); +extern void lockdebug_mutex_assert_unlocked(mutex_tt *lock); + +static inline void lockdebug_mutex_lock(mutex_tt *lock) { } +static inline void lockdebug_mutex_try_lock(mutex_tt *lock) { } +static inline void lockdebug_mutex_unlock(mutex_tt *lock) { } +static inline void lockdebug_mutex_assert_locked(mutex_tt *lock) { } +static inline void lockdebug_mutex_assert_unlocked(mutex_tt *lock) { } + + +extern void lockdebug_monitor_enter(monitor_tt *lock); +extern void lockdebug_monitor_leave(monitor_tt *lock); +extern void lockdebug_monitor_wait(monitor_tt *lock); +extern void lockdebug_monitor_assert_locked(monitor_tt *lock); +extern void lockdebug_monitor_assert_unlocked(monitor_tt *lock); + +static inline void lockdebug_monitor_enter(monitor_tt *lock) { } +static inline void lockdebug_monitor_leave(monitor_tt *lock) { } +static inline void lockdebug_monitor_wait(monitor_tt *lock) { } +static inline void lockdebug_monitor_assert_locked(monitor_tt *lock) { } +static inline void lockdebug_monitor_assert_unlocked(monitor_tt *lock) {} + + +extern void +lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock); +extern void +lockdebug_recursive_mutex_unlock(recursive_mutex_tt *lock); +extern void +lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock); +extern void +lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock); + +static inline void +lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock) { } +static inline void +lockdebug_recursive_mutex_unlock(recursive_mutex_tt *lock) { } +static inline void +lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock) { } +static inline void +lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock) { } + + +extern void lockdebug_rwlock_read(rwlock_tt *lock); +extern void lockdebug_rwlock_try_read_success(rwlock_tt *lock); +extern void lockdebug_rwlock_unlock_read(rwlock_tt *lock); +extern void 
lockdebug_rwlock_write(rwlock_tt *lock); +extern void lockdebug_rwlock_try_write_success(rwlock_tt *lock); +extern void lockdebug_rwlock_unlock_write(rwlock_tt *lock); +extern void lockdebug_rwlock_assert_reading(rwlock_tt *lock); +extern void lockdebug_rwlock_assert_writing(rwlock_tt *lock); +extern void lockdebug_rwlock_assert_locked(rwlock_tt *lock); +extern void lockdebug_rwlock_assert_unlocked(rwlock_tt *lock); + +static inline void lockdebug_rwlock_read(rwlock_tt *) { } +static inline void lockdebug_rwlock_try_read_success(rwlock_tt *) { } +static inline void lockdebug_rwlock_unlock_read(rwlock_tt *) { } +static inline void lockdebug_rwlock_write(rwlock_tt *) { } +static inline void lockdebug_rwlock_try_write_success(rwlock_tt *) { } +static inline void lockdebug_rwlock_unlock_write(rwlock_tt *) { } +static inline void lockdebug_rwlock_assert_reading(rwlock_tt *) { } +static inline void lockdebug_rwlock_assert_writing(rwlock_tt *) { } +static inline void lockdebug_rwlock_assert_locked(rwlock_tt *) { } +static inline void lockdebug_rwlock_assert_unlocked(rwlock_tt *) { } diff --git a/runtime/objc-lockdebug.mm b/runtime/objc-lockdebug.mm new file mode 100644 index 0000000..ed94011 --- /dev/null +++ b/runtime/objc-lockdebug.mm @@ -0,0 +1,471 @@ +/* + * Copyright (c) 2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-lock.m +* Error-checking locks for debugging. +**********************************************************************/ + +#include "objc-private.h" + +#if DEBUG && !TARGET_OS_WIN32 + +/*********************************************************************** +* Recording - per-thread list of mutexes and monitors held +**********************************************************************/ + +typedef struct { + void *l; // the lock itself + int k; // the kind of lock it is (MUTEX, MONITOR, etc) + int i; // the lock's nest count +} lockcount; + +#define MUTEX 1 +#define MONITOR 2 +#define RDLOCK 3 +#define WRLOCK 4 +#define RECURSIVE 5 + +typedef struct _objc_lock_list { + int allocated; + int used; + lockcount list[0]; +} _objc_lock_list; + +static tls_key_t lock_tls; + +static void +destroyLocks(void *value) +{ + _objc_lock_list *locks = (_objc_lock_list *)value; + // fixme complain about any still-held locks? + if (locks) free(locks); +} + +static struct _objc_lock_list * +getLocks(BOOL create) +{ + _objc_lock_list *locks; + + // Use a dedicated tls key to prevent differences vs non-debug in + // usage of objc's other tls keys (required for some unit tests). 
+ INIT_ONCE_PTR(lock_tls, tls_create(&destroyLocks), (void)0); + + locks = (_objc_lock_list *)tls_get(lock_tls); + if (!locks) { + if (!create) { + return NULL; + } else { + locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + sizeof(lockcount) * 16); + locks->allocated = 16; + locks->used = 0; + tls_set(lock_tls, locks); + } + } + + if (locks->allocated == locks->used) { + if (!create) { + return locks; + } else { + _objc_lock_list *oldlocks = locks; + locks = (_objc_lock_list *)calloc(1, sizeof(_objc_lock_list) + 2 * oldlocks->used * sizeof(lockcount)); + locks->used = oldlocks->used; + locks->allocated = oldlocks->used * 2; + memcpy(locks->list, oldlocks->list, locks->used * sizeof(lockcount)); + tls_set(lock_tls, locks); + free(oldlocks); + } + } + + return locks; +} + +static BOOL +hasLock(_objc_lock_list *locks, void *lock, int kind) +{ + int i; + if (!locks) return NO; + + for (i = 0; i < locks->used; i++) { + if (locks->list[i].l == lock && locks->list[i].k == kind) return YES; + } + return NO; +} + + +static void +setLock(_objc_lock_list *locks, void *lock, int kind) +{ + int i; + for (i = 0; i < locks->used; i++) { + if (locks->list[i].l == lock && locks->list[i].k == kind) { + locks->list[i].i++; + return; + } + } + + locks->list[locks->used].l = lock; + locks->list[locks->used].i = 1; + locks->list[locks->used].k = kind; + locks->used++; +} + +static void +clearLock(_objc_lock_list *locks, void *lock, int kind) +{ + int i; + for (i = 0; i < locks->used; i++) { + if (locks->list[i].l == lock && locks->list[i].k == kind) { + if (--locks->list[i].i == 0) { + locks->list[i].l = NULL; + locks->list[i] = locks->list[--locks->used]; + } + return; + } + } + + _objc_fatal("lock not found!"); +} + + +/*********************************************************************** +* Mutex checking +**********************************************************************/ + +#if !TARGET_OS_SIMULATOR +// Non-simulator platforms have lock debugging built into os_unfair_lock. + + +void +lockdebug_mutex_lock(mutex_t *lock) +{ + // empty +} + +void +lockdebug_mutex_unlock(mutex_t *lock) +{ + // empty +} + +void +lockdebug_mutex_assert_locked(mutex_t *lock) +{ + os_unfair_lock_assert_owner((os_unfair_lock *)lock); +} + +void +lockdebug_mutex_assert_unlocked(mutex_t *lock) +{ + os_unfair_lock_assert_not_owner((os_unfair_lock *)lock); +} + + +// !TARGET_OS_SIMULATOR +#else +// TARGET_OS_SIMULATOR + +// Simulator platforms have no built-in lock debugging in os_unfair_lock. + + +void +lockdebug_mutex_lock(mutex_t *lock) +{ + _objc_lock_list *locks = getLocks(YES); + + if (hasLock(locks, lock, MUTEX)) { + _objc_fatal("deadlock: relocking mutex"); + } + setLock(locks, lock, MUTEX); +} + +// try-lock success is the only case with lockdebug effects. +// try-lock when already locked is OK (will fail) +// try-lock failure does nothing. 
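The per-thread bookkeeping above (getLocks/hasLock/setLock/clearLock) is the whole trick: each thread keeps its own small table of (lock, kind, nest count) records in a dedicated TLS slot, so every check is race-free without taking any additional locks. A minimal standalone model of that bookkeeping, using a thread_local std::vector in place of the runtime's tls_key_t-based _objc_lock_list (the names below are illustrative only, not the runtime's):

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    struct HeldLock { void *lock; int kind; int nest; };
    static thread_local std::vector<HeldLock> heldLocks;    // per-thread, like lock_tls

    static bool hasHeld(void *lock, int kind) {              // cf. hasLock()
        for (const HeldLock &h : heldLocks)
            if (h.lock == lock && h.kind == kind) return true;
        return false;
    }

    static void noteLock(void *lock, int kind) {              // cf. setLock()
        for (HeldLock &h : heldLocks)
            if (h.lock == lock && h.kind == kind) { h.nest++; return; }
        heldLocks.push_back({lock, kind, 1});
    }

    static void noteUnlock(void *lock, int kind) {            // cf. clearLock()
        for (size_t i = 0; i < heldLocks.size(); i++) {
            if (heldLocks[i].lock != lock || heldLocks[i].kind != kind) continue;
            if (--heldLocks[i].nest == 0) {                   // last unlock: drop the record
                heldLocks[i] = heldLocks.back();
                heldLocks.pop_back();
            }
            return;
        }
        fprintf(stderr, "BUG: unlocking a lock this thread does not hold\n");
        abort();
    }

A deadlock check such as the simulator variant of lockdebug_mutex_lock() below is then just hasHeld() before noteLock(); an unbalanced unlock falls through to the fatal path, exactly as clearLock() does.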
+void +lockdebug_mutex_try_lock_success(mutex_t *lock) +{ + _objc_lock_list *locks = getLocks(YES); + setLock(locks, lock, MUTEX); +} + +void +lockdebug_mutex_unlock(mutex_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, MUTEX)) { + _objc_fatal("unlocking unowned mutex"); + } + clearLock(locks, lock, MUTEX); +} + + +void +lockdebug_mutex_assert_locked(mutex_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, MUTEX)) { + _objc_fatal("mutex incorrectly not locked"); + } +} + +void +lockdebug_mutex_assert_unlocked(mutex_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (hasLock(locks, lock, MUTEX)) { + _objc_fatal("mutex incorrectly locked"); + } +} + + +// TARGET_OS_SIMULATOR +#endif + +/*********************************************************************** +* Recursive mutex checking +**********************************************************************/ + +void +lockdebug_recursive_mutex_lock(recursive_mutex_tt *lock) +{ + _objc_lock_list *locks = getLocks(YES); + setLock(locks, lock, RECURSIVE); +} + +void +lockdebug_recursive_mutex_unlock(recursive_mutex_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, RECURSIVE)) { + _objc_fatal("unlocking unowned recursive mutex"); + } + clearLock(locks, lock, RECURSIVE); +} + + +void +lockdebug_recursive_mutex_assert_locked(recursive_mutex_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, RECURSIVE)) { + _objc_fatal("recursive mutex incorrectly not locked"); + } +} + +void +lockdebug_recursive_mutex_assert_unlocked(recursive_mutex_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (hasLock(locks, lock, RECURSIVE)) { + _objc_fatal("recursive mutex incorrectly locked"); + } +} + + +/*********************************************************************** +* Monitor checking +**********************************************************************/ + +void +lockdebug_monitor_enter(monitor_t *lock) +{ + _objc_lock_list *locks = getLocks(YES); + + if (hasLock(locks, lock, MONITOR)) { + _objc_fatal("deadlock: relocking monitor"); + } + setLock(locks, lock, MONITOR); +} + +void +lockdebug_monitor_leave(monitor_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, MONITOR)) { + _objc_fatal("unlocking unowned monitor"); + } + clearLock(locks, lock, MONITOR); +} + +void +lockdebug_monitor_wait(monitor_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, MONITOR)) { + _objc_fatal("waiting in unowned monitor"); + } +} + + +void +lockdebug_monitor_assert_locked(monitor_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, MONITOR)) { + _objc_fatal("monitor incorrectly not locked"); + } +} + +void +lockdebug_monitor_assert_unlocked(monitor_t *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (hasLock(locks, lock, MONITOR)) { + _objc_fatal("monitor incorrectly held"); + } +} + + +/*********************************************************************** +* rwlock checking +**********************************************************************/ + +void +lockdebug_rwlock_read(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(YES); + + if (hasLock(locks, lock, RDLOCK)) { + // Recursive rwlock read is bad (may deadlock vs pending writer) + _objc_fatal("recursive rwlock read"); + } + if (hasLock(locks, lock, WRLOCK)) { + _objc_fatal("deadlock: read after write for rwlock"); + } + setLock(locks, lock, RDLOCK); +} + +// 
try-read success is the only case with lockdebug effects. +// try-read when already reading is OK (won't deadlock) +// try-read when already writing is OK (will fail) +// try-read failure does nothing. +void +lockdebug_rwlock_try_read_success(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(YES); + setLock(locks, lock, RDLOCK); +} + +void +lockdebug_rwlock_unlock_read(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, RDLOCK)) { + _objc_fatal("un-reading unowned rwlock"); + } + clearLock(locks, lock, RDLOCK); +} + + +void +lockdebug_rwlock_write(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(YES); + + if (hasLock(locks, lock, RDLOCK)) { + // Lock promotion not allowed (may deadlock) + _objc_fatal("deadlock: write after read for rwlock"); + } + if (hasLock(locks, lock, WRLOCK)) { + _objc_fatal("recursive rwlock write"); + } + setLock(locks, lock, WRLOCK); +} + +// try-write success is the only case with lockdebug effects. +// try-write when already reading is OK (will fail) +// try-write when already writing is OK (will fail) +// try-write failure does nothing. +void +lockdebug_rwlock_try_write_success(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(YES); + setLock(locks, lock, WRLOCK); +} + +void +lockdebug_rwlock_unlock_write(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, WRLOCK)) { + _objc_fatal("un-writing unowned rwlock"); + } + clearLock(locks, lock, WRLOCK); +} + + +void +lockdebug_rwlock_assert_reading(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, RDLOCK)) { + _objc_fatal("rwlock incorrectly not reading"); + } +} + +void +lockdebug_rwlock_assert_writing(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, WRLOCK)) { + _objc_fatal("rwlock incorrectly not writing"); + } +} + +void +lockdebug_rwlock_assert_locked(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (!hasLock(locks, lock, RDLOCK) && !hasLock(locks, lock, WRLOCK)) { + _objc_fatal("rwlock incorrectly neither reading nor writing"); + } +} + +void +lockdebug_rwlock_assert_unlocked(rwlock_tt *lock) +{ + _objc_lock_list *locks = getLocks(NO); + + if (hasLock(locks, lock, RDLOCK) || hasLock(locks, lock, WRLOCK)) { + _objc_fatal("rwlock incorrectly not unlocked"); + } +} + + +#endif diff --git a/runtime/objc-object.h b/runtime/objc-object.h new file mode 100644 index 0000000..821b0a7 --- /dev/null +++ b/runtime/objc-object.h @@ -0,0 +1,1217 @@ +/* + * Copyright (c) 2010-2012 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + + +/*********************************************************************** +* Inlineable parts of NSObject / objc_object implementation +**********************************************************************/ + +#ifndef _OBJC_OBJCOBJECT_H_ +#define _OBJC_OBJCOBJECT_H_ + +#include "objc-private.h" + + +enum ReturnDisposition : bool { + ReturnAtPlus0 = false, ReturnAtPlus1 = true +}; + +static ALWAYS_INLINE +bool prepareOptimizedReturn(ReturnDisposition disposition); + + +#if SUPPORT_TAGGED_POINTERS + +extern "C" { + extern Class objc_debug_taggedpointer_classes[_OBJC_TAG_SLOT_COUNT*2]; + extern Class objc_debug_taggedpointer_ext_classes[_OBJC_TAG_EXT_SLOT_COUNT]; +} +#define objc_tag_classes objc_debug_taggedpointer_classes +#define objc_tag_ext_classes objc_debug_taggedpointer_ext_classes + +#endif + +#if SUPPORT_INDEXED_ISA + +ALWAYS_INLINE Class & +classForIndex(uintptr_t index) { + assert(index > 0); + assert(index < (uintptr_t)objc_indexed_classes_count); + return objc_indexed_classes[index]; +} + +#endif + + +inline bool +objc_object::isClass() +{ + if (isTaggedPointer()) return false; + return ISA()->isMetaClass(); +} + + +#if SUPPORT_TAGGED_POINTERS + +inline Class +objc_object::getIsa() +{ + if (!isTaggedPointer()) return ISA(); + + uintptr_t ptr = (uintptr_t)this; + if (isExtTaggedPointer()) { + uintptr_t slot = + (ptr >> _OBJC_TAG_EXT_SLOT_SHIFT) & _OBJC_TAG_EXT_SLOT_MASK; + return objc_tag_ext_classes[slot]; + } else { + uintptr_t slot = + (ptr >> _OBJC_TAG_SLOT_SHIFT) & _OBJC_TAG_SLOT_MASK; + return objc_tag_classes[slot]; + } +} + + +inline bool +objc_object::isTaggedPointer() +{ + return _objc_isTaggedPointer(this); +} + +inline bool +objc_object::isBasicTaggedPointer() +{ + return isTaggedPointer() && !isExtTaggedPointer(); +} + +inline bool +objc_object::isExtTaggedPointer() +{ + return ((uintptr_t)this & _OBJC_TAG_EXT_MASK) == _OBJC_TAG_EXT_MASK; +} + + +// SUPPORT_TAGGED_POINTERS +#else +// not SUPPORT_TAGGED_POINTERS + + +inline Class +objc_object::getIsa() +{ + return ISA(); +} + + +inline bool +objc_object::isTaggedPointer() +{ + return false; +} + +inline bool +objc_object::isBasicTaggedPointer() +{ + return false; +} + +inline bool +objc_object::isExtTaggedPointer() +{ + return false; +} + + +// not SUPPORT_TAGGED_POINTERS +#endif + + +#if SUPPORT_NONPOINTER_ISA + +inline Class +objc_object::ISA() +{ + assert(!isTaggedPointer()); +#if SUPPORT_INDEXED_ISA + if (isa.nonpointer) { + uintptr_t slot = isa.indexcls; + return classForIndex((unsigned)slot); + } + return (Class)isa.bits; +#else + return (Class)(isa.bits & ISA_MASK); +#endif +} + + +inline bool +objc_object::hasNonpointerIsa() +{ + return isa.nonpointer; +} + + +inline void +objc_object::initIsa(Class cls) +{ + initIsa(cls, false, false); +} + +inline void +objc_object::initClassIsa(Class cls) +{ + if (DisableNonpointerIsa || cls->instancesRequireRawIsa()) { + initIsa(cls, false/*not nonpointer*/, false); + } else { + initIsa(cls, true/*nonpointer*/, false); + } +} + +inline void +objc_object::initProtocolIsa(Class cls) +{ + return initClassIsa(cls); +} + +inline void +objc_object::initInstanceIsa(Class cls, bool hasCxxDtor) +{ + assert(!cls->instancesRequireRawIsa()); + assert(hasCxxDtor == cls->hasCxxDtor()); + + initIsa(cls, true, hasCxxDtor); +} + +inline void +objc_object::initIsa(Class cls, bool nonpointer, bool hasCxxDtor) +{ + assert(!isTaggedPointer()); + + if (!nonpointer) { + isa.cls = cls; + } else { + assert(!DisableNonpointerIsa); + 
assert(!cls->instancesRequireRawIsa()); + + isa_t newisa(0); + +#if SUPPORT_INDEXED_ISA + assert(cls->classArrayIndex() > 0); + newisa.bits = ISA_INDEX_MAGIC_VALUE; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.nonpointer is part of ISA_MAGIC_VALUE + newisa.has_cxx_dtor = hasCxxDtor; + newisa.indexcls = (uintptr_t)cls->classArrayIndex(); +#else + newisa.bits = ISA_MAGIC_VALUE; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.nonpointer is part of ISA_MAGIC_VALUE + newisa.has_cxx_dtor = hasCxxDtor; + newisa.shiftcls = (uintptr_t)cls >> 3; +#endif + + // This write must be performed in a single store in some cases + // (for example when realizing a class because other threads + // may simultaneously try to use the class). + // fixme use atomics here to guarantee single-store and to + // guarantee memory order w.r.t. the class index table + // ...but not too atomic because we don't want to hurt instantiation + isa = newisa; + } +} + + +inline Class +objc_object::changeIsa(Class newCls) +{ + // This is almost always true but there are + // enough edge cases that we can't assert it. + // assert(newCls->isFuture() || + // newCls->isInitializing() || newCls->isInitialized()); + + assert(!isTaggedPointer()); + + isa_t oldisa; + isa_t newisa; + + bool sideTableLocked = false; + bool transcribeToSideTable = false; + + do { + transcribeToSideTable = false; + oldisa = LoadExclusive(&isa.bits); + if ((oldisa.bits == 0 || oldisa.nonpointer) && + !newCls->isFuture() && newCls->canAllocNonpointer()) + { + // 0 -> nonpointer + // nonpointer -> nonpointer +#if SUPPORT_INDEXED_ISA + if (oldisa.bits == 0) newisa.bits = ISA_INDEX_MAGIC_VALUE; + else newisa = oldisa; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.nonpointer is part of ISA_MAGIC_VALUE + newisa.has_cxx_dtor = newCls->hasCxxDtor(); + assert(newCls->classArrayIndex() > 0); + newisa.indexcls = (uintptr_t)newCls->classArrayIndex(); +#else + if (oldisa.bits == 0) newisa.bits = ISA_MAGIC_VALUE; + else newisa = oldisa; + // isa.magic is part of ISA_MAGIC_VALUE + // isa.nonpointer is part of ISA_MAGIC_VALUE + newisa.has_cxx_dtor = newCls->hasCxxDtor(); + newisa.shiftcls = (uintptr_t)newCls >> 3; +#endif + } + else if (oldisa.nonpointer) { + // nonpointer -> raw pointer + // Need to copy retain count et al to side table. + // Acquire side table lock before setting isa to + // prevent races such as concurrent -release. + if (!sideTableLocked) sidetable_lock(); + sideTableLocked = true; + transcribeToSideTable = true; + newisa.cls = newCls; + } + else { + // raw pointer -> raw pointer + newisa.cls = newCls; + } + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (transcribeToSideTable) { + // Copy oldisa's retain count et al to side table. 
+ // oldisa.has_assoc: nothing to do + // oldisa.has_cxx_dtor: nothing to do + sidetable_moveExtraRC_nolock(oldisa.extra_rc, + oldisa.deallocating, + oldisa.weakly_referenced); + } + + if (sideTableLocked) sidetable_unlock(); + + if (oldisa.nonpointer) { +#if SUPPORT_INDEXED_ISA + return classForIndex(oldisa.indexcls); +#else + return (Class)((uintptr_t)oldisa.shiftcls << 3); +#endif + } + else { + return oldisa.cls; + } +} + + +inline bool +objc_object::hasAssociatedObjects() +{ + if (isTaggedPointer()) return true; + if (isa.nonpointer) return isa.has_assoc; + return true; +} + + +inline void +objc_object::setHasAssociatedObjects() +{ + if (isTaggedPointer()) return; + + retry: + isa_t oldisa = LoadExclusive(&isa.bits); + isa_t newisa = oldisa; + if (!newisa.nonpointer || newisa.has_assoc) { + ClearExclusive(&isa.bits); + return; + } + newisa.has_assoc = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; +} + + +inline bool +objc_object::isWeaklyReferenced() +{ + assert(!isTaggedPointer()); + if (isa.nonpointer) return isa.weakly_referenced; + else return sidetable_isWeaklyReferenced(); +} + + +inline void +objc_object::setWeaklyReferenced_nolock() +{ + retry: + isa_t oldisa = LoadExclusive(&isa.bits); + isa_t newisa = oldisa; + if (slowpath(!newisa.nonpointer)) { + ClearExclusive(&isa.bits); + sidetable_setWeaklyReferenced_nolock(); + return; + } + if (newisa.weakly_referenced) { + ClearExclusive(&isa.bits); + return; + } + newisa.weakly_referenced = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; +} + + +inline bool +objc_object::hasCxxDtor() +{ + assert(!isTaggedPointer()); + if (isa.nonpointer) return isa.has_cxx_dtor; + else return isa.cls->hasCxxDtor(); +} + + + +inline bool +objc_object::rootIsDeallocating() +{ + if (isTaggedPointer()) return false; + if (isa.nonpointer) return isa.deallocating; + return sidetable_isDeallocating(); +} + + +inline void +objc_object::clearDeallocating() +{ + if (slowpath(!isa.nonpointer)) { + // Slow path for raw pointer isa. + sidetable_clearDeallocating(); + } + else if (slowpath(isa.weakly_referenced || isa.has_sidetable_rc)) { + // Slow path for non-pointer isa with weak refs and/or side table data. + clearDeallocating_slow(); + } + + assert(!sidetable_present()); +} + + +inline void +objc_object::rootDealloc() +{ + if (isTaggedPointer()) return; // fixme necessary? + + if (fastpath(isa.nonpointer && + !isa.weakly_referenced && + !isa.has_assoc && + !isa.has_cxx_dtor && + !isa.has_sidetable_rc)) + { + assert(!sidetable_present()); + free(this); + } + else { + object_dispose((id)this); + } +} + + +// Equivalent to calling [this retain], with shortcuts if there is no override +inline id +objc_object::retain() +{ + assert(!isTaggedPointer()); + + if (fastpath(!ISA()->hasCustomRR())) { + return rootRetain(); + } + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain); +} + + +// Base retain implementation, ignoring overrides. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super retain]. +// +// tryRetain=true is the -_tryRetain path. +// handleOverflow=false is the frameless fast path. +// handleOverflow=true is the framed slow path including overflow to side table +// The code is structured this way to prevent duplication. + +ALWAYS_INLINE id +objc_object::rootRetain() +{ + return rootRetain(false, false); +} + +ALWAYS_INLINE bool +objc_object::rootTryRetain() +{ + return rootRetain(true, false) ? 
true : false; +} + +ALWAYS_INLINE id +objc_object::rootRetain(bool tryRetain, bool handleOverflow) +{ + if (isTaggedPointer()) return (id)this; + + bool sideTableLocked = false; + bool transcribeToSideTable = false; + + isa_t oldisa; + isa_t newisa; + + do { + transcribeToSideTable = false; + oldisa = LoadExclusive(&isa.bits); + newisa = oldisa; + if (slowpath(!newisa.nonpointer)) { + ClearExclusive(&isa.bits); + if (!tryRetain && sideTableLocked) sidetable_unlock(); + if (tryRetain) return sidetable_tryRetain() ? (id)this : nil; + else return sidetable_retain(); + } + // don't check newisa.fast_rr; we already called any RR overrides + if (slowpath(tryRetain && newisa.deallocating)) { + ClearExclusive(&isa.bits); + if (!tryRetain && sideTableLocked) sidetable_unlock(); + return nil; + } + uintptr_t carry; + newisa.bits = addc(newisa.bits, RC_ONE, 0, &carry); // extra_rc++ + + if (slowpath(carry)) { + // newisa.extra_rc++ overflowed + if (!handleOverflow) { + ClearExclusive(&isa.bits); + return rootRetain_overflow(tryRetain); + } + // Leave half of the retain counts inline and + // prepare to copy the other half to the side table. + if (!tryRetain && !sideTableLocked) sidetable_lock(); + sideTableLocked = true; + transcribeToSideTable = true; + newisa.extra_rc = RC_HALF; + newisa.has_sidetable_rc = true; + } + } while (slowpath(!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits))); + + if (slowpath(transcribeToSideTable)) { + // Copy the other half of the retain counts to the side table. + sidetable_addExtraRC_nolock(RC_HALF); + } + + if (slowpath(!tryRetain && sideTableLocked)) sidetable_unlock(); + return (id)this; +} + + +// Equivalent to calling [this release], with shortcuts if there is no override +inline void +objc_object::release() +{ + assert(!isTaggedPointer()); + + if (fastpath(!ISA()->hasCustomRR())) { + rootRelease(); + return; + } + + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release); +} + + +// Base release implementation, ignoring overrides. +// Does not call -dealloc. +// Returns true if the object should now be deallocated. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super release]. +// +// handleUnderflow=false is the frameless fast path. +// handleUnderflow=true is the framed slow path including side table borrow +// The code is structured this way to prevent duplication. 
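The overflow handling above and the underflow handling below are two halves of the same arithmetic: the true retain count is 1 (implicit) plus extra_rc stored inline in the isa, plus whatever has been spilled to the side table. A simplified numeric model of that handoff (an 8-bit counter stands in for the isa's extra_rc bit-field and a plain integer for the per-object side table entry; the real field widths and the RC_ONE/RC_HALF values are architecture-dependent):

    #include <cstdint>

    struct ModelRefCount {
        uint8_t  extra_rc = 0;          // retains held beyond the implicit +1
        bool     has_sidetable_rc = false;
        uint64_t sidetable_rc = 0;      // stand-in for the side table entry
    };

    static const uint8_t kHalf = 128;   // half of the inline field's range (RC_HALF)

    static void model_retain(ModelRefCount &rc) {
        if (rc.extra_rc < 255) { rc.extra_rc++; return; }
        // Inline field would overflow: keep half inline, spill the other half.
        rc.extra_rc = kHalf;
        rc.has_sidetable_rc = true;
        rc.sidetable_rc += kHalf;
    }

    // Returns true when the object should now be deallocated.
    static bool model_release(ModelRefCount &rc) {
        if (rc.extra_rc > 0) { rc.extra_rc--; return false; }
        // Inline field underflowed: borrow half a field back from the side table.
        if (rc.has_sidetable_rc && rc.sidetable_rc >= kHalf) {
            rc.sidetable_rc -= kHalf;
            rc.extra_rc = kHalf - 1;    // redo the original decrement against the borrow
            return false;
        }
        return true;                    // the implicit +1 is gone: deallocate
    }

The real code must do this under LoadExclusive/StoreExclusive retry loops and with the side table lock held across the borrow, which is where most of the complexity in rootRelease() below comes from.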
+ +ALWAYS_INLINE bool +objc_object::rootRelease() +{ + return rootRelease(true, false); +} + +ALWAYS_INLINE bool +objc_object::rootReleaseShouldDealloc() +{ + return rootRelease(false, false); +} + +ALWAYS_INLINE bool +objc_object::rootRelease(bool performDealloc, bool handleUnderflow) +{ + if (isTaggedPointer()) return false; + + bool sideTableLocked = false; + + isa_t oldisa; + isa_t newisa; + + retry: + do { + oldisa = LoadExclusive(&isa.bits); + newisa = oldisa; + if (slowpath(!newisa.nonpointer)) { + ClearExclusive(&isa.bits); + if (sideTableLocked) sidetable_unlock(); + return sidetable_release(performDealloc); + } + // don't check newisa.fast_rr; we already called any RR overrides + uintptr_t carry; + newisa.bits = subc(newisa.bits, RC_ONE, 0, &carry); // extra_rc-- + if (slowpath(carry)) { + // don't ClearExclusive() + goto underflow; + } + } while (slowpath(!StoreReleaseExclusive(&isa.bits, + oldisa.bits, newisa.bits))); + + if (slowpath(sideTableLocked)) sidetable_unlock(); + return false; + + underflow: + // newisa.extra_rc-- underflowed: borrow from side table or deallocate + + // abandon newisa to undo the decrement + newisa = oldisa; + + if (slowpath(newisa.has_sidetable_rc)) { + if (!handleUnderflow) { + ClearExclusive(&isa.bits); + return rootRelease_underflow(performDealloc); + } + + // Transfer retain count from side table to inline storage. + + if (!sideTableLocked) { + ClearExclusive(&isa.bits); + sidetable_lock(); + sideTableLocked = true; + // Need to start over to avoid a race against + // the nonpointer -> raw pointer transition. + goto retry; + } + + // Try to remove some retain counts from the side table. + size_t borrowed = sidetable_subExtraRC_nolock(RC_HALF); + + // To avoid races, has_sidetable_rc must remain set + // even if the side table count is now zero. + + if (borrowed > 0) { + // Side table retain count decreased. + // Try to add them to the inline count. + newisa.extra_rc = borrowed - 1; // redo the original decrement too + bool stored = StoreReleaseExclusive(&isa.bits, + oldisa.bits, newisa.bits); + if (!stored) { + // Inline update failed. + // Try it again right now. This prevents livelock on LL/SC + // architectures where the side table access itself may have + // dropped the reservation. + isa_t oldisa2 = LoadExclusive(&isa.bits); + isa_t newisa2 = oldisa2; + if (newisa2.nonpointer) { + uintptr_t overflow; + newisa2.bits = + addc(newisa2.bits, RC_ONE * (borrowed-1), 0, &overflow); + if (!overflow) { + stored = StoreReleaseExclusive(&isa.bits, oldisa2.bits, + newisa2.bits); + } + } + } + + if (!stored) { + // Inline update failed. + // Put the retains back in the side table. + sidetable_addExtraRC_nolock(borrowed); + goto retry; + } + + // Decrement successful after borrowing from side table. + // This decrement cannot be the deallocating decrement - the side + // table lock and has_sidetable_rc bit ensure that if everyone + // else tried to -release while we worked, the last one would block. + sidetable_unlock(); + return false; + } + else { + // Side table is empty after all. Fall-through to the dealloc path. + } + } + + // Really deallocate. 
+ + if (slowpath(newisa.deallocating)) { + ClearExclusive(&isa.bits); + if (sideTableLocked) sidetable_unlock(); + return overrelease_error(); + // does not actually return + } + newisa.deallocating = true; + if (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)) goto retry; + + if (slowpath(sideTableLocked)) sidetable_unlock(); + + __sync_synchronize(); + if (performDealloc) { + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_dealloc); + } + return true; +} + + +// Equivalent to [this autorelease], with shortcuts if there is no override +inline id +objc_object::autorelease() +{ + if (isTaggedPointer()) return (id)this; + if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease(); + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease); +} + + +// Base autorelease implementation, ignoring overrides. +inline id +objc_object::rootAutorelease() +{ + if (isTaggedPointer()) return (id)this; + if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this; + + return rootAutorelease2(); +} + + +inline uintptr_t +objc_object::rootRetainCount() +{ + if (isTaggedPointer()) return (uintptr_t)this; + + sidetable_lock(); + isa_t bits = LoadExclusive(&isa.bits); + ClearExclusive(&isa.bits); + if (bits.nonpointer) { + uintptr_t rc = 1 + bits.extra_rc; + if (bits.has_sidetable_rc) { + rc += sidetable_getExtraRC_nolock(); + } + sidetable_unlock(); + return rc; + } + + sidetable_unlock(); + return sidetable_retainCount(); +} + + +// SUPPORT_NONPOINTER_ISA +#else +// not SUPPORT_NONPOINTER_ISA + + +inline Class +objc_object::ISA() +{ + assert(!isTaggedPointer()); + return isa.cls; +} + + +inline bool +objc_object::hasNonpointerIsa() +{ + return false; +} + + +inline void +objc_object::initIsa(Class cls) +{ + assert(!isTaggedPointer()); + isa = (uintptr_t)cls; +} + + +inline void +objc_object::initClassIsa(Class cls) +{ + initIsa(cls); +} + + +inline void +objc_object::initProtocolIsa(Class cls) +{ + initIsa(cls); +} + + +inline void +objc_object::initInstanceIsa(Class cls, bool) +{ + initIsa(cls); +} + + +inline void +objc_object::initIsa(Class cls, bool, bool) +{ + initIsa(cls); +} + + +inline Class +objc_object::changeIsa(Class cls) +{ + // This is almost always rue but there are + // enough edge cases that we can't assert it. 
+ // assert(cls->isFuture() || + // cls->isInitializing() || cls->isInitialized()); + + assert(!isTaggedPointer()); + + isa_t oldisa, newisa; + newisa.cls = cls; + do { + oldisa = LoadExclusive(&isa.bits); + } while (!StoreExclusive(&isa.bits, oldisa.bits, newisa.bits)); + + if (oldisa.cls && oldisa.cls->instancesHaveAssociatedObjects()) { + cls->setInstancesHaveAssociatedObjects(); + } + + return oldisa.cls; +} + + +inline bool +objc_object::hasAssociatedObjects() +{ + return getIsa()->instancesHaveAssociatedObjects(); +} + + +inline void +objc_object::setHasAssociatedObjects() +{ + getIsa()->setInstancesHaveAssociatedObjects(); +} + + +inline bool +objc_object::isWeaklyReferenced() +{ + assert(!isTaggedPointer()); + + return sidetable_isWeaklyReferenced(); +} + + +inline void +objc_object::setWeaklyReferenced_nolock() +{ + assert(!isTaggedPointer()); + + sidetable_setWeaklyReferenced_nolock(); +} + + +inline bool +objc_object::hasCxxDtor() +{ + assert(!isTaggedPointer()); + return isa.cls->hasCxxDtor(); +} + + +inline bool +objc_object::rootIsDeallocating() +{ + if (isTaggedPointer()) return false; + return sidetable_isDeallocating(); +} + + +inline void +objc_object::clearDeallocating() +{ + sidetable_clearDeallocating(); +} + + +inline void +objc_object::rootDealloc() +{ + if (isTaggedPointer()) return; + object_dispose((id)this); +} + + +// Equivalent to calling [this retain], with shortcuts if there is no override +inline id +objc_object::retain() +{ + assert(!isTaggedPointer()); + + if (fastpath(!ISA()->hasCustomRR())) { + return sidetable_retain(); + } + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_retain); +} + + +// Base retain implementation, ignoring overrides. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super retain]. +inline id +objc_object::rootRetain() +{ + if (isTaggedPointer()) return (id)this; + return sidetable_retain(); +} + + +// Equivalent to calling [this release], with shortcuts if there is no override +inline void +objc_object::release() +{ + assert(!isTaggedPointer()); + + if (fastpath(!ISA()->hasCustomRR())) { + sidetable_release(); + return; + } + + ((void(*)(objc_object *, SEL))objc_msgSend)(this, SEL_release); +} + + +// Base release implementation, ignoring overrides. +// Does not call -dealloc. +// Returns true if the object should now be deallocated. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super release]. +inline bool +objc_object::rootRelease() +{ + if (isTaggedPointer()) return false; + return sidetable_release(true); +} + +inline bool +objc_object::rootReleaseShouldDealloc() +{ + if (isTaggedPointer()) return false; + return sidetable_release(false); +} + + +// Equivalent to [this autorelease], with shortcuts if there is no override +inline id +objc_object::autorelease() +{ + if (isTaggedPointer()) return (id)this; + if (fastpath(!ISA()->hasCustomRR())) return rootAutorelease(); + + return ((id(*)(objc_object *, SEL))objc_msgSend)(this, SEL_autorelease); +} + + +// Base autorelease implementation, ignoring overrides. +inline id +objc_object::rootAutorelease() +{ + if (isTaggedPointer()) return (id)this; + if (prepareOptimizedReturn(ReturnAtPlus1)) return (id)this; + + return rootAutorelease2(); +} + + +// Base tryRetain implementation, ignoring overrides. +// This does not check isa.fast_rr; if there is an RR override then +// it was already called and it chose to call [super _tryRetain]. 
+inline bool +objc_object::rootTryRetain() +{ + if (isTaggedPointer()) return true; + return sidetable_tryRetain(); +} + + +inline uintptr_t +objc_object::rootRetainCount() +{ + if (isTaggedPointer()) return (uintptr_t)this; + return sidetable_retainCount(); +} + + +// not SUPPORT_NONPOINTER_ISA +#endif + + +#if SUPPORT_RETURN_AUTORELEASE + +/*********************************************************************** + Fast handling of return through Cocoa's +0 autoreleasing convention. + The caller and callee cooperate to keep the returned object + out of the autorelease pool and eliminate redundant retain/release pairs. + + An optimized callee looks at the caller's instructions following the + return. If the caller's instructions are also optimized then the callee + skips all retain count operations: no autorelease, no retain/autorelease. + Instead it saves the result's current retain count (+0 or +1) in + thread-local storage. If the caller does not look optimized then + the callee performs autorelease or retain/autorelease as usual. + + An optimized caller looks at the thread-local storage. If the result + is set then it performs any retain or release needed to change the + result from the retain count left by the callee to the retain count + desired by the caller. Otherwise the caller assumes the result is + currently at +0 from an unoptimized callee and performs any retain + needed for that case. + + There are two optimized callees: + objc_autoreleaseReturnValue + result is currently +1. The unoptimized path autoreleases it. + objc_retainAutoreleaseReturnValue + result is currently +0. The unoptimized path retains and autoreleases it. + + There are two optimized callers: + objc_retainAutoreleasedReturnValue + caller wants the value at +1. The unoptimized path retains it. + objc_unsafeClaimAutoreleasedReturnValue + caller wants the value at +0 unsafely. The unoptimized path does nothing. + + Example: + + Callee: + // compute ret at +1 + return objc_autoreleaseReturnValue(ret); + + Caller: + ret = callee(); + ret = objc_retainAutoreleasedReturnValue(ret); + // use ret at +1 here + + Callee sees the optimized caller, sets TLS, and leaves the result at +1. + Caller sees the TLS, clears it, and accepts the result at +1 as-is. + + The callee's recognition of the optimized caller is architecture-dependent. + x86_64: Callee looks for `mov rax, rdi` followed by a call or + jump instruction to objc_retainAutoreleasedReturnValue or + objc_unsafeClaimAutoreleasedReturnValue. + i386: Callee looks for a magic nop `movl %ebp, %ebp` (frame pointer register) + armv7: Callee looks for a magic nop `mov r7, r7` (frame pointer register). + arm64: Callee looks for a magic nop `mov x29, x29` (frame pointer register). + + Tagged pointer objects do participate in the optimized return scheme, + because it saves message sends. They are not entered in the autorelease + pool in the unoptimized case. 
+**********************************************************************/ + +# if __x86_64__ + +static ALWAYS_INLINE bool +callerAcceptsOptimizedReturn(const void * const ra0) +{ + const uint8_t *ra1 = (const uint8_t *)ra0; + const unaligned_uint16_t *ra2; + const unaligned_uint32_t *ra4 = (const unaligned_uint32_t *)ra1; + const void **sym; + +#define PREFER_GOTPCREL 0 +#if PREFER_GOTPCREL + // 48 89 c7 movq %rax,%rdi + // ff 15 callq *symbol@GOTPCREL(%rip) + if (*ra4 != 0xffc78948) { + return false; + } + if (ra1[4] != 0x15) { + return false; + } + ra1 += 3; +#else + // 48 89 c7 movq %rax,%rdi + // e8 callq symbol + if (*ra4 != 0xe8c78948) { + return false; + } + ra1 += (long)*(const unaligned_int32_t *)(ra1 + 4) + 8l; + ra2 = (const unaligned_uint16_t *)ra1; + // ff 25 jmpq *symbol@DYLDMAGIC(%rip) + if (*ra2 != 0x25ff) { + return false; + } +#endif + ra1 += 6l + (long)*(const unaligned_int32_t *)(ra1 + 2); + sym = (const void **)ra1; + if (*sym != objc_retainAutoreleasedReturnValue && + *sym != objc_unsafeClaimAutoreleasedReturnValue) + { + return false; + } + + return true; +} + +// __x86_64__ +# elif __arm__ + +static ALWAYS_INLINE bool +callerAcceptsOptimizedReturn(const void *ra) +{ + // if the low bit is set, we're returning to thumb mode + if ((uintptr_t)ra & 1) { + // 3f 46 mov r7, r7 + // we mask off the low bit via subtraction + // 16-bit instructions are well-aligned + if (*(uint16_t *)((uint8_t *)ra - 1) == 0x463f) { + return true; + } + } else { + // 07 70 a0 e1 mov r7, r7 + // 32-bit instructions may be only 16-bit aligned + if (*(unaligned_uint32_t *)ra == 0xe1a07007) { + return true; + } + } + return false; +} + +// __arm__ +# elif __arm64__ + +static ALWAYS_INLINE bool +callerAcceptsOptimizedReturn(const void *ra) +{ + // fd 03 1d aa mov fp, fp + // arm64 instructions are well-aligned + if (*(uint32_t *)ra == 0xaa1d03fd) { + return true; + } + return false; +} + +// __arm64__ +# elif __i386__ + +static ALWAYS_INLINE bool +callerAcceptsOptimizedReturn(const void *ra) +{ + // 89 ed movl %ebp, %ebp + if (*(unaligned_uint16_t *)ra == 0xed89) { + return true; + } + return false; +} + +// __i386__ +# else + +#warning unknown architecture + +static ALWAYS_INLINE bool +callerAcceptsOptimizedReturn(const void *ra) +{ + return false; +} + +// unknown architecture +# endif + + +static ALWAYS_INLINE ReturnDisposition +getReturnDisposition() +{ + return (ReturnDisposition)(uintptr_t)tls_get_direct(RETURN_DISPOSITION_KEY); +} + + +static ALWAYS_INLINE void +setReturnDisposition(ReturnDisposition disposition) +{ + tls_set_direct(RETURN_DISPOSITION_KEY, (void*)(uintptr_t)disposition); +} + + +// Try to prepare for optimized return with the given disposition (+0 or +1). +// Returns true if the optimized path is successful. +// Otherwise the return value must be retained and/or autoreleased as usual. +static ALWAYS_INLINE bool +prepareOptimizedReturn(ReturnDisposition disposition) +{ + assert(getReturnDisposition() == ReturnAtPlus0); + + if (callerAcceptsOptimizedReturn(__builtin_return_address(0))) { + if (disposition) setReturnDisposition(disposition); + return true; + } + + return false; +} + + +// Try to accept an optimized return. +// Returns the disposition of the returned object (+0 or +1). +// An un-optimized return is +0. 
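prepareOptimizedReturn() above and acceptOptimizedReturn() just below are the two halves of the TLS handshake described earlier. A sketch of how the four public entry points are built on top of them (simplified, not the runtime's exact definitions; objc_retain/objc_release/objc_autorelease are the ordinary reference-counting entry points):

    // Callee side: called on the value being returned.
    static id sketch_autoreleaseReturnValue(id obj)
    {
        // Caller's return site carries the magic marker: leave obj at +1,
        // record that in TLS, and skip the autorelease pool entirely.
        if (prepareOptimizedReturn(ReturnAtPlus1)) return obj;
        return objc_autorelease(obj);               // unoptimized caller: +0 via the pool
    }

    static id sketch_retainAutoreleaseReturnValue(id obj)
    {
        // Optimized caller: leave obj at +0 and say so in TLS.
        if (prepareOptimizedReturn(ReturnAtPlus0)) return obj;
        return objc_autorelease(objc_retain(obj));  // unoptimized caller: retain+autorelease
    }

    // Caller side: called on the value just returned.
    static id sketch_retainAutoreleasedReturnValue(id obj)
    {
        // Optimized callee already left obj at +1 for us: nothing to do.
        if (acceptOptimizedReturn() == ReturnAtPlus1) return obj;
        return objc_retain(obj);                    // unoptimized callee: take our +1
    }

    static id sketch_unsafeClaimAutoreleasedReturnValue(id obj)
    {
        // Optimized callee left obj at +1, but this caller only wants +0.
        if (acceptOptimizedReturn() == ReturnAtPlus1) objc_release(obj);
        return obj;                                 // now (unsafely) at +0
    }

When both sides are optimized, a returned object crosses the call boundary with no autorelease pool traffic and no redundant retain/release pair at all.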
+static ALWAYS_INLINE ReturnDisposition +acceptOptimizedReturn() +{ + ReturnDisposition disposition = getReturnDisposition(); + setReturnDisposition(ReturnAtPlus0); // reset to the unoptimized state + return disposition; +} + + +// SUPPORT_RETURN_AUTORELEASE +#else +// not SUPPORT_RETURN_AUTORELEASE + + +static ALWAYS_INLINE bool +prepareOptimizedReturn(ReturnDisposition disposition __unused) +{ + return false; +} + + +static ALWAYS_INLINE ReturnDisposition +acceptOptimizedReturn() +{ + return ReturnAtPlus0; +} + + +// not SUPPORT_RETURN_AUTORELEASE +#endif + + +// _OBJC_OBJECT_H_ +#endif diff --git a/runtime/objc-opt.mm b/runtime/objc-opt.mm new file mode 100644 index 0000000..19533df --- /dev/null +++ b/runtime/objc-opt.mm @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2012 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + objc-opt.mm + Management of optimizations in the dyld shared cache +*/ + +#include "objc-private.h" + + +#if !SUPPORT_PREOPT +// Preoptimization not supported on this platform. 
+ +struct objc_selopt_t; + +bool isPreoptimized(void) +{ + return false; +} + +bool noMissingWeakSuperclasses(void) +{ + return false; +} + +bool header_info::isPreoptimized() const +{ + return false; +} + +objc_selopt_t *preoptimizedSelectors(void) +{ + return nil; +} + +Protocol *getPreoptimizedProtocol(const char *name) +{ + return nil; +} + +Class getPreoptimizedClass(const char *name) +{ + return nil; +} + +Class* copyPreoptimizedClasses(const char *name, int *outCount) +{ + *outCount = 0; + return nil; +} + +header_info *preoptimizedHinfoForHeader(const headerType *mhdr) +{ + return nil; +} + +header_info_rw *getPreoptimizedHeaderRW(const struct header_info *const hdr) +{ + return nil; +} + +void preopt_init(void) +{ + disableSharedCacheOptimizations(); + + if (PrintPreopt) { + _objc_inform("PREOPTIMIZATION: is DISABLED " + "(not supported on ths platform)"); + } +} + + +// !SUPPORT_PREOPT +#else +// SUPPORT_PREOPT + +#include + +using objc_opt::objc_stringhash_offset_t; +using objc_opt::objc_protocolopt_t; +using objc_opt::objc_clsopt_t; +using objc_opt::objc_headeropt_ro_t; +using objc_opt::objc_headeropt_rw_t; +using objc_opt::objc_opt_t; + +__BEGIN_DECLS + +// preopt: the actual opt used at runtime (nil or &_objc_opt_data) +// _objc_opt_data: opt data possibly written by dyld +// opt is initialized to ~0 to detect incorrect use before preopt_init() + +static const objc_opt_t *opt = (objc_opt_t *)~0; +static bool preoptimized; + +extern const objc_opt_t _objc_opt_data; // in __TEXT, __objc_opt_ro + +/*********************************************************************** +* Return YES if we have a valid optimized shared cache. +**********************************************************************/ +bool isPreoptimized(void) +{ + return preoptimized; +} + + +/*********************************************************************** +* Return YES if the shared cache does not have any classes with +* missing weak superclasses. +**********************************************************************/ +bool noMissingWeakSuperclasses(void) +{ + if (!preoptimized) return NO; // might have missing weak superclasses + return opt->flags & objc_opt::NoMissingWeakSuperclasses; +} + + +/*********************************************************************** +* Return YES if this image's dyld shared cache optimizations are valid. +**********************************************************************/ +bool header_info::isPreoptimized() const +{ + // preoptimization disabled for some reason + if (!preoptimized) return NO; + + // image not from shared cache, or not fixed inside shared cache + if (!info()->optimizedByDyld()) return NO; + + return YES; +} + + +objc_selopt_t *preoptimizedSelectors(void) +{ + return opt ? opt->selopt() : nil; +} + + +Protocol *getPreoptimizedProtocol(const char *name) +{ + objc_protocolopt_t *protocols = opt ? opt->protocolopt() : nil; + if (!protocols) return nil; + + return (Protocol *)protocols->getProtocol(name); +} + + +Class getPreoptimizedClass(const char *name) +{ + objc_clsopt_t *classes = opt ? 
opt->clsopt() : nil; + if (!classes) return nil; + + void *cls; + void *hi; + uint32_t count = classes->getClassAndHeader(name, cls, hi); + if (count == 1 && ((header_info *)hi)->isLoaded()) { + // exactly one matching class, and its image is loaded + return (Class)cls; + } + else if (count > 1) { + // more than one matching class - find one that is loaded + void *clslist[count]; + void *hilist[count]; + classes->getClassesAndHeaders(name, clslist, hilist); + for (uint32_t i = 0; i < count; i++) { + if (((header_info *)hilist[i])->isLoaded()) { + return (Class)clslist[i]; + } + } + } + + // no match that is loaded + return nil; +} + + +Class* copyPreoptimizedClasses(const char *name, int *outCount) +{ + *outCount = 0; + + objc_clsopt_t *classes = opt ? opt->clsopt() : nil; + if (!classes) return nil; + + void *cls; + void *hi; + uint32_t count = classes->getClassAndHeader(name, cls, hi); + if (count == 0) return nil; + + Class *result = (Class *)calloc(count, sizeof(Class)); + if (count == 1 && ((header_info *)hi)->isLoaded()) { + // exactly one matching class, and its image is loaded + result[(*outCount)++] = (Class)cls; + return result; + } + else if (count > 1) { + // more than one matching class - find those that are loaded + void *clslist[count]; + void *hilist[count]; + classes->getClassesAndHeaders(name, clslist, hilist); + for (uint32_t i = 0; i < count; i++) { + if (((header_info *)hilist[i])->isLoaded()) { + result[(*outCount)++] = (Class)clslist[i]; + } + } + + if (*outCount == 0) { + // found multiple classes with that name, but none are loaded + free(result); + result = nil; + } + return result; + } + + // no match that is loaded + return nil; +} + +namespace objc_opt { +struct objc_headeropt_ro_t { + uint32_t count; + uint32_t entsize; + header_info headers[0]; // sorted by mhdr address + + header_info *get(const headerType *mhdr) + { + assert(entsize == sizeof(header_info)); + + int32_t start = 0; + int32_t end = count; + while (start <= end) { + int32_t i = (start+end)/2; + header_info *hi = headers+i; + if (mhdr == hi->mhdr()) return hi; + else if (mhdr < hi->mhdr()) end = i-1; + else start = i+1; + } + +#if DEBUG + for (uint32_t i = 0; i < count; i++) { + header_info *hi = headers+i; + if (mhdr == hi->mhdr()) { + _objc_fatal("failed to find header %p (%d/%d)", + mhdr, i, count); + } + } +#endif + + return nil; + } +}; + +struct objc_headeropt_rw_t { + uint32_t count; + uint32_t entsize; + header_info_rw headers[0]; // sorted by mhdr address +}; +}; + + +header_info *preoptimizedHinfoForHeader(const headerType *mhdr) +{ +#if !__OBJC2__ + // fixme old ABI shared cache doesn't prepare these properly + return nil; +#endif + + objc_headeropt_ro_t *hinfos = opt ? opt->headeropt_ro() : nil; + if (hinfos) return hinfos->get(mhdr); + else return nil; +} + + +header_info_rw *getPreoptimizedHeaderRW(const struct header_info *const hdr) +{ +#if !__OBJC2__ + // fixme old ABI shared cache doesn't prepare these properly + return nil; +#endif + + objc_headeropt_ro_t *hinfoRO = opt ? opt->headeropt_ro() : nil; + objc_headeropt_rw_t *hinfoRW = opt ? 
opt->headeropt_rw() : nil; + if (!hinfoRO || !hinfoRW) { + _objc_fatal("preoptimized header_info missing for %s (%p %p %p)", + hdr->fname(), hdr, hinfoRO, hinfoRW); + } + int32_t index = (int32_t)(hdr - hinfoRO->headers); + assert(hinfoRW->entsize == sizeof(header_info_rw)); + return &hinfoRW->headers[index]; +} + + +void preopt_init(void) +{ + // `opt` not set at compile time in order to detect too-early usage + const char *failure = nil; + opt = &_objc_opt_data; + + if (DisablePreopt) { + // OBJC_DISABLE_PREOPTIMIZATION is set + // If opt->version != VERSION then you continue at your own risk. + failure = "(by OBJC_DISABLE_PREOPTIMIZATION)"; + } + else if (opt->version != objc_opt::VERSION) { + // This shouldn't happen. You probably forgot to edit objc-sel-table.s. + // If dyld really did write the wrong optimization version, + // then we must halt because we don't know what bits dyld twiddled. + _objc_fatal("bad objc preopt version (want %d, got %d)", + objc_opt::VERSION, opt->version); + } + else if (!opt->selopt() || !opt->headeropt_ro()) { + // One of the tables is missing. + failure = "(dyld shared cache is absent or out of date)"; + } + + if (failure) { + // All preoptimized selector references are invalid. + preoptimized = NO; + opt = nil; + disableSharedCacheOptimizations(); + + if (PrintPreopt) { + _objc_inform("PREOPTIMIZATION: is DISABLED %s", failure); + } + } + else { + // Valid optimization data written by dyld shared cache + preoptimized = YES; + + if (PrintPreopt) { + _objc_inform("PREOPTIMIZATION: is ENABLED " + "(version %d)", opt->version); + } + } +} + + +__END_DECLS + +// SUPPORT_PREOPT +#endif diff --git a/runtime/objc-os.h b/runtime/objc-os.h new file mode 100644 index 0000000..904184f --- /dev/null +++ b/runtime/objc-os.h @@ -0,0 +1,1225 @@ +/* + * Copyright (c) 2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-os.h +* OS portability layer. +**********************************************************************/ + +#ifndef _OBJC_OS_H +#define _OBJC_OS_H + +#include +#include "objc-config.h" + +#ifdef __LP64__ +# define WORD_SHIFT 3UL +# define WORD_MASK 7UL +# define WORD_BITS 64 +#else +# define WORD_SHIFT 2UL +# define WORD_MASK 3UL +# define WORD_BITS 32 +#endif + +static inline uint32_t word_align(uint32_t x) { + return (x + WORD_MASK) & ~WORD_MASK; +} +static inline size_t word_align(size_t x) { + return (x + WORD_MASK) & ~WORD_MASK; +} + + +// Mix-in for classes that must not be copied. 
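+// A type opts out of copying by inheriting this mix-in: the deleted copy
+// constructor and copy assignment below turn any accidental copy into a
+// compile-time error. The lock wrappers later in this header (mutex_tt,
+// recursive_mutex_tt, rwlock_tt) inherit nocopy_t for exactly this reason.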
+class nocopy_t { + private: + nocopy_t(const nocopy_t&) = delete; + const nocopy_t& operator=(const nocopy_t&) = delete; + protected: + nocopy_t() { } + ~nocopy_t() { } +}; + + +#if TARGET_OS_MAC + +# define OS_UNFAIR_LOCK_INLINE 1 + +# ifndef __STDC_LIMIT_MACROS +# define __STDC_LIMIT_MACROS +# endif + +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# undef check +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include "objc-probes.h" // generated dtrace probe definitions. + +// Some libc functions call objc_msgSend() +// so we can't use them without deadlocks. +void syslog(int, const char *, ...) UNAVAILABLE_ATTRIBUTE; +void vsyslog(int, const char *, va_list) UNAVAILABLE_ATTRIBUTE; + + +#define ALWAYS_INLINE inline __attribute__((always_inline)) +#define NEVER_INLINE inline __attribute__((noinline)) + +#define fastpath(x) (__builtin_expect(bool(x), 1)) +#define slowpath(x) (__builtin_expect(bool(x), 0)) + + +static ALWAYS_INLINE uintptr_t +addc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout) +{ + return __builtin_addcl(lhs, rhs, carryin, carryout); +} + +static ALWAYS_INLINE uintptr_t +subc(uintptr_t lhs, uintptr_t rhs, uintptr_t carryin, uintptr_t *carryout) +{ + return __builtin_subcl(lhs, rhs, carryin, carryout); +} + + +#if __arm64__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + uintptr_t result; + asm("ldxr %x0, [%x1]" + : "=r" (result) + : "r" (src), "m" (*src)); + return result; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +{ + uint32_t result; + asm("stxr %w0, %x2, [%x3]" + : "=r" (result), "=m" (*dst) + : "r" (value), "r" (dst)); + return !result; +} + + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue __unused, uintptr_t value) +{ + uint32_t result; + asm("stlxr %w0, %x2, [%x3]" + : "=r" (result), "=m" (*dst) + : "r" (value), "r" (dst)); + return !result; +} + +static ALWAYS_INLINE +void +ClearExclusive(uintptr_t *dst) +{ + // pretend it writes to *dst for instruction ordering purposes + asm("clrex" : "=m" (*dst)); +} + + +#elif __arm__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + return *src; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return OSAtomicCompareAndSwapPtr((void *)oldvalue, (void *)value, + (void **)dst); +} + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return OSAtomicCompareAndSwapPtrBarrier((void *)oldvalue, (void *)value, + (void **)dst); +} + +static ALWAYS_INLINE +void +ClearExclusive(uintptr_t *dst __unused) +{ +} + + +#elif __x86_64__ || __i386__ + +static ALWAYS_INLINE +uintptr_t +LoadExclusive(uintptr_t *src) +{ + return *src; +} + +static ALWAYS_INLINE +bool +StoreExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + + return __sync_bool_compare_and_swap((void **)dst, (void *)oldvalue, (void *)value); +} + +static ALWAYS_INLINE +bool +StoreReleaseExclusive(uintptr_t *dst, uintptr_t oldvalue, uintptr_t value) +{ + return StoreExclusive(dst, oldvalue, value); +} + +static ALWAYS_INLINE +void +ClearExclusive(uintptr_t *dst __unused) +{ +} + + +#else +# error 
unknown architecture +#endif + + +#if !TARGET_OS_IPHONE +# include +#else + // CrashReporterClient not yet available on iOS + __BEGIN_DECLS + extern const char *CRSetCrashLogMessage(const char *msg); + extern const char *CRGetCrashLogMessage(void); + __END_DECLS +#endif + +# if __cplusplus +# include +# include +# include + using namespace std; +# endif + +# define PRIVATE_EXTERN __attribute__((visibility("hidden"))) +# undef __private_extern__ +# define __private_extern__ use_PRIVATE_EXTERN_instead +# undef private_extern +# define private_extern use_PRIVATE_EXTERN_instead + +/* Use this for functions that are intended to be breakpoint hooks. + If you do not, the compiler may optimize them away. + BREAKPOINT_FUNCTION( void stop_on_error(void) ); */ +# define BREAKPOINT_FUNCTION(prototype) \ + OBJC_EXTERN __attribute__((noinline, used, visibility("hidden"))) \ + prototype { asm(""); } + +#elif TARGET_OS_WIN32 + +# define WINVER 0x0501 // target Windows XP and later +# define _WIN32_WINNT 0x0501 // target Windows XP and later +# define WIN32_LEAN_AND_MEAN + // hack: windef.h typedefs BOOL as int +# define BOOL WINBOOL +# include +# undef BOOL + +# include +# include +# include +# include +# include +# include +# include +# include + +# if __cplusplus +# include +# include +# include + using namespace std; +# define __BEGIN_DECLS extern "C" { +# define __END_DECLS } +# else +# define __BEGIN_DECLS /*empty*/ +# define __END_DECLS /*empty*/ +# endif + +# define PRIVATE_EXTERN +# define __attribute__(x) +# define inline __inline + +/* Use this for functions that are intended to be breakpoint hooks. + If you do not, the compiler may optimize them away. + BREAKPOINT_FUNCTION( void MyBreakpointFunction(void) ); */ +# define BREAKPOINT_FUNCTION(prototype) \ + __declspec(noinline) prototype { __asm { } } + +/* stub out dtrace probes */ +# define OBJC_RUNTIME_OBJC_EXCEPTION_RETHROW() do {} while(0) +# define OBJC_RUNTIME_OBJC_EXCEPTION_THROW(arg0) do {} while(0) + +#else +# error unknown OS +#endif + + +#include +#include + +extern void _objc_fatal(const char *fmt, ...) + __attribute__((noreturn, format (printf, 1, 2))); +extern void _objc_fatal_with_reason(uint64_t reason, uint64_t flags, + const char *fmt, ...) + __attribute__((noreturn, format (printf, 3, 4))); + +#define INIT_ONCE_PTR(var, create, delete) \ + do { \ + if (var) break; \ + typeof(var) v = create; \ + while (!var) { \ + if (OSAtomicCompareAndSwapPtrBarrier(0, (void*)v, (void**)&var)){ \ + goto done; \ + } \ + } \ + delete; \ + done:; \ + } while (0) + +#define INIT_ONCE_32(var, create, delete) \ + do { \ + if (var) break; \ + typeof(var) v = create; \ + while (!var) { \ + if (OSAtomicCompareAndSwap32Barrier(0, v, (volatile int32_t *)&var)) { \ + goto done; \ + } \ + } \ + delete; \ + done:; \ + } while (0) + + +// Thread keys reserved by libc for our use. 
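+// When libc reserves the __PTK_FRAMEWORK_OBJC_KEY* slots, the runtime keeps
+// its per-thread state (@synchronized caches, the autorelease pool stack,
+// etc.) at those fixed indices in the pthread TSD area and accesses it with
+// the tls_get_direct()/tls_set_direct() helpers defined further below,
+// instead of going through pthread_getspecific(). If the slots are not
+// available, SUPPORT_DIRECT_THREAD_KEYS is 0 and the ordinary
+// tls_create()/tls_get()/tls_set() wrappers are used for everything.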
+#if defined(__PTK_FRAMEWORK_OBJC_KEY0) +# define SUPPORT_DIRECT_THREAD_KEYS 1 +# define TLS_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY0) +# define SYNC_DATA_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY1) +# define SYNC_COUNT_DIRECT_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY2) +# define AUTORELEASE_POOL_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY3) +# if SUPPORT_RETURN_AUTORELEASE +# define RETURN_DISPOSITION_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY4) +# endif +# if SUPPORT_QOS_HACK +# define QOS_KEY ((tls_key_t)__PTK_FRAMEWORK_OBJC_KEY5) +# endif +#else +# define SUPPORT_DIRECT_THREAD_KEYS 0 +#endif + + +#if TARGET_OS_WIN32 + +// Compiler compatibility + +// OS compatibility + +#define strdup _strdup + +#define issetugid() 0 + +#define MIN(x, y) ((x) < (y) ? (x) : (y)) + +static __inline void bcopy(const void *src, void *dst, size_t size) { memcpy(dst, src, size); } +static __inline void bzero(void *dst, size_t size) { memset(dst, 0, size); } + +int asprintf(char **dstp, const char *format, ...); + +typedef void * malloc_zone_t; + +static __inline malloc_zone_t malloc_default_zone(void) { return (malloc_zone_t)-1; } +static __inline void *malloc_zone_malloc(malloc_zone_t z, size_t size) { return malloc(size); } +static __inline void *malloc_zone_calloc(malloc_zone_t z, size_t size, size_t count) { return calloc(size, count); } +static __inline void *malloc_zone_realloc(malloc_zone_t z, void *p, size_t size) { return realloc(p, size); } +static __inline void malloc_zone_free(malloc_zone_t z, void *p) { free(p); } +static __inline malloc_zone_t malloc_zone_from_ptr(const void *p) { return (malloc_zone_t)-1; } +static __inline size_t malloc_size(const void *p) { return _msize((void*)p); /* fixme invalid pointer check? */ } + + +// OSAtomic + +static __inline BOOL OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) +{ + // fixme barrier is overkill + long original = InterlockedCompareExchange(dst, newl, oldl); + return (original == oldl); +} + +static __inline BOOL OSAtomicCompareAndSwapPtrBarrier(void *oldp, void *newp, void * volatile *dst) +{ + void *original = InterlockedCompareExchangePointer(dst, newp, oldp); + return (original == oldp); +} + +static __inline BOOL OSAtomicCompareAndSwap32Barrier(int32_t oldl, int32_t newl, int32_t volatile *dst) +{ + long original = InterlockedCompareExchange((volatile long *)dst, newl, oldl); + return (original == oldl); +} + +static __inline int32_t OSAtomicDecrement32Barrier(volatile int32_t *dst) +{ + return InterlockedDecrement((volatile long *)dst); +} + +static __inline int32_t OSAtomicIncrement32Barrier(volatile int32_t *dst) +{ + return InterlockedIncrement((volatile long *)dst); +} + + +// Internal data types + +typedef DWORD objc_thread_t; // thread ID +static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) { + return t1 == t2; +} +static __inline objc_thread_t thread_self(void) { + return GetCurrentThreadId(); +} + +typedef struct { + DWORD key; + void (*dtor)(void *); +} tls_key_t; +static __inline tls_key_t tls_create(void (*dtor)(void*)) { + // fixme need dtor registry for DllMain to call on thread detach + tls_key_t k; + k.key = TlsAlloc(); + k.dtor = dtor; + return k; +} +static __inline void *tls_get(tls_key_t k) { + return TlsGetValue(k.key); +} +static __inline void tls_set(tls_key_t k, void *value) { + TlsSetValue(k.key, value); +} + +typedef struct { + CRITICAL_SECTION *lock; +} mutex_t; +#define MUTEX_INITIALIZER {0}; +extern void mutex_init(mutex_t *m); +static __inline int 
_mutex_lock_nodebug(mutex_t *m) { + // fixme error check + if (!m->lock) { + mutex_init(m); + } + EnterCriticalSection(m->lock); + return 0; +} +static __inline bool _mutex_try_lock_nodebug(mutex_t *m) { + // fixme error check + if (!m->lock) { + mutex_init(m); + } + return TryEnterCriticalSection(m->lock); +} +static __inline int _mutex_unlock_nodebug(mutex_t *m) { + // fixme error check + LeaveCriticalSection(m->lock); + return 0; +} + + +typedef mutex_t spinlock_t; +#define spinlock_lock(l) mutex_lock(l) +#define spinlock_unlock(l) mutex_unlock(l) +#define SPINLOCK_INITIALIZER MUTEX_INITIALIZER + + +typedef struct { + HANDLE mutex; +} recursive_mutex_t; +#define RECURSIVE_MUTEX_INITIALIZER {0}; +#define RECURSIVE_MUTEX_NOT_LOCKED 1 +extern void recursive_mutex_init(recursive_mutex_t *m); +static __inline int _recursive_mutex_lock_nodebug(recursive_mutex_t *m) { + assert(m->mutex); + return WaitForSingleObject(m->mutex, INFINITE); +} +static __inline bool _recursive_mutex_try_lock_nodebug(recursive_mutex_t *m) { + assert(m->mutex); + return (WAIT_OBJECT_0 == WaitForSingleObject(m->mutex, 0)); +} +static __inline int _recursive_mutex_unlock_nodebug(recursive_mutex_t *m) { + assert(m->mutex); + return ReleaseMutex(m->mutex) ? 0 : RECURSIVE_MUTEX_NOT_LOCKED; +} + + +/* +typedef HANDLE mutex_t; +static inline void mutex_init(HANDLE *m) { *m = CreateMutex(NULL, FALSE, NULL); } +static inline void _mutex_lock(mutex_t *m) { WaitForSingleObject(*m, INFINITE); } +static inline bool mutex_try_lock(mutex_t *m) { return WaitForSingleObject(*m, 0) == WAIT_OBJECT_0; } +static inline void _mutex_unlock(mutex_t *m) { ReleaseMutex(*m); } +*/ + +// based on http://www.cs.wustl.edu/~schmidt/win32-cv-1.html +// Vista-only CONDITION_VARIABLE would be better +typedef struct { + HANDLE mutex; + HANDLE waiters; // semaphore for those in cond_wait() + HANDLE waitersDone; // auto-reset event after everyone gets a broadcast + CRITICAL_SECTION waitCountLock; // guards waitCount and didBroadcast + unsigned int waitCount; + int didBroadcast; +} monitor_t; +#define MONITOR_INITIALIZER { 0 } +#define MONITOR_NOT_ENTERED 1 +extern int monitor_init(monitor_t *c); + +static inline int _monitor_enter_nodebug(monitor_t *c) { + if (!c->mutex) { + int err = monitor_init(c); + if (err) return err; + } + return WaitForSingleObject(c->mutex, INFINITE); +} +static inline int _monitor_leave_nodebug(monitor_t *c) { + if (!ReleaseMutex(c->mutex)) return MONITOR_NOT_ENTERED; + else return 0; +} +static inline int _monitor_wait_nodebug(monitor_t *c) { + int last; + EnterCriticalSection(&c->waitCountLock); + c->waitCount++; + LeaveCriticalSection(&c->waitCountLock); + + SignalObjectAndWait(c->mutex, c->waiters, INFINITE, FALSE); + + EnterCriticalSection(&c->waitCountLock); + c->waitCount--; + last = c->didBroadcast && c->waitCount == 0; + LeaveCriticalSection(&c->waitCountLock); + + if (last) { + // tell broadcaster that all waiters have awoken + SignalObjectAndWait(c->waitersDone, c->mutex, INFINITE, FALSE); + } else { + WaitForSingleObject(c->mutex, INFINITE); + } + + // fixme error checking + return 0; +} +static inline int monitor_notify(monitor_t *c) { + int haveWaiters; + + EnterCriticalSection(&c->waitCountLock); + haveWaiters = c->waitCount > 0; + LeaveCriticalSection(&c->waitCountLock); + + if (haveWaiters) { + ReleaseSemaphore(c->waiters, 1, 0); + } + + // fixme error checking + return 0; +} +static inline int monitor_notifyAll(monitor_t *c) { + EnterCriticalSection(&c->waitCountLock); + if (c->waitCount == 0) { + 
LeaveCriticalSection(&c->waitCountLock); + return 0; + } + c->didBroadcast = 1; + ReleaseSemaphore(c->waiters, c->waitCount, 0); + LeaveCriticalSection(&c->waitCountLock); + + // fairness: wait for everyone to move from waiters to mutex + WaitForSingleObject(c->waitersDone, INFINITE); + // not under waitCountLock, but still under mutex + c->didBroadcast = 0; + + // fixme error checking + return 0; +} + + +// fixme no rwlock yet + + +typedef IMAGE_DOS_HEADER headerType; +// fixme YES bundle? NO bundle? sometimes? +#define headerIsBundle(hi) YES +OBJC_EXTERN IMAGE_DOS_HEADER __ImageBase; +#define libobjc_header ((headerType *)&__ImageBase) + +// Prototypes + + +#elif TARGET_OS_MAC + + +// OS headers +#include +#ifndef __LP64__ +# define SEGMENT_CMD LC_SEGMENT +#else +# define SEGMENT_CMD LC_SEGMENT_64 +#endif + +#ifndef VM_MEMORY_OBJC_DISPATCHERS +# define VM_MEMORY_OBJC_DISPATCHERS 0 +#endif + + +// Compiler compatibility + +// OS compatibility + +static inline uint64_t nanoseconds() { + return mach_absolute_time(); +} + +// Internal data types + +typedef pthread_t objc_thread_t; + +static __inline int thread_equal(objc_thread_t t1, objc_thread_t t2) { + return pthread_equal(t1, t2); +} +static __inline objc_thread_t thread_self(void) { + return pthread_self(); +} + + +typedef pthread_key_t tls_key_t; + +static inline tls_key_t tls_create(void (*dtor)(void*)) { + tls_key_t k; + pthread_key_create(&k, dtor); + return k; +} +static inline void *tls_get(tls_key_t k) { + return pthread_getspecific(k); +} +static inline void tls_set(tls_key_t k, void *value) { + pthread_setspecific(k, value); +} + +#if SUPPORT_DIRECT_THREAD_KEYS + +#if DEBUG +static bool is_valid_direct_key(tls_key_t k) { + return ( k == SYNC_DATA_DIRECT_KEY + || k == SYNC_COUNT_DIRECT_KEY + || k == AUTORELEASE_POOL_KEY +# if SUPPORT_RETURN_AUTORELEASE + || k == RETURN_DISPOSITION_KEY +# endif +# if SUPPORT_QOS_HACK + || k == QOS_KEY +# endif + ); +} +#endif + +#if __arm__ + +// rdar://9162780 _pthread_get/setspecific_direct are inefficient +// copied from libdispatch + +__attribute__((const)) +static ALWAYS_INLINE void** +tls_base(void) +{ + uintptr_t p; +#if defined(__arm__) && defined(_ARM_ARCH_6) + __asm__("mrc p15, 0, %[p], c13, c0, 3" : [p] "=&r" (p)); + return (void**)(p & ~0x3ul); +#else +#error tls_base not implemented +#endif +} + + +static ALWAYS_INLINE void +tls_set_direct(void **tsdb, tls_key_t k, void *v) +{ + assert(is_valid_direct_key(k)); + + tsdb[k] = v; +} +#define tls_set_direct(k, v) \ + tls_set_direct(tls_base(), (k), (v)) + + +static ALWAYS_INLINE void * +tls_get_direct(void **tsdb, tls_key_t k) +{ + assert(is_valid_direct_key(k)); + + return tsdb[k]; +} +#define tls_get_direct(k) \ + tls_get_direct(tls_base(), (k)) + +// arm +#else +// not arm + +static inline void *tls_get_direct(tls_key_t k) +{ + assert(is_valid_direct_key(k)); + + if (_pthread_has_direct_tsd()) { + return _pthread_getspecific_direct(k); + } else { + return pthread_getspecific(k); + } +} +static inline void tls_set_direct(tls_key_t k, void *value) +{ + assert(is_valid_direct_key(k)); + + if (_pthread_has_direct_tsd()) { + _pthread_setspecific_direct(k, value); + } else { + pthread_setspecific(k, value); + } +} + +// not arm +#endif + +// SUPPORT_DIRECT_THREAD_KEYS +#endif + + +static inline pthread_t pthread_self_direct() +{ + return (pthread_t) + _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_SELF); +} + +static inline mach_port_t mach_thread_self_direct() +{ + return (mach_port_t)(uintptr_t) + 
_pthread_getspecific_direct(_PTHREAD_TSD_SLOT_MACH_THREAD_SELF); +} + +#if SUPPORT_QOS_HACK +static inline pthread_priority_t pthread_self_priority_direct() +{ + pthread_priority_t pri = (pthread_priority_t) + _pthread_getspecific_direct(_PTHREAD_TSD_SLOT_PTHREAD_QOS_CLASS); + return pri & ~_PTHREAD_PRIORITY_FLAGS_MASK; +} +#endif + + +template class mutex_tt; +template class monitor_tt; +template class rwlock_tt; +template class recursive_mutex_tt; + +using spinlock_t = mutex_tt; +using mutex_t = mutex_tt; +using monitor_t = monitor_tt; +using rwlock_t = rwlock_tt; +using recursive_mutex_t = recursive_mutex_tt; + +#include "objc-lockdebug.h" + +template +class mutex_tt : nocopy_t { + os_unfair_lock mLock; + public: + mutex_tt() : mLock(OS_UNFAIR_LOCK_INIT) { } + + void lock() { + lockdebug_mutex_lock(this); + + os_unfair_lock_lock_with_options_inline + (&mLock, OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION); + } + + void unlock() { + lockdebug_mutex_unlock(this); + + os_unfair_lock_unlock_inline(&mLock); + } + + void assertLocked() { + lockdebug_mutex_assert_locked(this); + } + + void assertUnlocked() { + lockdebug_mutex_assert_unlocked(this); + } + + + // Address-ordered lock discipline for a pair of locks. + + static void lockTwo(mutex_tt *lock1, mutex_tt *lock2) { + if (lock1 > lock2) { + lock1->lock(); + lock2->lock(); + } else { + lock2->lock(); + if (lock2 != lock1) lock1->lock(); + } + } + + static void unlockTwo(mutex_tt *lock1, mutex_tt *lock2) { + lock1->unlock(); + if (lock2 != lock1) lock2->unlock(); + } +}; + + +template +class recursive_mutex_tt : nocopy_t { + pthread_mutex_t mLock; + + public: + recursive_mutex_tt() : mLock(PTHREAD_RECURSIVE_MUTEX_INITIALIZER) { } + + void lock() + { + lockdebug_recursive_mutex_lock(this); + + int err = pthread_mutex_lock(&mLock); + if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err); + } + + void unlock() + { + lockdebug_recursive_mutex_unlock(this); + + int err = pthread_mutex_unlock(&mLock); + if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err); + } + + bool tryUnlock() + { + int err = pthread_mutex_unlock(&mLock); + if (err == 0) { + lockdebug_recursive_mutex_unlock(this); + return true; + } else if (err == EPERM) { + return false; + } else { + _objc_fatal("pthread_mutex_unlock failed (%d)", err); + } + } + + + void assertLocked() { + lockdebug_recursive_mutex_assert_locked(this); + } + + void assertUnlocked() { + lockdebug_recursive_mutex_assert_unlocked(this); + } +}; + + +template +class monitor_tt { + pthread_mutex_t mutex; + pthread_cond_t cond; + + public: + monitor_tt() + : mutex(PTHREAD_MUTEX_INITIALIZER), cond(PTHREAD_COND_INITIALIZER) { } + + void enter() + { + lockdebug_monitor_enter(this); + + int err = pthread_mutex_lock(&mutex); + if (err) _objc_fatal("pthread_mutex_lock failed (%d)", err); + } + + void leave() + { + lockdebug_monitor_leave(this); + + int err = pthread_mutex_unlock(&mutex); + if (err) _objc_fatal("pthread_mutex_unlock failed (%d)", err); + } + + void wait() + { + lockdebug_monitor_wait(this); + + int err = pthread_cond_wait(&cond, &mutex); + if (err) _objc_fatal("pthread_cond_wait failed (%d)", err); + } + + void notify() + { + int err = pthread_cond_signal(&cond); + if (err) _objc_fatal("pthread_cond_signal failed (%d)", err); + } + + void notifyAll() + { + int err = pthread_cond_broadcast(&cond); + if (err) _objc_fatal("pthread_cond_broadcast failed (%d)", err); + } + + void assertLocked() + { + lockdebug_monitor_assert_locked(this); + } + + void assertUnlocked() + { + 
lockdebug_monitor_assert_unlocked(this); + } +}; + + +// semaphore_create formatted for INIT_ONCE use +static inline semaphore_t create_semaphore(void) +{ + semaphore_t sem; + kern_return_t k; + k = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0); + if (k) _objc_fatal("semaphore_create failed (0x%x)", k); + return sem; +} + + +#if SUPPORT_QOS_HACK +// Override QOS class to avoid priority inversion in rwlocks +// do a qos override before taking rw lock in objc + +#include +extern pthread_priority_t BackgroundPriority; +extern pthread_priority_t MainPriority; + +static inline void qosStartOverride() +{ + uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY); + if (overrideRefCount > 0) { + // If there is a qos override, increment the refcount and continue + tls_set_direct(QOS_KEY, (void *)(overrideRefCount + 1)); + } + else { + pthread_priority_t currentPriority = pthread_self_priority_direct(); + // Check if override is needed. Only override if we are background qos + if (currentPriority != 0 && currentPriority <= BackgroundPriority) { + int res __unused = _pthread_override_qos_class_start_direct(mach_thread_self_direct(), MainPriority); + assert(res == 0); + // Once we override, we set the reference count in the tsd + // to know when to end the override + tls_set_direct(QOS_KEY, (void *)1); + } + } +} + +static inline void qosEndOverride() +{ + uintptr_t overrideRefCount = (uintptr_t)tls_get_direct(QOS_KEY); + if (overrideRefCount == 0) return; + + if (overrideRefCount == 1) { + // end the override + int res __unused = _pthread_override_qos_class_end_direct(mach_thread_self_direct()); + assert(res == 0); + } + + // decrement refcount + tls_set_direct(QOS_KEY, (void *)(overrideRefCount - 1)); +} + +// SUPPORT_QOS_HACK +#else +// not SUPPORT_QOS_HACK + +static inline void qosStartOverride() { } +static inline void qosEndOverride() { } + +// not SUPPORT_QOS_HACK +#endif + + +template +class rwlock_tt : nocopy_t { + pthread_rwlock_t mLock; + + public: + rwlock_tt() : mLock(PTHREAD_RWLOCK_INITIALIZER) { } + + void read() + { + lockdebug_rwlock_read(this); + + qosStartOverride(); + int err = pthread_rwlock_rdlock(&mLock); + if (err) _objc_fatal("pthread_rwlock_rdlock failed (%d)", err); + } + + void unlockRead() + { + lockdebug_rwlock_unlock_read(this); + + int err = pthread_rwlock_unlock(&mLock); + if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err); + qosEndOverride(); + } + + bool tryRead() + { + qosStartOverride(); + int err = pthread_rwlock_tryrdlock(&mLock); + if (err == 0) { + lockdebug_rwlock_try_read_success(this); + return true; + } else if (err == EBUSY) { + qosEndOverride(); + return false; + } else { + _objc_fatal("pthread_rwlock_tryrdlock failed (%d)", err); + } + } + + void write() + { + lockdebug_rwlock_write(this); + + qosStartOverride(); + int err = pthread_rwlock_wrlock(&mLock); + if (err) _objc_fatal("pthread_rwlock_wrlock failed (%d)", err); + } + + void unlockWrite() + { + lockdebug_rwlock_unlock_write(this); + + int err = pthread_rwlock_unlock(&mLock); + if (err) _objc_fatal("pthread_rwlock_unlock failed (%d)", err); + qosEndOverride(); + } + + bool tryWrite() + { + qosStartOverride(); + int err = pthread_rwlock_trywrlock(&mLock); + if (err == 0) { + lockdebug_rwlock_try_write_success(this); + return true; + } else if (err == EBUSY) { + qosEndOverride(); + return false; + } else { + _objc_fatal("pthread_rwlock_trywrlock failed (%d)", err); + } + } + + + void assertReading() { + lockdebug_rwlock_assert_reading(this); + } + + void 
assertWriting() { + lockdebug_rwlock_assert_writing(this); + } + + void assertLocked() { + lockdebug_rwlock_assert_locked(this); + } + + void assertUnlocked() { + lockdebug_rwlock_assert_unlocked(this); + } +}; + + +#ifndef __LP64__ +typedef struct mach_header headerType; +typedef struct segment_command segmentType; +typedef struct section sectionType; +#else +typedef struct mach_header_64 headerType; +typedef struct segment_command_64 segmentType; +typedef struct section_64 sectionType; +#endif +#define headerIsBundle(hi) (hi->mhdr()->filetype == MH_BUNDLE) +#define libobjc_header ((headerType *)&_mh_dylib_header) + +// Prototypes + +/* Secure /tmp usage */ +extern int secure_open(const char *filename, int flags, uid_t euid); + + +#else + + +#error unknown OS + + +#endif + + +static inline void * +memdup(const void *mem, size_t len) +{ + void *dup = malloc(len); + memcpy(dup, mem, len); + return dup; +} + +// strdup that doesn't copy read-only memory +static inline char * +strdupIfMutable(const char *str) +{ + size_t size = strlen(str) + 1; + if (_dyld_is_memory_immutable(str, size)) { + return (char *)str; + } else { + return (char *)memdup(str, size); + } +} + +// free strdupIfMutable() result +static inline void +freeIfMutable(char *str) +{ + size_t size = strlen(str) + 1; + if (_dyld_is_memory_immutable(str, size)) { + // nothing + } else { + free(str); + } +} + +// nil-checking unsigned strdup +static inline uint8_t * +ustrdupMaybeNil(const uint8_t *str) +{ + if (!str) return nil; + return (uint8_t *)strdupIfMutable((char *)str); +} + +// OS version checking: +// +// sdkVersion() +// DYLD_OS_VERSION(mac, ios, tv, watch) +// sdkIsOlderThan(mac, ios, tv, watch) +// sdkIsAtLeast(mac, ios, tv, watch) +// +// This version order matches OBJC_AVAILABLE. + +#if TARGET_OS_OSX +# define DYLD_OS_VERSION(x, i, t, w) DYLD_MACOSX_VERSION_##x +# define sdkVersion() dyld_get_program_sdk_version() + +#elif TARGET_OS_IOS +# define DYLD_OS_VERSION(x, i, t, w) DYLD_IOS_VERSION_##i +# define sdkVersion() dyld_get_program_sdk_version() + +#elif TARGET_OS_TV + // dyld does not currently have distinct constants for tvOS +# define DYLD_OS_VERSION(x, i, t, w) DYLD_IOS_VERSION_##t +# define sdkVersion() dyld_get_program_sdk_version() + +#elif TARGET_OS_WATCH +# define DYLD_OS_VERSION(x, i, t, w) DYLD_WATCHOS_VERSION_##w + // watchOS has its own API for compatibility reasons +# define sdkVersion() dyld_get_program_sdk_watch_os_version() + +#else +# error unknown OS +#endif + + +#define sdkIsOlderThan(x, i, t, w) \ + (sdkVersion() < DYLD_OS_VERSION(x, i, t, w)) +#define sdkIsAtLeast(x, i, t, w) \ + (sdkVersion() >= DYLD_OS_VERSION(x, i, t, w)) + +// Allow bare 0 to be used in DYLD_OS_VERSION() and sdkIsOlderThan() +#define DYLD_MACOSX_VERSION_0 0 +#define DYLD_IOS_VERSION_0 0 +#define DYLD_TVOS_VERSION_0 0 +#define DYLD_WATCHOS_VERSION_0 0 + +// Pretty-print a DYLD_*_VERSION_* constant. +#define SDK_FORMAT "%hu.%hhu.%hhu" +#define FORMAT_SDK(v) \ + (unsigned short)(((uint32_t)(v))>>16), \ + (unsigned char)(((uint32_t)(v))>>8), \ + (unsigned char)(((uint32_t)(v))>>0) + +#endif diff --git a/runtime/objc-os.mm b/runtime/objc-os.mm new file mode 100644 index 0000000..cb077e5 --- /dev/null +++ b/runtime/objc-os.mm @@ -0,0 +1,778 @@ +/* + * Copyright (c) 2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-os.m +* OS portability layer. +**********************************************************************/ + +#include "objc-private.h" +#include "objc-loadmethod.h" + +#if TARGET_OS_WIN32 + +#include "objc-runtime-old.h" +#include "objcrt.h" + +int monitor_init(monitor_t *c) +{ + // fixme error checking + HANDLE mutex = CreateMutex(NULL, TRUE, NULL); + while (!c->mutex) { + // fixme memory barrier here? + if (0 == InterlockedCompareExchangePointer(&c->mutex, mutex, 0)) { + // we win - finish construction + c->waiters = CreateSemaphore(NULL, 0, 0x7fffffff, NULL); + c->waitersDone = CreateEvent(NULL, FALSE, FALSE, NULL); + InitializeCriticalSection(&c->waitCountLock); + c->waitCount = 0; + c->didBroadcast = 0; + ReleaseMutex(c->mutex); + return 0; + } + } + + // someone else allocated the mutex and constructed the monitor + ReleaseMutex(mutex); + CloseHandle(mutex); + return 0; +} + +void mutex_init(mutex_t *m) +{ + while (!m->lock) { + CRITICAL_SECTION *newlock = malloc(sizeof(CRITICAL_SECTION)); + InitializeCriticalSection(newlock); + // fixme memory barrier here? + if (0 == InterlockedCompareExchangePointer(&m->lock, newlock, 0)) { + return; + } + // someone else installed their lock first + DeleteCriticalSection(newlock); + free(newlock); + } +} + + +void recursive_mutex_init(recursive_mutex_t *m) +{ + // fixme error checking + HANDLE newmutex = CreateMutex(NULL, FALSE, NULL); + while (!m->mutex) { + // fixme memory barrier here? + if (0 == InterlockedCompareExchangePointer(&m->mutex, newmutex, 0)) { + // we win + return; + } + } + + // someone else installed their lock first + CloseHandle(newmutex); +} + + +WINBOOL APIENTRY DllMain( HMODULE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved + ) +{ + switch (ul_reason_for_call) { + case DLL_PROCESS_ATTACH: + environ_init(); + tls_init(); + lock_init(); + sel_init(3500); // old selector heuristic + exception_init(); + break; + + case DLL_THREAD_ATTACH: + break; + + case DLL_THREAD_DETACH: + case DLL_PROCESS_DETACH: + break; + } + return TRUE; +} + +OBJC_EXPORT void *_objc_init_image(HMODULE image, const objc_sections *sects) +{ + header_info *hi = malloc(sizeof(header_info)); + size_t count, i; + + hi->mhdr = (const headerType *)image; + hi->info = sects->iiStart; + hi->allClassesRealized = NO; + hi->modules = sects->modStart ? (Module *)((void **)sects->modStart+1) : 0; + hi->moduleCount = (Module *)sects->modEnd - hi->modules; + hi->protocols = sects->protoStart ? (struct old_protocol **)((void **)sects->protoStart+1) : 0; + hi->protocolCount = (struct old_protocol **)sects->protoEnd - hi->protocols; + hi->imageinfo = NULL; + hi->imageinfoBytes = 0; + // hi->imageinfo = sects->iiStart ? 
(uint8_t *)((void **)sects->iiStart+1) : 0;; +// hi->imageinfoBytes = (uint8_t *)sects->iiEnd - hi->imageinfo; + hi->selrefs = sects->selrefsStart ? (SEL *)((void **)sects->selrefsStart+1) : 0; + hi->selrefCount = (SEL *)sects->selrefsEnd - hi->selrefs; + hi->clsrefs = sects->clsrefsStart ? (Class *)((void **)sects->clsrefsStart+1) : 0; + hi->clsrefCount = (Class *)sects->clsrefsEnd - hi->clsrefs; + + count = 0; + for (i = 0; i < hi->moduleCount; i++) { + if (hi->modules[i]) count++; + } + hi->mod_count = 0; + hi->mod_ptr = 0; + if (count > 0) { + hi->mod_ptr = malloc(count * sizeof(struct objc_module)); + for (i = 0; i < hi->moduleCount; i++) { + if (hi->modules[i]) memcpy(&hi->mod_ptr[hi->mod_count++], hi->modules[i], sizeof(struct objc_module)); + } + } + + hi->moduleName = malloc(MAX_PATH * sizeof(TCHAR)); + GetModuleFileName((HMODULE)(hi->mhdr), hi->moduleName, MAX_PATH * sizeof(TCHAR)); + + appendHeader(hi); + + if (PrintImages) { + _objc_inform("IMAGES: loading image for %s%s%s%s\n", + hi->fname, + headerIsBundle(hi) ? " (bundle)" : "", + hi->info->isReplacement() ? " (replacement)":"", + hi->info->hasCategoryClassProperties() ? " (has class properties)":""); + } + + // Count classes. Size various table based on the total. + int total = 0; + int unoptimizedTotal = 0; + { + if (_getObjc2ClassList(hi, &count)) { + total += (int)count; + if (!hi->getInSharedCache()) unoptimizedTotal += count; + } + } + + _read_images(&hi, 1, total, unoptimizedTotal); + + return hi; +} + +OBJC_EXPORT void _objc_load_image(HMODULE image, header_info *hinfo) +{ + prepare_load_methods(hinfo); + call_load_methods(); +} + +OBJC_EXPORT void _objc_unload_image(HMODULE image, header_info *hinfo) +{ + _objc_fatal("image unload not supported"); +} + + +// TARGET_OS_WIN32 +#elif TARGET_OS_MAC + +#include "objc-file-old.h" +#include "objc-file.h" + + +/*********************************************************************** +* libobjc must never run static destructors. +* Cover libc's __cxa_atexit with our own definition that runs nothing. +* rdar://21734598 ER: Compiler option to suppress C++ static destructors +**********************************************************************/ +extern "C" int __cxa_atexit(); +extern "C" int __cxa_atexit() { return 0; } + + +/*********************************************************************** +* bad_magic. +* Return YES if the header has invalid Mach-o magic. +**********************************************************************/ +bool bad_magic(const headerType *mhdr) +{ + return (mhdr->magic != MH_MAGIC && mhdr->magic != MH_MAGIC_64 && + mhdr->magic != MH_CIGAM && mhdr->magic != MH_CIGAM_64); +} + + +static header_info * addHeader(const headerType *mhdr, const char *path, int &totalClasses, int &unoptimizedTotalClasses) +{ + header_info *hi; + + if (bad_magic(mhdr)) return NULL; + + bool inSharedCache = false; + + // Look for hinfo from the dyld shared cache. + hi = preoptimizedHinfoForHeader(mhdr); + if (hi) { + // Found an hinfo in the dyld shared cache. + + // Weed out duplicates. 
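+        // (isLoaded() already being set means this mach header was registered
+        // by an earlier call, so it is reported as a duplicate by returning NULL.)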
+ if (hi->isLoaded()) { + return NULL; + } + + inSharedCache = true; + + // Initialize fields not set by the shared cache + // hi->next is set by appendHeader + hi->setLoaded(true); + + if (PrintPreopt) { + _objc_inform("PREOPTIMIZATION: honoring preoptimized header info at %p for %s", hi, hi->fname()); + } + +#if !__OBJC2__ + _objc_fatal("shouldn't be here"); +#endif +#if DEBUG + // Verify image_info + size_t info_size = 0; + const objc_image_info *image_info = _getObjcImageInfo(mhdr,&info_size); + assert(image_info == hi->info()); +#endif + } + else + { + // Didn't find an hinfo in the dyld shared cache. + + // Weed out duplicates + for (hi = FirstHeader; hi; hi = hi->getNext()) { + if (mhdr == hi->mhdr()) return NULL; + } + + // Locate the __OBJC segment + size_t info_size = 0; + unsigned long seg_size; + const objc_image_info *image_info = _getObjcImageInfo(mhdr,&info_size); + const uint8_t *objc_segment = getsegmentdata(mhdr,SEG_OBJC,&seg_size); + if (!objc_segment && !image_info) return NULL; + + // Allocate a header_info entry. + // Note we also allocate space for a single header_info_rw in the + // rw_data[] inside header_info. + hi = (header_info *)calloc(sizeof(header_info) + sizeof(header_info_rw), 1); + + // Set up the new header_info entry. + hi->setmhdr(mhdr); +#if !__OBJC2__ + // mhdr must already be set + hi->mod_count = 0; + hi->mod_ptr = _getObjcModules(hi, &hi->mod_count); +#endif + // Install a placeholder image_info if absent to simplify code elsewhere + static const objc_image_info emptyInfo = {0, 0}; + hi->setinfo(image_info ?: &emptyInfo); + + hi->setLoaded(true); + hi->setAllClassesRealized(NO); + } + +#if __OBJC2__ + { + size_t count = 0; + if (_getObjc2ClassList(hi, &count)) { + totalClasses += (int)count; + if (!inSharedCache) unoptimizedTotalClasses += count; + } + } +#endif + + appendHeader(hi); + + return hi; +} + + +/*********************************************************************** +* linksToLibrary +* Returns true if the image links directly to a dylib whose install name +* is exactly the given name. +**********************************************************************/ +bool +linksToLibrary(const header_info *hi, const char *name) +{ + const struct dylib_command *cmd; + unsigned long i; + + cmd = (const struct dylib_command *) (hi->mhdr() + 1); + for (i = 0; i < hi->mhdr()->ncmds; i++) { + if (cmd->cmd == LC_LOAD_DYLIB || cmd->cmd == LC_LOAD_UPWARD_DYLIB || + cmd->cmd == LC_LOAD_WEAK_DYLIB || cmd->cmd == LC_REEXPORT_DYLIB) + { + const char *dylib = cmd->dylib.name.offset + (const char *)cmd; + if (0 == strcmp(dylib, name)) return true; + } + cmd = (const struct dylib_command *)((char *)cmd + cmd->cmdsize); + } + + return false; +} + + +#if SUPPORT_GC_COMPAT + +/*********************************************************************** +* shouldRejectGCApp +* Return YES if the executable requires GC. +**********************************************************************/ +static bool shouldRejectGCApp(const header_info *hi) +{ + assert(hi->mhdr()->filetype == MH_EXECUTE); + + if (!hi->info()->supportsGC()) { + // App does not use GC. Don't reject it. + return NO; + } + + // Exception: Trivial AppleScriptObjC apps can run without GC. + // 1. executable defines no classes + // 2. executable references NSBundle only + // 3. executable links to AppleScriptObjC.framework + // Note that objc_appRequiresGC() also knows about this. 
+ size_t classcount = 0; + size_t refcount = 0; +#if __OBJC2__ + _getObjc2ClassList(hi, &classcount); + _getObjc2ClassRefs(hi, &refcount); +#else + if (hi->mod_count == 0 || (hi->mod_count == 1 && !hi->mod_ptr[0].symtab)) classcount = 0; + else classcount = 1; + _getObjcClassRefs(hi, &refcount); +#endif + if (classcount == 0 && refcount == 1 && + linksToLibrary(hi, "/System/Library/Frameworks" + "/AppleScriptObjC.framework/Versions/A" + "/AppleScriptObjC")) + { + // It's AppleScriptObjC. Don't reject it. + return NO; + } + else { + // GC and not trivial AppleScriptObjC. Reject it. + return YES; + } +} + + +/*********************************************************************** +* rejectGCImage +* Halt if an image requires GC. +* Testing of the main executable should use rejectGCApp() instead. +**********************************************************************/ +static bool shouldRejectGCImage(const headerType *mhdr) +{ + assert(mhdr->filetype != MH_EXECUTE); + + objc_image_info *image_info; + size_t size; + +#if !__OBJC2__ + unsigned long seg_size; + // 32-bit: __OBJC seg but no image_info means no GC support + if (!getsegmentdata(mhdr, "__OBJC", &seg_size)) { + // Not objc, therefore not GC. Don't reject it. + return NO; + } + image_info = _getObjcImageInfo(mhdr, &size); + if (!image_info) { + // No image_info, therefore not GC. Don't reject it. + return NO; + } +#else + // 64-bit: no image_info means no objc at all + image_info = _getObjcImageInfo(mhdr, &size); + if (!image_info) { + // Not objc, therefore not GC. Don't reject it. + return NO; + } +#endif + + return image_info->requiresGC(); +} + +// SUPPORT_GC_COMPAT +#endif + + +/*********************************************************************** +* map_images_nolock +* Process the given images which are being mapped in by dyld. +* All class registration and fixups are performed (or deferred pending +* discovery of missing superclasses etc), and +load methods are called. +* +* info[] is in bottom-up order i.e. libobjc will be earlier in the +* array than any library that links to libobjc. +* +* Locking: loadMethodLock(old) or runtimeLock(new) acquired by map_images. +**********************************************************************/ +#if __OBJC2__ +#include "objc-file.h" +#else +#include "objc-file-old.h" +#endif + +void +map_images_nolock(unsigned mhCount, const char * const mhPaths[], + const struct mach_header * const mhdrs[]) +{ + static bool firstTime = YES; + header_info *hList[mhCount]; + uint32_t hCount; + size_t selrefCount = 0; + + // Perform first-time initialization if necessary. + // This function is called before ordinary library initializers. + // fixme defer initialization until an objc-using image is found? + if (firstTime) { + preopt_init(); + } + + if (PrintImages) { + _objc_inform("IMAGES: processing %u newly-mapped images...\n", mhCount); + } + + + // Find all images with Objective-C metadata. + hCount = 0; + + // Count classes. Size various table based on the total. 
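+    // (The two totals computed here are passed to _read_images() below, which
+    // uses them to size its tables before registering any classes.)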
+ int totalClasses = 0; + int unoptimizedTotalClasses = 0; + { + uint32_t i = mhCount; + while (i--) { + const headerType *mhdr = (const headerType *)mhdrs[i]; + + auto hi = addHeader(mhdr, mhPaths[i], totalClasses, unoptimizedTotalClasses); + if (!hi) { + // no objc data in this entry + continue; + } + + if (mhdr->filetype == MH_EXECUTE) { + // Size some data structures based on main executable's size +#if __OBJC2__ + size_t count; + _getObjc2SelectorRefs(hi, &count); + selrefCount += count; + _getObjc2MessageRefs(hi, &count); + selrefCount += count; +#else + _getObjcSelectorRefs(hi, &selrefCount); +#endif + +#if SUPPORT_GC_COMPAT + // Halt if this is a GC app. + if (shouldRejectGCApp(hi)) { + _objc_fatal_with_reason + (OBJC_EXIT_REASON_GC_NOT_SUPPORTED, + OS_REASON_FLAG_CONSISTENT_FAILURE, + "Objective-C garbage collection " + "is no longer supported."); + } +#endif + } + + hList[hCount++] = hi; + + if (PrintImages) { + _objc_inform("IMAGES: loading image for %s%s%s%s%s\n", + hi->fname(), + mhdr->filetype == MH_BUNDLE ? " (bundle)" : "", + hi->info()->isReplacement() ? " (replacement)" : "", + hi->info()->hasCategoryClassProperties() ? " (has class properties)" : "", + hi->info()->optimizedByDyld()?" (preoptimized)":""); + } + } + } + + // Perform one-time runtime initialization that must be deferred until + // the executable itself is found. This needs to be done before + // further initialization. + // (The executable may not be present in this infoList if the + // executable does not contain Objective-C code but Objective-C + // is dynamically loaded later. + if (firstTime) { + sel_init(selrefCount); + arr_init(); + +#if SUPPORT_GC_COMPAT + // Reject any GC images linked to the main executable. + // We already rejected the app itself above. + // Images loaded after launch will be rejected by dyld. + + for (uint32_t i = 0; i < hCount; i++) { + auto hi = hList[i]; + auto mh = hi->mhdr(); + if (mh->filetype != MH_EXECUTE && shouldRejectGCImage(mh)) { + _objc_fatal_with_reason + (OBJC_EXIT_REASON_GC_NOT_SUPPORTED, + OS_REASON_FLAG_CONSISTENT_FAILURE, + "%s requires Objective-C garbage collection " + "which is no longer supported.", hi->fname()); + } + } +#endif + } + + if (hCount > 0) { + _read_images(hList, hCount, totalClasses, unoptimizedTotalClasses); + } + + firstTime = NO; +} + + +/*********************************************************************** +* unmap_image_nolock +* Process the given image which is about to be unmapped by dyld. +* mh is mach_header instead of headerType because that's what +* dyld_priv.h says even for 64-bit. +* +* Locking: loadMethodLock(both) and runtimeLock(new) acquired by unmap_image. +**********************************************************************/ +void +unmap_image_nolock(const struct mach_header *mh) +{ + if (PrintImages) { + _objc_inform("IMAGES: processing 1 newly-unmapped image...\n"); + } + + header_info *hi; + + // Find the runtime's header_info struct for the image + for (hi = FirstHeader; hi != NULL; hi = hi->getNext()) { + if (hi->mhdr() == (const headerType *)mh) { + break; + } + } + + if (!hi) return; + + if (PrintImages) { + _objc_inform("IMAGES: unloading image for %s%s%s\n", + hi->fname(), + hi->mhdr()->filetype == MH_BUNDLE ? " (bundle)" : "", + hi->info()->isReplacement() ? 
" (replacement)" : ""); + } + + _unload_image(hi); + + // Remove header_info from header list + removeHeader(hi); + free(hi); +} + + +/*********************************************************************** +* static_init +* Run C++ static constructor functions. +* libc calls _objc_init() before dyld would call our static constructors, +* so we have to do it ourselves. +**********************************************************************/ +static void static_init() +{ + size_t count; + Initializer *inits = getLibobjcInitializers(&_mh_dylib_header, &count); + for (size_t i = 0; i < count; i++) { + inits[i](); + } +} + + +/*********************************************************************** +* _objc_init +* Bootstrap initialization. Registers our image notifier with dyld. +* Called by libSystem BEFORE library initialization time +**********************************************************************/ + +void _objc_init(void) +{ + static bool initialized = false; + if (initialized) return; + initialized = true; + + // fixme defer initialization until an objc-using image is found? + environ_init(); + tls_init(); + static_init(); + lock_init(); + exception_init(); + + _dyld_objc_notify_register(&map_2_images, load_images, unmap_image); +} + + +/*********************************************************************** +* _headerForAddress. +* addr can be a class or a category +**********************************************************************/ +static const header_info *_headerForAddress(void *addr) +{ +#if __OBJC2__ + const char *segnames[] = { "__DATA", "__DATA_CONST", "__DATA_DIRTY" }; +#else + const char *segnames[] = { "__OBJC" }; +#endif + header_info *hi; + + for (hi = FirstHeader; hi != NULL; hi = hi->getNext()) { + for (size_t i = 0; i < sizeof(segnames)/sizeof(segnames[0]); i++) { + unsigned long seg_size; + uint8_t *seg = getsegmentdata(hi->mhdr(), segnames[i], &seg_size); + if (!seg) continue; + + // Is the class in this header? + if ((uint8_t *)addr >= seg && (uint8_t *)addr < seg + seg_size) { + return hi; + } + } + } + + // Not found + return 0; +} + + +/*********************************************************************** +* _headerForClass +* Return the image header containing this class, or NULL. +* Returns NULL on runtime-constructed classes, and the NSCF classes. +**********************************************************************/ +const header_info *_headerForClass(Class cls) +{ + return _headerForAddress(cls); +} + + +/********************************************************************** +* secure_open +* Securely open a file from a world-writable directory (like /tmp) +* If the file does not exist, it will be atomically created with mode 0600 +* If the file exists, it must be, and remain after opening: +* 1. a regular file (in particular, not a symlink) +* 2. owned by euid +* 3. permissions 0600 +* 4. link count == 1 +* Returns a file descriptor or -1. Errno may or may not be set on error. +**********************************************************************/ +int secure_open(const char *filename, int flags, uid_t euid) +{ + struct stat fs, ls; + int fd = -1; + bool truncate = NO; + bool create = NO; + + if (flags & O_TRUNC) { + // Don't truncate the file until after it is open and verified. 
+ truncate = YES; + flags &= ~O_TRUNC; + } + if (flags & O_CREAT) { + // Don't create except when we're ready for it + create = YES; + flags &= ~O_CREAT; + flags &= ~O_EXCL; + } + + if (lstat(filename, &ls) < 0) { + if (errno == ENOENT && create) { + // No such file - create it + fd = open(filename, flags | O_CREAT | O_EXCL, 0600); + if (fd >= 0) { + // File was created successfully. + // New file does not need to be truncated. + return fd; + } else { + // File creation failed. + return -1; + } + } else { + // lstat failed, or user doesn't want to create the file + return -1; + } + } else { + // lstat succeeded - verify attributes and open + if (S_ISREG(ls.st_mode) && // regular file? + ls.st_nlink == 1 && // link count == 1? + ls.st_uid == euid && // owned by euid? + (ls.st_mode & ALLPERMS) == (S_IRUSR | S_IWUSR)) // mode 0600? + { + // Attributes look ok - open it and check attributes again + fd = open(filename, flags, 0000); + if (fd >= 0) { + // File is open - double-check attributes + if (0 == fstat(fd, &fs) && + fs.st_nlink == ls.st_nlink && // link count == 1? + fs.st_uid == ls.st_uid && // owned by euid? + fs.st_mode == ls.st_mode && // regular file, 0600? + fs.st_ino == ls.st_ino && // same inode as before? + fs.st_dev == ls.st_dev) // same device as before? + { + // File is open and OK + if (truncate) ftruncate(fd, 0); + return fd; + } else { + // Opened file looks funny - close it + close(fd); + return -1; + } + } else { + // File didn't open + return -1; + } + } else { + // Unopened file looks funny - don't open it + return -1; + } + } +} + + +#if TARGET_OS_IPHONE + +const char *__crashreporter_info__ = NULL; + +const char *CRSetCrashLogMessage(const char *msg) +{ + __crashreporter_info__ = msg; + return msg; +} +const char *CRGetCrashLogMessage(void) +{ + return __crashreporter_info__; +} + +#endif + +// TARGET_OS_MAC +#else + + +#error unknown OS + + +#endif diff --git a/runtime/objc-private.h b/runtime/objc-private.h new file mode 100644 index 0000000..07bf78c --- /dev/null +++ b/runtime/objc-private.h @@ -0,0 +1,1022 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * objc-private.h + * Copyright 1988-1996, NeXT Software, Inc. + */ + +#ifndef _OBJC_PRIVATE_H_ +#define _OBJC_PRIVATE_H_ + +#include "objc-config.h" + +/* Isolate ourselves from the definitions of id and Class in the compiler + * and public headers. 
+ */ + +#ifdef _OBJC_OBJC_H_ +#error include objc-private.h before other headers +#endif + +#define OBJC_TYPES_DEFINED 1 +#define OBJC_OLD_DISPATCH_PROTOTYPES 0 + +#include // for nullptr_t +#include +#include + +struct objc_class; +struct objc_object; + +typedef struct objc_class *Class; +typedef struct objc_object *id; + +namespace { + struct SideTable; +}; + + +#if (!SUPPORT_NONPOINTER_ISA && !SUPPORT_PACKED_ISA && !SUPPORT_INDEXED_ISA) ||\ + ( SUPPORT_NONPOINTER_ISA && SUPPORT_PACKED_ISA && !SUPPORT_INDEXED_ISA) ||\ + ( SUPPORT_NONPOINTER_ISA && !SUPPORT_PACKED_ISA && SUPPORT_INDEXED_ISA) + // good config +#else +# error bad config +#endif + + +union isa_t +{ + isa_t() { } + isa_t(uintptr_t value) : bits(value) { } + + Class cls; + uintptr_t bits; + +#if SUPPORT_PACKED_ISA + + // extra_rc must be the MSB-most field (so it matches carry/overflow flags) + // nonpointer must be the LSB (fixme or get rid of it) + // shiftcls must occupy the same bits that a real class pointer would + // bits + RC_ONE is equivalent to extra_rc + 1 + // RC_HALF is the high bit of extra_rc (i.e. half of its range) + + // future expansion: + // uintptr_t fast_rr : 1; // no r/r overrides + // uintptr_t lock : 2; // lock for atomic property, @synch + // uintptr_t extraBytes : 1; // allocated with extra bytes + +# if __arm64__ +# define ISA_MASK 0x0000000ffffffff8ULL +# define ISA_MAGIC_MASK 0x000003f000000001ULL +# define ISA_MAGIC_VALUE 0x000001a000000001ULL + struct { + uintptr_t nonpointer : 1; + uintptr_t has_assoc : 1; + uintptr_t has_cxx_dtor : 1; + uintptr_t shiftcls : 33; // MACH_VM_MAX_ADDRESS 0x1000000000 + uintptr_t magic : 6; + uintptr_t weakly_referenced : 1; + uintptr_t deallocating : 1; + uintptr_t has_sidetable_rc : 1; + uintptr_t extra_rc : 19; +# define RC_ONE (1ULL<<45) +# define RC_HALF (1ULL<<18) + }; + +# elif __x86_64__ +# define ISA_MASK 0x00007ffffffffff8ULL +# define ISA_MAGIC_MASK 0x001f800000000001ULL +# define ISA_MAGIC_VALUE 0x001d800000000001ULL + struct { + uintptr_t nonpointer : 1; + uintptr_t has_assoc : 1; + uintptr_t has_cxx_dtor : 1; + uintptr_t shiftcls : 44; // MACH_VM_MAX_ADDRESS 0x7fffffe00000 + uintptr_t magic : 6; + uintptr_t weakly_referenced : 1; + uintptr_t deallocating : 1; + uintptr_t has_sidetable_rc : 1; + uintptr_t extra_rc : 8; +# define RC_ONE (1ULL<<56) +# define RC_HALF (1ULL<<7) + }; + +# else +# error unknown architecture for packed isa +# endif + +// SUPPORT_PACKED_ISA +#endif + + +#if SUPPORT_INDEXED_ISA + +# if __ARM_ARCH_7K__ >= 2 + +# define ISA_INDEX_IS_NPI 1 +# define ISA_INDEX_MASK 0x0001FFFC +# define ISA_INDEX_SHIFT 2 +# define ISA_INDEX_BITS 15 +# define ISA_INDEX_COUNT (1 << ISA_INDEX_BITS) +# define ISA_INDEX_MAGIC_MASK 0x001E0001 +# define ISA_INDEX_MAGIC_VALUE 0x001C0001 + struct { + uintptr_t nonpointer : 1; + uintptr_t has_assoc : 1; + uintptr_t indexcls : 15; + uintptr_t magic : 4; + uintptr_t has_cxx_dtor : 1; + uintptr_t weakly_referenced : 1; + uintptr_t deallocating : 1; + uintptr_t has_sidetable_rc : 1; + uintptr_t extra_rc : 7; +# define RC_ONE (1ULL<<25) +# define RC_HALF (1ULL<<6) + }; + +# else +# error unknown architecture for indexed isa +# endif + +// SUPPORT_INDEXED_ISA +#endif + +}; + + +struct objc_object { +private: + isa_t isa; + +public: + + // ISA() assumes this is NOT a tagged pointer object + Class ISA(); + + // getIsa() allows this to be a tagged pointer object + Class getIsa(); + + // initIsa() should be used to init the isa of new objects only. 
+ // If this object already has an isa, use changeIsa() for correctness. + // initInstanceIsa(): objects with no custom RR/AWZ + // initClassIsa(): class objects + // initProtocolIsa(): protocol objects + // initIsa(): other objects + void initIsa(Class cls /*nonpointer=false*/); + void initClassIsa(Class cls /*nonpointer=maybe*/); + void initProtocolIsa(Class cls /*nonpointer=maybe*/); + void initInstanceIsa(Class cls, bool hasCxxDtor); + + // changeIsa() should be used to change the isa of existing objects. + // If this is a new object, use initIsa() for performance. + Class changeIsa(Class newCls); + + bool hasNonpointerIsa(); + bool isTaggedPointer(); + bool isBasicTaggedPointer(); + bool isExtTaggedPointer(); + bool isClass(); + + // object may have associated objects? + bool hasAssociatedObjects(); + void setHasAssociatedObjects(); + + // object may be weakly referenced? + bool isWeaklyReferenced(); + void setWeaklyReferenced_nolock(); + + // object may have -.cxx_destruct implementation? + bool hasCxxDtor(); + + // Optimized calls to retain/release methods + id retain(); + void release(); + id autorelease(); + + // Implementations of retain/release methods + id rootRetain(); + bool rootRelease(); + id rootAutorelease(); + bool rootTryRetain(); + bool rootReleaseShouldDealloc(); + uintptr_t rootRetainCount(); + + // Implementation of dealloc methods + bool rootIsDeallocating(); + void clearDeallocating(); + void rootDealloc(); + +private: + void initIsa(Class newCls, bool nonpointer, bool hasCxxDtor); + + // Slow paths for inline control + id rootAutorelease2(); + bool overrelease_error(); + +#if SUPPORT_NONPOINTER_ISA + // Unified retain count manipulation for nonpointer isa + id rootRetain(bool tryRetain, bool handleOverflow); + bool rootRelease(bool performDealloc, bool handleUnderflow); + id rootRetain_overflow(bool tryRetain); + bool rootRelease_underflow(bool performDealloc); + + void clearDeallocating_slow(); + + // Side table retain count overflow for nonpointer isa + void sidetable_lock(); + void sidetable_unlock(); + + void sidetable_moveExtraRC_nolock(size_t extra_rc, bool isDeallocating, bool weaklyReferenced); + bool sidetable_addExtraRC_nolock(size_t delta_rc); + size_t sidetable_subExtraRC_nolock(size_t delta_rc); + size_t sidetable_getExtraRC_nolock(); +#endif + + // Side-table-only retain count + bool sidetable_isDeallocating(); + void sidetable_clearDeallocating(); + + bool sidetable_isWeaklyReferenced(); + void sidetable_setWeaklyReferenced_nolock(); + + id sidetable_retain(); + id sidetable_retain_slow(SideTable& table); + + uintptr_t sidetable_release(bool performDealloc = true); + uintptr_t sidetable_release_slow(SideTable& table, bool performDealloc = true); + + bool sidetable_tryRetain(); + + uintptr_t sidetable_retainCount(); +#if DEBUG + bool sidetable_present(); +#endif +}; + + +#if __OBJC2__ +typedef struct method_t *Method; +typedef struct ivar_t *Ivar; +typedef struct category_t *Category; +typedef struct property_t *objc_property_t; +#else +typedef struct old_method *Method; +typedef struct old_ivar *Ivar; +typedef struct old_category *Category; +typedef struct old_property *objc_property_t; +#endif + +// Public headers + +#include "objc.h" +#include "runtime.h" +#include "objc-os.h" +#include "objc-abi.h" +#include "objc-api.h" +#include "objc-config.h" +#include "objc-internal.h" +#include "maptable.h" +#include "hashtable2.h" + +/* Do not include message.h here. 
*/ +/* #include "message.h" */ + +#define __APPLE_API_PRIVATE +#include "objc-gdb.h" +#undef __APPLE_API_PRIVATE + + +// Private headers + +#if __OBJC2__ +#include "objc-runtime-new.h" +#else +#include "objc-runtime-old.h" +#endif + +#include "objc-references.h" +#include "objc-initialize.h" +#include "objc-loadmethod.h" + + +#if SUPPORT_PREOPT && __cplusplus +#include +using objc_selopt_t = const objc_opt::objc_selopt_t; +#else +struct objc_selopt_t; +#endif + + +#define STRINGIFY(x) #x +#define STRINGIFY2(x) STRINGIFY(x) + +__BEGIN_DECLS + +struct header_info; + +// Split out the rw data from header info. For now put it in a huge array +// that more than exceeds the space needed. In future we'll just allocate +// this in the shared cache builder. +typedef struct header_info_rw { + + bool getLoaded() const { + return isLoaded; + } + + void setLoaded(bool v) { + isLoaded = v ? 1: 0; + } + + bool getAllClassesRealized() const { + return allClassesRealized; + } + + void setAllClassesRealized(bool v) { + allClassesRealized = v ? 1: 0; + } + + header_info *getNext() const { + return (header_info *)(next << 2); + } + + void setNext(header_info *v) { + next = ((uintptr_t)v) >> 2; + } + +private: +#ifdef __LP64__ + uintptr_t isLoaded : 1; + uintptr_t allClassesRealized : 1; + uintptr_t next : 62; +#else + uintptr_t isLoaded : 1; + uintptr_t allClassesRealized : 1; + uintptr_t next : 30; +#endif +} header_info_rw; + +struct header_info_rw* getPreoptimizedHeaderRW(const struct header_info *const hdr); + +typedef struct header_info { +private: + // Note, this is no longer a pointer, but instead an offset to a pointer + // from this location. + intptr_t mhdr_offset; + + // Note, this is no longer a pointer, but instead an offset to a pointer + // from this location. + intptr_t info_offset; + + // Do not add fields without editing ObjCModernAbstraction.hpp +public: + + header_info_rw *getHeaderInfoRW() { + header_info_rw *preopt = + isPreoptimized() ? 
getPreoptimizedHeaderRW(this) : nil; + if (preopt) return preopt; + else return &rw_data[0]; + } + + const headerType *mhdr() const { + return (const headerType *)(((intptr_t)&mhdr_offset) + mhdr_offset); + } + + void setmhdr(const headerType *mhdr) { + mhdr_offset = (intptr_t)mhdr - (intptr_t)&mhdr_offset; + } + + const objc_image_info *info() const { + return (const objc_image_info *)(((intptr_t)&info_offset) + info_offset); + } + + void setinfo(const objc_image_info *info) { + info_offset = (intptr_t)info - (intptr_t)&info_offset; + } + + bool isLoaded() { + return getHeaderInfoRW()->getLoaded(); + } + + void setLoaded(bool v) { + getHeaderInfoRW()->setLoaded(v); + } + + bool areAllClassesRealized() { + return getHeaderInfoRW()->getAllClassesRealized(); + } + + void setAllClassesRealized(bool v) { + getHeaderInfoRW()->setAllClassesRealized(v); + } + + header_info *getNext() { + return getHeaderInfoRW()->getNext(); + } + + void setNext(header_info *v) { + getHeaderInfoRW()->setNext(v); + } + + bool isBundle() { + return mhdr()->filetype == MH_BUNDLE; + } + + const char *fname() const { + return dyld_image_path_containing_address(mhdr()); + } + + bool isPreoptimized() const; + +#if !__OBJC2__ + struct old_protocol **proto_refs; + struct objc_module *mod_ptr; + size_t mod_count; +# if TARGET_OS_WIN32 + struct objc_module **modules; + size_t moduleCount; + struct old_protocol **protocols; + size_t protocolCount; + void *imageinfo; + size_t imageinfoBytes; + SEL *selrefs; + size_t selrefCount; + struct objc_class **clsrefs; + size_t clsrefCount; + TCHAR *moduleName; +# endif +#endif + +private: + // Images in the shared cache will have an empty array here while those + // allocated at run time will allocate a single entry. + header_info_rw rw_data[]; +} header_info; + +extern header_info *FirstHeader; +extern header_info *LastHeader; +extern int HeaderCount; + +extern void appendHeader(header_info *hi); +extern void removeHeader(header_info *hi); + +extern objc_image_info *_getObjcImageInfo(const headerType *head, size_t *size); +extern bool _hasObjcContents(const header_info *hi); + + +// Mach-O segment and section names are 16 bytes and may be un-terminated. 
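+// (Illustrative note, not part of the upstream sources: a Mach-O segment_command's
+// segname field is char[16] with no guaranteed NUL terminator, so the helpers below
+// compare at most 16 bytes with strncmp, e.g. segnameEquals(seg->segname, "__DATA"),
+// rather than relying on strcmp.)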
+ +static inline bool segnameEquals(const char *lhs, const char *rhs) { + return 0 == strncmp(lhs, rhs, 16); +} + +static inline bool segnameStartsWith(const char *segname, const char *prefix) { + return 0 == strncmp(segname, prefix, strlen(prefix)); +} + +static inline bool sectnameEquals(const char *lhs, const char *rhs) { + return segnameEquals(lhs, rhs); +} + +static inline bool sectnameStartsWith(const char *sectname, const char *prefix){ + return segnameStartsWith(sectname, prefix); +} + + +/* selectors */ +extern void sel_init(size_t selrefCount); +extern SEL sel_registerNameNoLock(const char *str, bool copy); +extern void sel_lock(void); +extern void sel_unlock(void); + +extern SEL SEL_load; +extern SEL SEL_initialize; +extern SEL SEL_resolveClassMethod; +extern SEL SEL_resolveInstanceMethod; +extern SEL SEL_cxx_construct; +extern SEL SEL_cxx_destruct; +extern SEL SEL_retain; +extern SEL SEL_release; +extern SEL SEL_autorelease; +extern SEL SEL_retainCount; +extern SEL SEL_alloc; +extern SEL SEL_allocWithZone; +extern SEL SEL_dealloc; +extern SEL SEL_copy; +extern SEL SEL_new; +extern SEL SEL_forwardInvocation; +extern SEL SEL_tryRetain; +extern SEL SEL_isDeallocating; +extern SEL SEL_retainWeakReference; +extern SEL SEL_allowsWeakReference; + +/* preoptimization */ +extern void preopt_init(void); +extern void disableSharedCacheOptimizations(void); +extern bool isPreoptimized(void); +extern bool noMissingWeakSuperclasses(void); +extern header_info *preoptimizedHinfoForHeader(const headerType *mhdr); + +extern objc_selopt_t *preoptimizedSelectors(void); + +extern Protocol *getPreoptimizedProtocol(const char *name); + +extern Class getPreoptimizedClass(const char *name); +extern Class* copyPreoptimizedClasses(const char *name, int *outCount); + +extern Class _calloc_class(size_t size); + +/* method lookup */ +extern IMP lookUpImpOrNil(Class, SEL, id obj, bool initialize, bool cache, bool resolver); +extern IMP lookUpImpOrForward(Class, SEL, id obj, bool initialize, bool cache, bool resolver); + +extern IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel); +extern bool class_respondsToSelector_inst(Class cls, SEL sel, id inst); + +extern bool objcMsgLogEnabled; +extern bool logMessageSend(bool isClassMethod, + const char *objectsClass, + const char *implementingClass, + SEL selector); + +/* message dispatcher */ +extern IMP _class_lookupMethodAndLoadCache3(id, SEL, Class); + +#if !OBJC_OLD_DISPATCH_PROTOTYPES +extern void _objc_msgForward_impcache(void); +#else +extern id _objc_msgForward_impcache(id, SEL, ...); +#endif + +/* errors */ +extern void __objc_error(id, const char *, ...) __attribute__((format (printf, 2, 3), noreturn)); +extern void _objc_inform(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern void _objc_inform_on_crash(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +extern void _objc_inform_now_and_on_crash(const char *fmt, ...) 
__attribute__((format (printf, 1, 2))); +extern void _objc_inform_deprecated(const char *oldname, const char *newname) __attribute__((noinline)); +extern void inform_duplicate(const char *name, Class oldCls, Class cls); + +/* magic */ +extern Class _objc_getFreedObjectClass (void); + +/* map table additions */ +extern void *NXMapKeyCopyingInsert(NXMapTable *table, const void *key, const void *value); +extern void *NXMapKeyFreeingRemove(NXMapTable *table, const void *key); + +/* hash table additions */ +extern unsigned _NXHashCapacity(NXHashTable *table); +extern void _NXHashRehashToCapacity(NXHashTable *table, unsigned newCapacity); + +/* property attribute parsing */ +extern const char *copyPropertyAttributeString(const objc_property_attribute_t *attrs, unsigned int count); +extern objc_property_attribute_t *copyPropertyAttributeList(const char *attrs, unsigned int *outCount); +extern char *copyPropertyAttributeValue(const char *attrs, const char *name); + +/* locking */ +extern void lock_init(void); +extern rwlock_t selLock; +extern mutex_t cacheUpdateLock; +extern recursive_mutex_t loadMethodLock; +#if __OBJC2__ +extern rwlock_t runtimeLock; +#else +extern mutex_t classLock; +extern mutex_t methodListLock; +#endif + +class monitor_locker_t : nocopy_t { + monitor_t& lock; + public: + monitor_locker_t(monitor_t& newLock) : lock(newLock) { lock.enter(); } + ~monitor_locker_t() { lock.leave(); } +}; + +class mutex_locker_t : nocopy_t { + mutex_t& lock; + public: + mutex_locker_t(mutex_t& newLock) + : lock(newLock) { lock.lock(); } + ~mutex_locker_t() { lock.unlock(); } +}; + +class recursive_mutex_locker_t : nocopy_t { + recursive_mutex_t& lock; + public: + recursive_mutex_locker_t(recursive_mutex_t& newLock) + : lock(newLock) { lock.lock(); } + ~recursive_mutex_locker_t() { lock.unlock(); } +}; + +class rwlock_reader_t : nocopy_t { + rwlock_t& lock; + public: + rwlock_reader_t(rwlock_t& newLock) : lock(newLock) { lock.read(); } + ~rwlock_reader_t() { lock.unlockRead(); } +}; + +class rwlock_writer_t : nocopy_t { + rwlock_t& lock; + public: + rwlock_writer_t(rwlock_t& newLock) : lock(newLock) { lock.write(); } + ~rwlock_writer_t() { lock.unlockWrite(); } +}; + + +/* Exceptions */ +struct alt_handler_list; +extern void exception_init(void); +extern void _destroyAltHandlerList(struct alt_handler_list *list); + +/* Class change notifications (gdb only for now) */ +#define OBJC_CLASS_ADDED (1<<0) +#define OBJC_CLASS_REMOVED (1<<1) +#define OBJC_CLASS_IVARS_CHANGED (1<<2) +#define OBJC_CLASS_METHODS_CHANGED (1<<3) +extern void gdb_objc_class_changed(Class cls, unsigned long changes, const char *classname) + __attribute__((noinline)); + + +// Settings from environment variables +#define OPTION(var, env, help) extern bool var; +#include "objc-env.h" +#undef OPTION + +extern void environ_init(void); + +extern void logReplacedMethod(const char *className, SEL s, bool isMeta, const char *catName, IMP oldImp, IMP newImp); + + +// objc per-thread storage +typedef struct { + struct _objc_initializing_classes *initializingClasses; // for +initialize + struct SyncCache *syncCache; // for @synchronize + struct alt_handler_list *handlerList; // for exception alt handlers + char *printableNames[4]; // temporary demangled names for logging + + // If you add new fields here, don't forget to update + // _objc_pthread_destroyspecific() + +} _objc_pthread_data; + +extern _objc_pthread_data *_objc_fetch_pthread_data(bool create); +extern void tls_init(void); + +// encoding.h +extern unsigned int 
encoding_getNumberOfArguments(const char *typedesc); +extern unsigned int encoding_getSizeOfArguments(const char *typedesc); +extern unsigned int encoding_getArgumentInfo(const char *typedesc, unsigned int arg, const char **type, int *offset); +extern void encoding_getReturnType(const char *t, char *dst, size_t dst_len); +extern char * encoding_copyReturnType(const char *t); +extern void encoding_getArgumentType(const char *t, unsigned int index, char *dst, size_t dst_len); +extern char *encoding_copyArgumentType(const char *t, unsigned int index); + +// sync.h +extern void _destroySyncCache(struct SyncCache *cache); + +// arr +extern void arr_init(void); +extern id objc_autoreleaseReturnValue(id obj); + +// block trampolines +extern IMP _imp_implementationWithBlockNoCopy(id block); + +// layout.h +typedef struct { + uint8_t *bits; + size_t bitCount; + size_t bitsAllocated; + bool weak; +} layout_bitmap; +extern layout_bitmap layout_bitmap_create(const unsigned char *layout_string, size_t layoutStringInstanceSize, size_t instanceSize, bool weak); +extern layout_bitmap layout_bitmap_create_empty(size_t instanceSize, bool weak); +extern void layout_bitmap_free(layout_bitmap bits); +extern const unsigned char *layout_string_create(layout_bitmap bits); +extern void layout_bitmap_set_ivar(layout_bitmap bits, const char *type, size_t offset); +extern void layout_bitmap_grow(layout_bitmap *bits, size_t newCount); +extern void layout_bitmap_slide(layout_bitmap *bits, size_t oldPos, size_t newPos); +extern void layout_bitmap_slide_anywhere(layout_bitmap *bits, size_t oldPos, size_t newPos); +extern bool layout_bitmap_splat(layout_bitmap dst, layout_bitmap src, + size_t oldSrcInstanceSize); +extern bool layout_bitmap_or(layout_bitmap dst, layout_bitmap src, const char *msg); +extern bool layout_bitmap_clear(layout_bitmap dst, layout_bitmap src, const char *msg); +extern void layout_bitmap_print(layout_bitmap bits); + + +// fixme runtime +extern Class look_up_class(const char *aClassName, bool includeUnconnected, bool includeClassHandler); +extern "C" void map_2_images(unsigned count, const char * const paths[], + const struct mach_header * const mhdrs[]); +extern void map_images_nolock(unsigned count, const char * const paths[], + const struct mach_header * const mhdrs[]); +extern void load_images(const char *path, const struct mach_header *mh); +extern void unmap_image(const char *path, const struct mach_header *mh); +extern void unmap_image_nolock(const struct mach_header *mh); +extern void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClass); +extern void _unload_image(header_info *hi); +extern const char ** _objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount); + + +extern const header_info *_headerForClass(Class cls); + +extern Class _class_remap(Class cls); +extern Class _class_getNonMetaClass(Class cls, id obj); +extern Ivar _class_getVariable(Class cls, const char *name); + +extern unsigned _class_createInstancesFromZone(Class cls, size_t extraBytes, void *zone, id *results, unsigned num_requested); +extern id _objc_constructOrFree(id bytes, Class cls); + +extern const char *_category_getName(Category cat); +extern const char *_category_getClassName(Category cat); +extern Class _category_getClass(Category cat); +extern IMP _category_getLoadMethod(Category cat); + +extern id object_cxxConstructFromClass(id obj, Class cls); +extern void object_cxxDestruct(id obj); + +extern void _class_resolveMethod(Class cls, SEL sel, id inst); + 
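+// Illustrative sketch, not part of the upstream objc4 sources: one way the
+// encoding_* helpers declared above could be used to log a method signature
+// such as "v24@0:8i16". The helper name encoding_logSignature is hypothetical,
+// and this assumes <stdlib.h> (for free) is already pulled in via objc-os.h.
+static inline void encoding_logSignature(const char *typedesc)
+{
+    unsigned int nargs = encoding_getNumberOfArguments(typedesc);
+    char *ret = encoding_copyReturnType(typedesc);
+    _objc_inform("return type '%s', %u argument(s)", ret, nargs);
+    free(ret);
+    for (unsigned int i = 0; i < nargs; i++) {
+        char *arg = encoding_copyArgumentType(typedesc, i);
+        _objc_inform("    argument %u: '%s'", i, arg);
+        free(arg);
+    }
+}
+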
+extern void fixupCopiedIvars(id newObject, id oldObject); +extern Class _class_getClassForIvar(Class cls, Ivar ivar); + + +#define OBJC_WARN_DEPRECATED \ + do { \ + static int warned = 0; \ + if (!warned) { \ + warned = 1; \ + _objc_inform_deprecated(__FUNCTION__, NULL); \ + } \ + } while (0) \ + +__END_DECLS + + +#ifndef STATIC_ASSERT +# define STATIC_ASSERT(x) _STATIC_ASSERT2(x, __LINE__) +# define _STATIC_ASSERT2(x, line) _STATIC_ASSERT3(x, line) +# define _STATIC_ASSERT3(x, line) \ + typedef struct { \ + int _static_assert[(x) ? 0 : -1]; \ + } _static_assert_ ## line __attribute__((unavailable)) +#endif + +#define countof(arr) (sizeof(arr) / sizeof((arr)[0])) + + +static __inline uint32_t _objc_strhash(const char *s) { + uint32_t hash = 0; + for (;;) { + int a = *s++; + if (0 == a) break; + hash += (hash << 8) + a; + } + return hash; +} + +#if __cplusplus + +template +static inline T log2u(T x) { + return (x<2) ? 0 : log2u(x>>1)+1; +} + +template +static inline T exp2u(T x) { + return (1 << x); +} + +template +static T exp2m1u(T x) { + return (1 << x) - 1; +} + +#endif + +// Misalignment-safe integer types +__attribute__((aligned(1))) typedef uintptr_t unaligned_uintptr_t; +__attribute__((aligned(1))) typedef intptr_t unaligned_intptr_t; +__attribute__((aligned(1))) typedef uint64_t unaligned_uint64_t; +__attribute__((aligned(1))) typedef int64_t unaligned_int64_t; +__attribute__((aligned(1))) typedef uint32_t unaligned_uint32_t; +__attribute__((aligned(1))) typedef int32_t unaligned_int32_t; +__attribute__((aligned(1))) typedef uint16_t unaligned_uint16_t; +__attribute__((aligned(1))) typedef int16_t unaligned_int16_t; + + +// Global operator new and delete. We must not use any app overrides. +// This ALSO REQUIRES each of these be in libobjc's unexported symbol list. +#if __cplusplus +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Winline-new-delete" +#include +inline void* operator new(std::size_t size) throw (std::bad_alloc) { return malloc(size); } +inline void* operator new[](std::size_t size) throw (std::bad_alloc) { return malloc(size); } +inline void* operator new(std::size_t size, const std::nothrow_t&) throw() { return malloc(size); } +inline void* operator new[](std::size_t size, const std::nothrow_t&) throw() { return malloc(size); } +inline void operator delete(void* p) throw() { free(p); } +inline void operator delete[](void* p) throw() { free(p); } +inline void operator delete(void* p, const std::nothrow_t&) throw() { free(p); } +inline void operator delete[](void* p, const std::nothrow_t&) throw() { free(p); } +#pragma clang diagnostic pop +#endif + + +class TimeLogger { + uint64_t mStart; + bool mRecord; + public: + TimeLogger(bool record = true) + : mStart(nanoseconds()) + , mRecord(record) + { } + + void log(const char *msg) { + if (mRecord) { + uint64_t end = nanoseconds(); + _objc_inform("%.2f ms: %s", (end - mStart) / 1000000.0, msg); + mStart = nanoseconds(); + } + } +}; + + +// StripedMap is a map of void* -> T, sized appropriately +// for cache-friendly lock striping. +// For example, this may be used as StripedMap +// or as StripedMap where SomeStruct stores a spin lock. 
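+//
+// (Illustrative usage sketch, not part of the upstream sources: a stripe is
+// selected by hashing the object's address, e.g.
+//     static StripedMap<spinlock_t> SomeLocks;
+//     spinlock_t &l = SomeLocks[obj];   // obj's address picks one of StripeCount locks
+//     l.lock(); ... l.unlock();
+// "SomeLocks" is a placeholder name for this note only; unrelated objects
+// usually land on different, cache-line-padded stripes.)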
+template +class StripedMap { + + enum { CacheLineSize = 64 }; + +#if TARGET_OS_EMBEDDED + enum { StripeCount = 8 }; +#else + enum { StripeCount = 64 }; +#endif + + struct PaddedT { + T value alignas(CacheLineSize); + }; + + PaddedT array[StripeCount]; + + static unsigned int indexForPointer(const void *p) { + uintptr_t addr = reinterpret_cast(p); + return ((addr >> 4) ^ (addr >> 9)) % StripeCount; + } + + public: + T& operator[] (const void *p) { + return array[indexForPointer(p)].value; + } + const T& operator[] (const void *p) const { + return const_cast>(this)[p]; + } + +#if DEBUG + StripedMap() { + // Verify alignment expectations. + uintptr_t base = (uintptr_t)&array[0].value; + uintptr_t delta = (uintptr_t)&array[1].value - base; + assert(delta % CacheLineSize == 0); + assert(base % CacheLineSize == 0); + } +#endif +}; + + +// DisguisedPtr acts like pointer type T*, except the +// stored value is disguised to hide it from tools like `leaks`. +// nil is disguised as itself so zero-filled memory works as expected, +// which means 0x80..00 is also disguised as itself but we don't care. +// Note that weak_entry_t knows about this encoding. +template +class DisguisedPtr { + uintptr_t value; + + static uintptr_t disguise(T* ptr) { + return -(uintptr_t)ptr; + } + + static T* undisguise(uintptr_t val) { + return (T*)-val; + } + + public: + DisguisedPtr() { } + DisguisedPtr(T* ptr) + : value(disguise(ptr)) { } + DisguisedPtr(const DisguisedPtr& ptr) + : value(ptr.value) { } + + DisguisedPtr& operator = (T* rhs) { + value = disguise(rhs); + return *this; + } + DisguisedPtr& operator = (const DisguisedPtr& rhs) { + value = rhs.value; + return *this; + } + + operator T* () const { + return undisguise(value); + } + T* operator -> () const { + return undisguise(value); + } + T& operator * () const { + return *undisguise(value); + } + T& operator [] (size_t i) const { + return undisguise(value)[i]; + } + + // pointer arithmetic operators omitted + // because we don't currently use them anywhere +}; + +// fixme type id is weird and not identical to objc_object* +static inline bool operator == (DisguisedPtr lhs, id rhs) { + return lhs == (objc_object *)rhs; +} +static inline bool operator != (DisguisedPtr lhs, id rhs) { + return lhs != (objc_object *)rhs; +} + + +// Pointer hash function. +// This is not a terrific hash, but it is fast +// and not outrageously flawed for our purposes. + +// Based on principles from http://locklessinc.com/articles/fast_hash/ +// and evaluation ideas from http://floodyberry.com/noncryptohashzoo/ +#if __LP64__ +static inline uint32_t ptr_hash(uint64_t key) +{ + key ^= key >> 4; + key *= 0x8a970be7488fda55; + key ^= __builtin_bswap64(key); + return (uint32_t)key; +} +#else +static inline uint32_t ptr_hash(uint32_t key) +{ + key ^= key >> 4; + key *= 0x5052acdb; + key ^= __builtin_bswap32(key); + return key; +} +#endif + +/* + Higher-quality hash function. This is measurably slower in some workloads. 
+#if __LP64__ + uint32_t ptr_hash(uint64_t key) +{ + key -= __builtin_bswap64(key); + key *= 0x8a970be7488fda55; + key ^= __builtin_bswap64(key); + key *= 0x8a970be7488fda55; + key ^= __builtin_bswap64(key); + return (uint32_t)key; +} +#else +static uint32_t ptr_hash(uint32_t key) +{ + key -= __builtin_bswap32(key); + key *= 0x5052acdb; + key ^= __builtin_bswap32(key); + key *= 0x5052acdb; + key ^= __builtin_bswap32(key); + return key; +} +#endif +*/ + + +// Inlined parts of objc_object's implementation +#include "objc-object.h" + +#endif /* _OBJC_PRIVATE_H_ */ + diff --git a/runtime/objc-probes.d b/runtime/objc-probes.d new file mode 100644 index 0000000..4a1e224 --- /dev/null +++ b/runtime/objc-probes.d @@ -0,0 +1,5 @@ +provider objc_runtime +{ + probe objc_exception_throw(void *id); + probe objc_exception_rethrow(); +}; diff --git a/runtime/objc-references.h b/runtime/objc-references.h new file mode 100644 index 0000000..1e49f7c --- /dev/null +++ b/runtime/objc-references.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2008 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * objc-references.h + */ + +#ifndef _OBJC_REFERENCES_H_ +#define _OBJC_REFERENCES_H_ + +#include "objc-api.h" +#include "objc-config.h" + +__BEGIN_DECLS + +extern void _object_set_associative_reference(id object, void *key, id value, uintptr_t policy); +extern id _object_get_associative_reference(id object, void *key); +extern void _object_remove_assocations(id object); + +__END_DECLS + +#endif diff --git a/runtime/objc-references.mm b/runtime/objc-references.mm new file mode 100644 index 0000000..712b88e --- /dev/null +++ b/runtime/objc-references.mm @@ -0,0 +1,334 @@ +/* + * Copyright (c) 2004-2007 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+/*
+  Implementation of the weak / associative references for non-GC mode.
+*/
+
+
+#include "objc-private.h"
+#include <objc/message.h>
+#include <map>
+
+#if _LIBCPP_VERSION
+#   include <unordered_map>
+#else
+#   include <tr1/unordered_map>
+    using namespace tr1;
+#endif
+
+
+// wrap all the murky C++ details in a namespace to get them out of the way.
+
+namespace objc_references_support {
+    struct DisguisedPointerEqual {
+        bool operator()(uintptr_t p1, uintptr_t p2) const {
+            return p1 == p2;
+        }
+    };
+
+    struct DisguisedPointerHash {
+        uintptr_t operator()(uintptr_t k) const {
+            // borrowed from CFSet.c
+#if __LP64__
+            uintptr_t a = 0x4368726973746F70ULL;
+            uintptr_t b = 0x686572204B616E65ULL;
+#else
+            uintptr_t a = 0x4B616E65UL;
+            uintptr_t b = 0x4B616E65UL;
+#endif
+            uintptr_t c = 1;
+            a += k;
+#if __LP64__
+            a -= b; a -= c; a ^= (c >> 43);
+            b -= c; b -= a; b ^= (a << 9);
+            c -= a; c -= b; c ^= (b >> 8);
+            a -= b; a -= c; a ^= (c >> 38);
+            b -= c; b -= a; b ^= (a << 23);
+            c -= a; c -= b; c ^= (b >> 5);
+            a -= b; a -= c; a ^= (c >> 35);
+            b -= c; b -= a; b ^= (a << 49);
+            c -= a; c -= b; c ^= (b >> 11);
+            a -= b; a -= c; a ^= (c >> 12);
+            b -= c; b -= a; b ^= (a << 18);
+            c -= a; c -= b; c ^= (b >> 22);
+#else
+            a -= b; a -= c; a ^= (c >> 13);
+            b -= c; b -= a; b ^= (a << 8);
+            c -= a; c -= b; c ^= (b >> 13);
+            a -= b; a -= c; a ^= (c >> 12);
+            b -= c; b -= a; b ^= (a << 16);
+            c -= a; c -= b; c ^= (b >> 5);
+            a -= b; a -= c; a ^= (c >> 3);
+            b -= c; b -= a; b ^= (a << 10);
+            c -= a; c -= b; c ^= (b >> 15);
+#endif
+            return c;
+        }
+    };
+
+    struct ObjectPointerLess {
+        bool operator()(const void *p1, const void *p2) const {
+            return p1 < p2;
+        }
+    };
+
+    struct ObjcPointerHash {
+        uintptr_t operator()(void *p) const {
+            return DisguisedPointerHash()(uintptr_t(p));
+        }
+    };
+
+    // STL allocator that uses the runtime's internal allocator.
+ + template struct ObjcAllocator { + typedef T value_type; + typedef value_type* pointer; + typedef const value_type *const_pointer; + typedef value_type& reference; + typedef const value_type& const_reference; + typedef size_t size_type; + typedef ptrdiff_t difference_type; + + template struct rebind { typedef ObjcAllocator other; }; + + template ObjcAllocator(const ObjcAllocator&) {} + ObjcAllocator() {} + ObjcAllocator(const ObjcAllocator&) {} + ~ObjcAllocator() {} + + pointer address(reference x) const { return &x; } + const_pointer address(const_reference x) const { + return x; + } + + pointer allocate(size_type n, const_pointer = 0) { + return static_cast(::malloc(n * sizeof(T))); + } + + void deallocate(pointer p, size_type) { ::free(p); } + + size_type max_size() const { + return static_cast(-1) / sizeof(T); + } + + void construct(pointer p, const value_type& x) { + new(p) value_type(x); + } + + void destroy(pointer p) { p->~value_type(); } + + void operator=(const ObjcAllocator&); + + }; + + template<> struct ObjcAllocator { + typedef void value_type; + typedef void* pointer; + typedef const void *const_pointer; + template struct rebind { typedef ObjcAllocator other; }; + }; + + typedef uintptr_t disguised_ptr_t; + inline disguised_ptr_t DISGUISE(id value) { return ~uintptr_t(value); } + inline id UNDISGUISE(disguised_ptr_t dptr) { return id(~dptr); } + + class ObjcAssociation { + uintptr_t _policy; + id _value; + public: + ObjcAssociation(uintptr_t policy, id value) : _policy(policy), _value(value) {} + ObjcAssociation() : _policy(0), _value(nil) {} + + uintptr_t policy() const { return _policy; } + id value() const { return _value; } + + bool hasValue() { return _value != nil; } + }; + +#if TARGET_OS_WIN32 + typedef hash_map ObjectAssociationMap; + typedef hash_map AssociationsHashMap; +#else + typedef ObjcAllocator > ObjectAssociationMapAllocator; + class ObjectAssociationMap : public std::map { + public: + void *operator new(size_t n) { return ::malloc(n); } + void operator delete(void *ptr) { ::free(ptr); } + }; + typedef ObjcAllocator > AssociationsHashMapAllocator; + class AssociationsHashMap : public unordered_map { + public: + void *operator new(size_t n) { return ::malloc(n); } + void operator delete(void *ptr) { ::free(ptr); } + }; +#endif +} + +using namespace objc_references_support; + +// class AssociationsManager manages a lock / hash table singleton pair. +// Allocating an instance acquires the lock, and calling its assocations() method +// lazily allocates it. + +class AssociationsManager { + static spinlock_t _lock; + static AssociationsHashMap *_map; // associative references: object pointer -> PtrPtrHashMap. +public: + AssociationsManager() { _lock.lock(); } + ~AssociationsManager() { _lock.unlock(); } + + AssociationsHashMap &associations() { + if (_map == NULL) + _map = new AssociationsHashMap(); + return *_map; + } +}; + +spinlock_t AssociationsManager::_lock; +AssociationsHashMap *AssociationsManager::_map = NULL; + +// expanded policy bits. + +enum { + OBJC_ASSOCIATION_SETTER_ASSIGN = 0, + OBJC_ASSOCIATION_SETTER_RETAIN = 1, + OBJC_ASSOCIATION_SETTER_COPY = 3, // NOTE: both bits are set, so we can simply test 1 bit in releaseValue below. 
+ OBJC_ASSOCIATION_GETTER_READ = (0 << 8), + OBJC_ASSOCIATION_GETTER_RETAIN = (1 << 8), + OBJC_ASSOCIATION_GETTER_AUTORELEASE = (2 << 8) +}; + +id _object_get_associative_reference(id object, void *key) { + id value = nil; + uintptr_t policy = OBJC_ASSOCIATION_ASSIGN; + { + AssociationsManager manager; + AssociationsHashMap &associations(manager.associations()); + disguised_ptr_t disguised_object = DISGUISE(object); + AssociationsHashMap::iterator i = associations.find(disguised_object); + if (i != associations.end()) { + ObjectAssociationMap *refs = i->second; + ObjectAssociationMap::iterator j = refs->find(key); + if (j != refs->end()) { + ObjcAssociation &entry = j->second; + value = entry.value(); + policy = entry.policy(); + if (policy & OBJC_ASSOCIATION_GETTER_RETAIN) ((id(*)(id, SEL))objc_msgSend)(value, SEL_retain); + } + } + } + if (value && (policy & OBJC_ASSOCIATION_GETTER_AUTORELEASE)) { + ((id(*)(id, SEL))objc_msgSend)(value, SEL_autorelease); + } + return value; +} + +static id acquireValue(id value, uintptr_t policy) { + switch (policy & 0xFF) { + case OBJC_ASSOCIATION_SETTER_RETAIN: + return ((id(*)(id, SEL))objc_msgSend)(value, SEL_retain); + case OBJC_ASSOCIATION_SETTER_COPY: + return ((id(*)(id, SEL))objc_msgSend)(value, SEL_copy); + } + return value; +} + +static void releaseValue(id value, uintptr_t policy) { + if (policy & OBJC_ASSOCIATION_SETTER_RETAIN) { + ((id(*)(id, SEL))objc_msgSend)(value, SEL_release); + } +} + +struct ReleaseValue { + void operator() (ObjcAssociation &association) { + releaseValue(association.value(), association.policy()); + } +}; + +void _object_set_associative_reference(id object, void *key, id value, uintptr_t policy) { + // retain the new value (if any) outside the lock. + ObjcAssociation old_association(0, nil); + id new_value = value ? acquireValue(value, policy) : nil; + { + AssociationsManager manager; + AssociationsHashMap &associations(manager.associations()); + disguised_ptr_t disguised_object = DISGUISE(object); + if (new_value) { + // break any existing association. + AssociationsHashMap::iterator i = associations.find(disguised_object); + if (i != associations.end()) { + // secondary table exists + ObjectAssociationMap *refs = i->second; + ObjectAssociationMap::iterator j = refs->find(key); + if (j != refs->end()) { + old_association = j->second; + j->second = ObjcAssociation(policy, new_value); + } else { + (*refs)[key] = ObjcAssociation(policy, new_value); + } + } else { + // create the new association (first time). + ObjectAssociationMap *refs = new ObjectAssociationMap; + associations[disguised_object] = refs; + (*refs)[key] = ObjcAssociation(policy, new_value); + object->setHasAssociatedObjects(); + } + } else { + // setting the association to nil breaks the association. + AssociationsHashMap::iterator i = associations.find(disguised_object); + if (i != associations.end()) { + ObjectAssociationMap *refs = i->second; + ObjectAssociationMap::iterator j = refs->find(key); + if (j != refs->end()) { + old_association = j->second; + refs->erase(j); + } + } + } + } + // release the old value (outside of the lock). 
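+    // (Illustrative note, not part of the upstream sources: deferring the
+    // -release until the AssociationsManager spinlock has been dropped matters
+    // because a dealloc triggered by that release may call
+    // _object_remove_assocations(), which takes the same non-recursive lock.)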
+ if (old_association.hasValue()) ReleaseValue()(old_association); +} + +void _object_remove_assocations(id object) { + vector< ObjcAssociation,ObjcAllocator > elements; + { + AssociationsManager manager; + AssociationsHashMap &associations(manager.associations()); + if (associations.size() == 0) return; + disguised_ptr_t disguised_object = DISGUISE(object); + AssociationsHashMap::iterator i = associations.find(disguised_object); + if (i != associations.end()) { + // copy all of the associations that need to be removed. + ObjectAssociationMap *refs = i->second; + for (ObjectAssociationMap::iterator j = refs->begin(), end = refs->end(); j != end; ++j) { + elements.push_back(j->second); + } + // remove the secondary table. + delete refs; + associations.erase(i); + } + } + // the calls to releaseValue() happen outside of the lock. + for_each(elements.begin(), elements.end(), ReleaseValue()); +} diff --git a/runtime/objc-runtime-new.h b/runtime/objc-runtime-new.h new file mode 100644 index 0000000..7b7dbac --- /dev/null +++ b/runtime/objc-runtime-new.h @@ -0,0 +1,1399 @@ +/* + * Copyright (c) 2005-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_RUNTIME_NEW_H +#define _OBJC_RUNTIME_NEW_H + +#if __LP64__ +typedef uint32_t mask_t; // x86_64 & arm64 asm are less efficient with 16-bits +#else +typedef uint16_t mask_t; +#endif +typedef uintptr_t cache_key_t; + +struct swift_class_t; + + +struct bucket_t { +private: + cache_key_t _key; + IMP _imp; + +public: + inline cache_key_t key() const { return _key; } + inline IMP imp() const { return (IMP)_imp; } + inline void setKey(cache_key_t newKey) { _key = newKey; } + inline void setImp(IMP newImp) { _imp = newImp; } + + void set(cache_key_t newKey, IMP newImp); +}; + + +struct cache_t { + struct bucket_t *_buckets; + mask_t _mask; + mask_t _occupied; + +public: + struct bucket_t *buckets(); + mask_t mask(); + mask_t occupied(); + void incrementOccupied(); + void setBucketsAndMask(struct bucket_t *newBuckets, mask_t newMask); + void initializeToEmpty(); + + mask_t capacity(); + bool isConstantEmptyCache(); + bool canBeFreed(); + + static size_t bytesForCapacity(uint32_t cap); + static struct bucket_t * endMarker(struct bucket_t *b, uint32_t cap); + + void expand(); + void reallocate(mask_t oldCapacity, mask_t newCapacity); + struct bucket_t * find(cache_key_t key, id receiver); + + static void bad_cache(id receiver, SEL sel, Class isa) __attribute__((noreturn)); +}; + + +// classref_t is unremapped class_t* +typedef struct classref * classref_t; + +/*********************************************************************** +* entsize_list_tt +* Generic implementation of an array of non-fragile structs. +* +* Element is the struct type (e.g. method_t) +* List is the specialization of entsize_list_tt (e.g. method_list_t) +* FlagMask is used to stash extra bits in the entsize field +* (e.g. method list fixup markers) +**********************************************************************/ +template +struct entsize_list_tt { + uint32_t entsizeAndFlags; + uint32_t count; + Element first; + + uint32_t entsize() const { + return entsizeAndFlags & ~FlagMask; + } + uint32_t flags() const { + return entsizeAndFlags & FlagMask; + } + + Element& getOrEnd(uint32_t i) const { + assert(i <= count); + return *(Element *)((uint8_t *)&first + i*entsize()); + } + Element& get(uint32_t i) const { + assert(i < count); + return getOrEnd(i); + } + + size_t byteSize() const { + return sizeof(*this) + (count-1)*entsize(); + } + + List *duplicate() const { + return (List *)memdup(this, this->byteSize()); + } + + struct iterator; + const iterator begin() const { + return iterator(*static_cast(this), 0); + } + iterator begin() { + return iterator(*static_cast(this), 0); + } + const iterator end() const { + return iterator(*static_cast(this), count); + } + iterator end() { + return iterator(*static_cast(this), count); + } + + struct iterator { + uint32_t entsize; + uint32_t index; // keeping track of this saves a divide in operator- + Element* element; + + typedef std::random_access_iterator_tag iterator_category; + typedef Element value_type; + typedef ptrdiff_t difference_type; + typedef Element* pointer; + typedef Element& reference; + + iterator() { } + + iterator(const List& list, uint32_t start = 0) + : entsize(list.entsize()) + , index(start) + , element(&list.getOrEnd(start)) + { } + + const iterator& operator += (ptrdiff_t delta) { + element = (Element*)((uint8_t *)element + delta*entsize); + index += (int32_t)delta; + return *this; + } + const iterator& operator -= (ptrdiff_t delta) { + element = (Element*)((uint8_t *)element - delta*entsize); + index -= 
(int32_t)delta; + return *this; + } + const iterator operator + (ptrdiff_t delta) const { + return iterator(*this) += delta; + } + const iterator operator - (ptrdiff_t delta) const { + return iterator(*this) -= delta; + } + + iterator& operator ++ () { *this += 1; return *this; } + iterator& operator -- () { *this -= 1; return *this; } + iterator operator ++ (int) { + iterator result(*this); *this += 1; return result; + } + iterator operator -- (int) { + iterator result(*this); *this -= 1; return result; + } + + ptrdiff_t operator - (const iterator& rhs) const { + return (ptrdiff_t)this->index - (ptrdiff_t)rhs.index; + } + + Element& operator * () const { return *element; } + Element* operator -> () const { return element; } + + operator Element& () const { return *element; } + + bool operator == (const iterator& rhs) const { + return this->element == rhs.element; + } + bool operator != (const iterator& rhs) const { + return this->element != rhs.element; + } + + bool operator < (const iterator& rhs) const { + return this->element < rhs.element; + } + bool operator > (const iterator& rhs) const { + return this->element > rhs.element; + } + }; +}; + + +struct method_t { + SEL name; + const char *types; + IMP imp; + + struct SortBySELAddress : + public std::binary_function + { + bool operator() (const method_t& lhs, + const method_t& rhs) + { return lhs.name < rhs.name; } + }; +}; + +struct ivar_t { +#if __x86_64__ + // *offset was originally 64-bit on some x86_64 platforms. + // We read and write only 32 bits of it. + // Some metadata provides all 64 bits. This is harmless for unsigned + // little-endian values. + // Some code uses all 64 bits. class_addIvar() over-allocates the + // offset for their benefit. +#endif + int32_t *offset; + const char *name; + const char *type; + // alignment is sometimes -1; use alignment() instead + uint32_t alignment_raw; + uint32_t size; + + uint32_t alignment() const { + if (alignment_raw == ~(uint32_t)0) return 1U << WORD_SHIFT; + return 1 << alignment_raw; + } +}; + +struct property_t { + const char *name; + const char *attributes; +}; + +// Two bits of entsize are used for fixup markers. +struct method_list_t : entsize_list_tt { + bool isFixedUp() const; + void setFixedUp(); + + uint32_t indexOfMethod(const method_t *meth) const { + uint32_t i = + (uint32_t)(((uintptr_t)meth - (uintptr_t)this) / entsize()); + assert(i < count); + return i; + } +}; + +struct ivar_list_t : entsize_list_tt { + bool containsIvar(Ivar ivar) const { + return (ivar >= (Ivar)&*begin() && ivar < (Ivar)&*end()); + } +}; + +struct property_list_t : entsize_list_tt { +}; + + +typedef uintptr_t protocol_ref_t; // protocol_t *, but unremapped + +// Values for protocol_t->flags +#define PROTOCOL_FIXED_UP_2 (1<<31) // must never be set by compiler +#define PROTOCOL_FIXED_UP_1 (1<<30) // must never be set by compiler +// Bits 0..15 are reserved for Swift's use. + +#define PROTOCOL_FIXED_UP_MASK (PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2) + +struct protocol_t : objc_object { + const char *mangledName; + struct protocol_list_t *protocols; + method_list_t *instanceMethods; + method_list_t *classMethods; + method_list_t *optionalInstanceMethods; + method_list_t *optionalClassMethods; + property_list_t *instanceProperties; + uint32_t size; // sizeof(protocol_t) + uint32_t flags; + // Fields below this point are not always present on disk. 
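+    // (Illustrative note, not part of the upstream sources: whether each of
+    // these trailing fields exists in a given binary is decided by comparing
+    // the on-disk `size` against offsetof()+sizeof(), as the HAS_FIELD checks
+    // below do; e.g. a protocol emitted before _classProperties was added
+    // reports hasClassPropertiesField() == false and classProperties()
+    // returns nil.)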
+ const char **_extendedMethodTypes; + const char *_demangledName; + property_list_t *_classProperties; + + const char *demangledName(); + + const char *nameForLogging() { + return demangledName(); + } + + bool isFixedUp() const; + void setFixedUp(); + +# define HAS_FIELD(f) (size >= offsetof(protocol_t, f) + sizeof(f)) + + bool hasExtendedMethodTypesField() const { + return HAS_FIELD(_extendedMethodTypes); + } + bool hasDemangledNameField() const { + return HAS_FIELD(_demangledName); + } + bool hasClassPropertiesField() const { + return HAS_FIELD(_classProperties); + } + +# undef HAS_FIELD + + const char **extendedMethodTypes() const { + return hasExtendedMethodTypesField() ? _extendedMethodTypes : nil; + } + + property_list_t *classProperties() const { + return hasClassPropertiesField() ? _classProperties : nil; + } +}; + +struct protocol_list_t { + // count is 64-bit by accident. + uintptr_t count; + protocol_ref_t list[0]; // variable-size + + size_t byteSize() const { + return sizeof(*this) + count*sizeof(list[0]); + } + + protocol_list_t *duplicate() const { + return (protocol_list_t *)memdup(this, this->byteSize()); + } + + typedef protocol_ref_t* iterator; + typedef const protocol_ref_t* const_iterator; + + const_iterator begin() const { + return list; + } + iterator begin() { + return list; + } + const_iterator end() const { + return list + count; + } + iterator end() { + return list + count; + } +}; + +struct locstamped_category_t { + category_t *cat; + struct header_info *hi; +}; + +struct locstamped_category_list_t { + uint32_t count; +#if __LP64__ + uint32_t reserved; +#endif + locstamped_category_t list[0]; +}; + + +// class_data_bits_t is the class_t->data field (class_rw_t pointer plus flags) +// The extra bits are optimized for the retain/release and alloc/dealloc paths. + +// Values for class_ro_t->flags +// These are emitted by the compiler and are part of the ABI. +// Note: See CGObjCNonFragileABIMac::BuildClassRoTInitializer in clang +// class is a metaclass +#define RO_META (1<<0) +// class is a root class +#define RO_ROOT (1<<1) +// class has .cxx_construct/destruct implementations +#define RO_HAS_CXX_STRUCTORS (1<<2) +// class has +load implementation +// #define RO_HAS_LOAD_METHOD (1<<3) +// class has visibility=hidden set +#define RO_HIDDEN (1<<4) +// class has attribute(objc_exception): OBJC_EHTYPE_$_ThisClass is non-weak +#define RO_EXCEPTION (1<<5) +// this bit is available for reassignment +// #define RO_REUSE_ME (1<<6) +// class compiled with ARC +#define RO_IS_ARC (1<<7) +// class has .cxx_destruct but no .cxx_construct (with RO_HAS_CXX_STRUCTORS) +#define RO_HAS_CXX_DTOR_ONLY (1<<8) +// class is not ARC but has ARC-style weak ivar layout +#define RO_HAS_WEAK_WITHOUT_ARC (1<<9) + +// class is in an unloadable bundle - must never be set by compiler +#define RO_FROM_BUNDLE (1<<29) +// class is unrealized future class - must never be set by compiler +#define RO_FUTURE (1<<30) +// class is realized - must never be set by compiler +#define RO_REALIZED (1<<31) + +// Values for class_rw_t->flags +// These are not emitted by the compiler and are never used in class_ro_t. +// Their presence should be considered in future ABI versions. 
+// class_t->data is class_rw_t, not class_ro_t +#define RW_REALIZED (1<<31) +// class is unresolved future class +#define RW_FUTURE (1<<30) +// class is initialized +#define RW_INITIALIZED (1<<29) +// class is initializing +#define RW_INITIALIZING (1<<28) +// class_rw_t->ro is heap copy of class_ro_t +#define RW_COPIED_RO (1<<27) +// class allocated but not yet registered +#define RW_CONSTRUCTING (1<<26) +// class allocated and registered +#define RW_CONSTRUCTED (1<<25) +// available for use; was RW_FINALIZE_ON_MAIN_THREAD +// #define RW_24 (1<<24) +// class +load has been called +#define RW_LOADED (1<<23) +#if !SUPPORT_NONPOINTER_ISA +// class instances may have associative references +#define RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS (1<<22) +#endif +// class has instance-specific GC layout +#define RW_HAS_INSTANCE_SPECIFIC_LAYOUT (1 << 21) +// available for use +// #define RW_20 (1<<20) +// class has started realizing but not yet completed it +#define RW_REALIZING (1<<19) + +// NOTE: MORE RW_ FLAGS DEFINED BELOW + + +// Values for class_rw_t->flags or class_t->bits +// These flags are optimized for retain/release and alloc/dealloc +// 64-bit stores more of them in class_t->bits to reduce pointer indirection. + +#if !__LP64__ + +// class or superclass has .cxx_construct implementation +#define RW_HAS_CXX_CTOR (1<<18) +// class or superclass has .cxx_destruct implementation +#define RW_HAS_CXX_DTOR (1<<17) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. +#define RW_HAS_DEFAULT_AWZ (1<<16) +// class's instances requires raw isa +#if SUPPORT_NONPOINTER_ISA +#define RW_REQUIRES_RAW_ISA (1<<15) +#endif + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<1) +// data pointer +#define FAST_DATA_MASK 0xfffffffcUL + +#elif 1 +// Leaks-compatible version that steals low bits only. + +// class or superclass has .cxx_construct implementation +#define RW_HAS_CXX_CTOR (1<<18) +// class or superclass has .cxx_destruct implementation +#define RW_HAS_CXX_DTOR (1<<17) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. +#define RW_HAS_DEFAULT_AWZ (1<<16) + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<1) +// class's instances requires raw isa +#define FAST_REQUIRES_RAW_ISA (1UL<<2) +// data pointer +#define FAST_DATA_MASK 0x00007ffffffffff8UL + +#else +// Leaks-incompatible version that steals lots of bits. + +// class is a Swift class +#define FAST_IS_SWIFT (1UL<<0) +// class's instances requires raw isa +#define FAST_REQUIRES_RAW_ISA (1UL<<1) +// class or superclass has .cxx_destruct implementation +// This bit is aligned with isa_t->hasCxxDtor to save an instruction. +#define FAST_HAS_CXX_DTOR (1UL<<2) +// data pointer +#define FAST_DATA_MASK 0x00007ffffffffff8UL +// class or superclass has .cxx_construct implementation +#define FAST_HAS_CXX_CTOR (1UL<<47) +// class or superclass has default alloc/allocWithZone: implementation +// Note this is is stored in the metaclass. 
+#define FAST_HAS_DEFAULT_AWZ (1UL<<48) +// class or superclass has default retain/release/autorelease/retainCount/ +// _tryRetain/_isDeallocating/retainWeakReference/allowsWeakReference +#define FAST_HAS_DEFAULT_RR (1UL<<49) +// summary bit for fast alloc path: !hasCxxCtor and +// !instancesRequireRawIsa and instanceSize fits into shiftedSize +#define FAST_ALLOC (1UL<<50) +// instance size in units of 16 bytes +// or 0 if the instance size is too big in this field +// This field must be LAST +#define FAST_SHIFTED_SIZE_SHIFT 51 + +// FAST_ALLOC means +// FAST_HAS_CXX_CTOR is set +// FAST_REQUIRES_RAW_ISA is not set +// FAST_SHIFTED_SIZE is not zero +// FAST_ALLOC does NOT check FAST_HAS_DEFAULT_AWZ because that +// bit is stored on the metaclass. +#define FAST_ALLOC_MASK (FAST_HAS_CXX_CTOR | FAST_REQUIRES_RAW_ISA) +#define FAST_ALLOC_VALUE (0) + +#endif + + +struct class_ro_t { + uint32_t flags; + uint32_t instanceStart; + uint32_t instanceSize; +#ifdef __LP64__ + uint32_t reserved; +#endif + + const uint8_t * ivarLayout; + + const char * name; + method_list_t * baseMethodList; + protocol_list_t * baseProtocols; + const ivar_list_t * ivars; + + const uint8_t * weakIvarLayout; + property_list_t *baseProperties; + + method_list_t *baseMethods() const { + return baseMethodList; + } +}; + + +/*********************************************************************** +* list_array_tt +* Generic implementation for metadata that can be augmented by categories. +* +* Element is the underlying metadata type (e.g. method_t) +* List is the metadata's list type (e.g. method_list_t) +* +* A list_array_tt has one of three values: +* - empty +* - a pointer to a single list +* - an array of pointers to lists +* +* countLists/beginLists/endLists iterate the metadata lists +* count/begin/end iterate the underlying metadata elements +**********************************************************************/ +template +class list_array_tt { + struct array_t { + uint32_t count; + List* lists[0]; + + static size_t byteSize(uint32_t count) { + return sizeof(array_t) + count*sizeof(lists[0]); + } + size_t byteSize() { + return byteSize(count); + } + }; + + protected: + class iterator { + List **lists; + List **listsEnd; + typename List::iterator m, mEnd; + + public: + iterator(List **begin, List **end) + : lists(begin), listsEnd(end) + { + if (begin != end) { + m = (*begin)->begin(); + mEnd = (*begin)->end(); + } + } + + const Element& operator * () const { + return *m; + } + Element& operator * () { + return *m; + } + + bool operator != (const iterator& rhs) const { + if (lists != rhs.lists) return true; + if (lists == listsEnd) return false; // m is undefined + if (m != rhs.m) return true; + return false; + } + + const iterator& operator ++ () { + assert(m != mEnd); + m++; + if (m == mEnd) { + assert(lists != listsEnd); + lists++; + if (lists != listsEnd) { + m = (*lists)->begin(); + mEnd = (*lists)->end(); + } + } + return *this; + } + }; + + private: + union { + List* list; + uintptr_t arrayAndFlag; + }; + + bool hasArray() const { + return arrayAndFlag & 1; + } + + array_t *array() { + return (array_t *)(arrayAndFlag & ~1); + } + + void setArray(array_t *array) { + arrayAndFlag = (uintptr_t)array | 1; + } + + public: + + uint32_t count() { + uint32_t result = 0; + for (auto lists = beginLists(), end = endLists(); + lists != end; + ++lists) + { + result += (*lists)->count; + } + return result; + } + + iterator begin() { + return iterator(beginLists(), endLists()); + } + + iterator end() { + List **e = endLists(); + 
return iterator(e, e); + } + + + uint32_t countLists() { + if (hasArray()) { + return array()->count; + } else if (list) { + return 1; + } else { + return 0; + } + } + + List** beginLists() { + if (hasArray()) { + return array()->lists; + } else { + return &list; + } + } + + List** endLists() { + if (hasArray()) { + return array()->lists + array()->count; + } else if (list) { + return &list + 1; + } else { + return &list; + } + } + + void attachLists(List* const * addedLists, uint32_t addedCount) { + if (addedCount == 0) return; + + if (hasArray()) { + // many lists -> many lists + uint32_t oldCount = array()->count; + uint32_t newCount = oldCount + addedCount; + setArray((array_t *)realloc(array(), array_t::byteSize(newCount))); + array()->count = newCount; + memmove(array()->lists + addedCount, array()->lists, + oldCount * sizeof(array()->lists[0])); + memcpy(array()->lists, addedLists, + addedCount * sizeof(array()->lists[0])); + } + else if (!list && addedCount == 1) { + // 0 lists -> 1 list + list = addedLists[0]; + } + else { + // 1 list -> many lists + List* oldList = list; + uint32_t oldCount = oldList ? 1 : 0; + uint32_t newCount = oldCount + addedCount; + setArray((array_t *)malloc(array_t::byteSize(newCount))); + array()->count = newCount; + if (oldList) array()->lists[addedCount] = oldList; + memcpy(array()->lists, addedLists, + addedCount * sizeof(array()->lists[0])); + } + } + + void tryFree() { + if (hasArray()) { + for (uint32_t i = 0; i < array()->count; i++) { + try_free(array()->lists[i]); + } + try_free(array()); + } + else if (list) { + try_free(list); + } + } + + template + Result duplicate() { + Result result; + + if (hasArray()) { + array_t *a = array(); + result.setArray((array_t *)memdup(a, a->byteSize())); + for (uint32_t i = 0; i < a->count; i++) { + result.array()->lists[i] = a->lists[i]->duplicate(); + } + } else if (list) { + result.list = list->duplicate(); + } else { + result.list = nil; + } + + return result; + } +}; + + +class method_array_t : + public list_array_tt +{ + typedef list_array_tt Super; + + public: + method_list_t **beginCategoryMethodLists() { + return beginLists(); + } + + method_list_t **endCategoryMethodLists(Class cls); + + method_array_t duplicate() { + return Super::duplicate(); + } +}; + + +class property_array_t : + public list_array_tt +{ + typedef list_array_tt Super; + + public: + property_array_t duplicate() { + return Super::duplicate(); + } +}; + + +class protocol_array_t : + public list_array_tt +{ + typedef list_array_tt Super; + + public: + protocol_array_t duplicate() { + return Super::duplicate(); + } +}; + + +struct class_rw_t { + // Be warned that Symbolication knows the layout of this structure. + uint32_t flags; + uint32_t version; + + const class_ro_t *ro; + + method_array_t methods; + property_array_t properties; + protocol_array_t protocols; + + Class firstSubclass; + Class nextSiblingClass; + + char *demangledName; + +#if SUPPORT_INDEXED_ISA + uint32_t index; +#endif + + void setFlags(uint32_t set) + { + OSAtomicOr32Barrier(set, &flags); + } + + void clearFlags(uint32_t clear) + { + OSAtomicXor32Barrier(clear, &flags); + } + + // set and clear must not overlap + void changeFlags(uint32_t set, uint32_t clear) + { + assert((set & clear) == 0); + + uint32_t oldf, newf; + do { + oldf = flags; + newf = (oldf | set) & ~clear; + } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&flags)); + } +}; + + +struct class_data_bits_t { + + // Values are the FAST_ flags above. 
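+    // (Illustrative note, not part of the upstream sources: the flag bits such
+    // as FAST_IS_SWIFT share this word with the class_rw_t pointer, which is
+    // recovered by masking, e.g.
+    //     class_rw_t *rw  = (class_rw_t *)(bits & FAST_DATA_MASK);
+    //     bool isSwiftCls = bits & FAST_IS_SWIFT;
+    // which is exactly what data() and isSwift() below return.)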
+ uintptr_t bits; +private: + bool getBit(uintptr_t bit) + { + return bits & bit; + } + +#if FAST_ALLOC + static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) + { + if (change & FAST_ALLOC_MASK) { + if (((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) && + ((oldBits >> FAST_SHIFTED_SIZE_SHIFT) != 0)) + { + oldBits |= FAST_ALLOC; + } else { + oldBits &= ~FAST_ALLOC; + } + } + return oldBits; + } +#else + static uintptr_t updateFastAlloc(uintptr_t oldBits, uintptr_t change) { + return oldBits; + } +#endif + + void setBits(uintptr_t set) + { + uintptr_t oldBits; + uintptr_t newBits; + do { + oldBits = LoadExclusive(&bits); + newBits = updateFastAlloc(oldBits | set, set); + } while (!StoreReleaseExclusive(&bits, oldBits, newBits)); + } + + void clearBits(uintptr_t clear) + { + uintptr_t oldBits; + uintptr_t newBits; + do { + oldBits = LoadExclusive(&bits); + newBits = updateFastAlloc(oldBits & ~clear, clear); + } while (!StoreReleaseExclusive(&bits, oldBits, newBits)); + } + +public: + + class_rw_t* data() { + return (class_rw_t *)(bits & FAST_DATA_MASK); + } + void setData(class_rw_t *newData) + { + assert(!data() || (newData->flags & (RW_REALIZING | RW_FUTURE))); + // Set during realization or construction only. No locking needed. + bits = (bits & ~FAST_DATA_MASK) | (uintptr_t)newData; + } + + bool hasDefaultRR() { + return getBit(FAST_HAS_DEFAULT_RR); + } + void setHasDefaultRR() { + setBits(FAST_HAS_DEFAULT_RR); + } + void setHasCustomRR() { + clearBits(FAST_HAS_DEFAULT_RR); + } + +#if FAST_HAS_DEFAULT_AWZ + bool hasDefaultAWZ() { + return getBit(FAST_HAS_DEFAULT_AWZ); + } + void setHasDefaultAWZ() { + setBits(FAST_HAS_DEFAULT_AWZ); + } + void setHasCustomAWZ() { + clearBits(FAST_HAS_DEFAULT_AWZ); + } +#else + bool hasDefaultAWZ() { + return data()->flags & RW_HAS_DEFAULT_AWZ; + } + void setHasDefaultAWZ() { + data()->setFlags(RW_HAS_DEFAULT_AWZ); + } + void setHasCustomAWZ() { + data()->clearFlags(RW_HAS_DEFAULT_AWZ); + } +#endif + +#if FAST_HAS_CXX_CTOR + bool hasCxxCtor() { + return getBit(FAST_HAS_CXX_CTOR); + } + void setHasCxxCtor() { + setBits(FAST_HAS_CXX_CTOR); + } +#else + bool hasCxxCtor() { + return data()->flags & RW_HAS_CXX_CTOR; + } + void setHasCxxCtor() { + data()->setFlags(RW_HAS_CXX_CTOR); + } +#endif + +#if FAST_HAS_CXX_DTOR + bool hasCxxDtor() { + return getBit(FAST_HAS_CXX_DTOR); + } + void setHasCxxDtor() { + setBits(FAST_HAS_CXX_DTOR); + } +#else + bool hasCxxDtor() { + return data()->flags & RW_HAS_CXX_DTOR; + } + void setHasCxxDtor() { + data()->setFlags(RW_HAS_CXX_DTOR); + } +#endif + +#if FAST_REQUIRES_RAW_ISA + bool instancesRequireRawIsa() { + return getBit(FAST_REQUIRES_RAW_ISA); + } + void setInstancesRequireRawIsa() { + setBits(FAST_REQUIRES_RAW_ISA); + } +#elif SUPPORT_NONPOINTER_ISA + bool instancesRequireRawIsa() { + return data()->flags & RW_REQUIRES_RAW_ISA; + } + void setInstancesRequireRawIsa() { + data()->setFlags(RW_REQUIRES_RAW_ISA); + } +#else + bool instancesRequireRawIsa() { + return true; + } + void setInstancesRequireRawIsa() { + // nothing + } +#endif + +#if FAST_ALLOC + size_t fastInstanceSize() + { + assert(bits & FAST_ALLOC); + return (bits >> FAST_SHIFTED_SIZE_SHIFT) * 16; + } + void setFastInstanceSize(size_t newSize) + { + // Set during realization or construction only. No locking needed. 
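+        // Worked example (illustrative only): newSize == 40 rounds up to 48
+        // and is stored as 3 (48/16); fastInstanceSize() then reports
+        // 3 * 16 == 48 bytes.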
+ assert(data()->flags & RW_REALIZING); + + // Round up to 16-byte boundary, then divide to get 16-byte units + newSize = ((newSize + 15) & ~15) / 16; + + uintptr_t newBits = newSize << FAST_SHIFTED_SIZE_SHIFT; + if ((newBits >> FAST_SHIFTED_SIZE_SHIFT) == newSize) { + int shift = WORD_BITS - FAST_SHIFTED_SIZE_SHIFT; + uintptr_t oldBits = (bits << shift) >> shift; + if ((oldBits & FAST_ALLOC_MASK) == FAST_ALLOC_VALUE) { + newBits |= FAST_ALLOC; + } + bits = oldBits | newBits; + } + } + + bool canAllocFast() { + return bits & FAST_ALLOC; + } +#else + size_t fastInstanceSize() { + abort(); + } + void setFastInstanceSize(size_t) { + // nothing + } + bool canAllocFast() { + return false; + } +#endif + + void setClassArrayIndex(unsigned Idx) { +#if SUPPORT_INDEXED_ISA + // 0 is unused as then we can rely on zero-initialisation from calloc. + assert(Idx > 0); + data()->index = Idx; +#endif + } + + unsigned classArrayIndex() { +#if SUPPORT_INDEXED_ISA + return data()->index; +#else + return 0; +#endif + } + + bool isSwift() { + return getBit(FAST_IS_SWIFT); + } + + void setIsSwift() { + setBits(FAST_IS_SWIFT); + } +}; + + +struct objc_class : objc_object { + // Class ISA; + Class superclass; + cache_t cache; // formerly cache pointer and vtable + class_data_bits_t bits; // class_rw_t * plus custom rr/alloc flags + + class_rw_t *data() { + return bits.data(); + } + void setData(class_rw_t *newData) { + bits.setData(newData); + } + + void setInfo(uint32_t set) { + assert(isFuture() || isRealized()); + data()->setFlags(set); + } + + void clearInfo(uint32_t clear) { + assert(isFuture() || isRealized()); + data()->clearFlags(clear); + } + + // set and clear must not overlap + void changeInfo(uint32_t set, uint32_t clear) { + assert(isFuture() || isRealized()); + assert((set & clear) == 0); + data()->changeFlags(set, clear); + } + + bool hasCustomRR() { + return ! bits.hasDefaultRR(); + } + void setHasDefaultRR() { + assert(isInitializing()); + bits.setHasDefaultRR(); + } + void setHasCustomRR(bool inherited = false); + void printCustomRR(bool inherited); + + bool hasCustomAWZ() { + return ! bits.hasDefaultAWZ(); + } + void setHasDefaultAWZ() { + assert(isInitializing()); + bits.setHasDefaultAWZ(); + } + void setHasCustomAWZ(bool inherited = false); + void printCustomAWZ(bool inherited); + + bool instancesRequireRawIsa() { + return bits.instancesRequireRawIsa(); + } + void setInstancesRequireRawIsa(bool inherited = false); + void printInstancesRequireRawIsa(bool inherited); + + bool canAllocNonpointer() { + assert(!isFuture()); + return !instancesRequireRawIsa(); + } + bool canAllocFast() { + assert(!isFuture()); + return bits.canAllocFast(); + } + + + bool hasCxxCtor() { + // addSubclass() propagates this flag from the superclass. + assert(isRealized()); + return bits.hasCxxCtor(); + } + void setHasCxxCtor() { + bits.setHasCxxCtor(); + } + + bool hasCxxDtor() { + // addSubclass() propagates this flag from the superclass. + assert(isRealized()); + return bits.hasCxxDtor(); + } + void setHasCxxDtor() { + bits.setHasCxxDtor(); + } + + + bool isSwift() { + return bits.isSwift(); + } + + + // Return YES if the class's ivars are managed by ARC, + // or the class is MRC but has ARC-style weak ivars. + bool hasAutomaticIvars() { + return data()->ro->flags & (RO_IS_ARC | RO_HAS_WEAK_WITHOUT_ARC); + } + + // Return YES if the class's ivars are managed by ARC. 
+ bool isARC() { + return data()->ro->flags & RO_IS_ARC; + } + + +#if SUPPORT_NONPOINTER_ISA + // Tracked in non-pointer isas; not tracked otherwise +#else + bool instancesHaveAssociatedObjects() { + // this may be an unrealized future class in the CF-bridged case + assert(isFuture() || isRealized()); + return data()->flags & RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS; + } + + void setInstancesHaveAssociatedObjects() { + // this may be an unrealized future class in the CF-bridged case + assert(isFuture() || isRealized()); + setInfo(RW_INSTANCES_HAVE_ASSOCIATED_OBJECTS); + } +#endif + + bool shouldGrowCache() { + return true; + } + + void setShouldGrowCache(bool) { + // fixme good or bad for memory use? + } + + bool isInitializing() { + return getMeta()->data()->flags & RW_INITIALIZING; + } + + void setInitializing() { + assert(!isMetaClass()); + ISA()->setInfo(RW_INITIALIZING); + } + + bool isInitialized() { + return getMeta()->data()->flags & RW_INITIALIZED; + } + + void setInitialized(); + + bool isLoadable() { + assert(isRealized()); + return true; // any class registered for +load is definitely loadable + } + + IMP getLoadMethod(); + + // Locking: To prevent concurrent realization, hold runtimeLock. + bool isRealized() { + return data()->flags & RW_REALIZED; + } + + // Returns true if this is an unrealized future class. + // Locking: To prevent concurrent realization, hold runtimeLock. + bool isFuture() { + return data()->flags & RW_FUTURE; + } + + bool isMetaClass() { + assert(this); + assert(isRealized()); + return data()->ro->flags & RO_META; + } + + // NOT identical to this->ISA when this is a metaclass + Class getMeta() { + if (isMetaClass()) return (Class)this; + else return this->ISA(); + } + + bool isRootClass() { + return superclass == nil; + } + bool isRootMetaclass() { + return ISA() == (Class)this; + } + + const char *mangledName() { + // fixme can't assert locks here + assert(this); + + if (isRealized() || isFuture()) { + return data()->ro->name; + } else { + return ((const class_ro_t *)data())->name; + } + } + + const char *demangledName(bool realize = false); + const char *nameForLogging(); + + // May be unaligned depending on class's ivars. + uint32_t unalignedInstanceStart() { + assert(isRealized()); + return data()->ro->instanceStart; + } + + // Class's instance start rounded up to a pointer-size boundary. + // This is used for ARC layout bitmaps. + uint32_t alignedInstanceStart() { + return word_align(unalignedInstanceStart()); + } + + // May be unaligned depending on class's ivars. + uint32_t unalignedInstanceSize() { + assert(isRealized()); + return data()->ro->instanceSize; + } + + // Class's ivar size rounded up to a pointer-size boundary. + uint32_t alignedInstanceSize() { + return word_align(unalignedInstanceSize()); + } + + size_t instanceSize(size_t extraBytes) { + size_t size = alignedInstanceSize() + extraBytes; + // CF requires all objects be at least 16 bytes. 
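+        // For example (illustrative, 64-bit): a class with 12 bytes of ivars
+        // word-aligns to 16, so instanceSize(0) == 16 and instanceSize(8) == 24.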
+        if (size < 16) size = 16;
+        return size;
+    }
+
+    void setInstanceSize(uint32_t newSize) {
+        assert(isRealized());
+        if (newSize != data()->ro->instanceSize) {
+            assert(data()->flags & RW_COPIED_RO);
+            *const_cast<uint32_t *>(&data()->ro->instanceSize) = newSize;
+        }
+        bits.setFastInstanceSize(newSize);
+    }
+
+    void chooseClassArrayIndex();
+
+    void setClassArrayIndex(unsigned Idx) {
+        bits.setClassArrayIndex(Idx);
+    }
+
+    unsigned classArrayIndex() {
+        return bits.classArrayIndex();
+    }
+
+};
+
+
+struct swift_class_t : objc_class {
+    uint32_t flags;
+    uint32_t instanceAddressOffset;
+    uint32_t instanceSize;
+    uint16_t instanceAlignMask;
+    uint16_t reserved;
+
+    uint32_t classSize;
+    uint32_t classAddressOffset;
+    void *description;
+    // ...
+
+    void *baseAddress() {
+        return (void *)((uint8_t *)this - classAddressOffset);
+    }
+};
+
+
+struct category_t {
+    const char *name;
+    classref_t cls;
+    struct method_list_t *instanceMethods;
+    struct method_list_t *classMethods;
+    struct protocol_list_t *protocols;
+    struct property_list_t *instanceProperties;
+    // Fields below this point are not always present on disk.
+    struct property_list_t *_classProperties;
+
+    method_list_t *methodsForMeta(bool isMeta) {
+        if (isMeta) return classMethods;
+        else return instanceMethods;
+    }
+
+    property_list_t *propertiesForMeta(bool isMeta, struct header_info *hi);
+};
+
+struct objc_super2 {
+    id receiver;
+    Class current_class;
+};
+
+struct message_ref_t {
+    IMP imp;
+    SEL sel;
+};
+
+
+extern Method protocol_getMethod(protocol_t *p, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive);
+
+static inline void
+foreach_realized_class_and_subclass_2(Class top, bool (^code)(Class))
+{
+    // runtimeLock.assertWriting();
+    assert(top);
+    Class cls = top;
+    while (1) {
+        if (!code(cls)) break;
+
+        if (cls->data()->firstSubclass) {
+            cls = cls->data()->firstSubclass;
+        } else {
+            while (!cls->data()->nextSiblingClass  &&  cls != top) {
+                cls = cls->superclass;
+            }
+            if (cls == top) break;
+            cls = cls->data()->nextSiblingClass;
+        }
+    }
+}
+
+// Enumerates a class and all of its realized subclasses.
+static inline void
+foreach_realized_class_and_subclass(Class top, void (^code)(Class))
+{
+    foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
+        code(cls); return true;
+    });
+}
+
+// Enumerates all realized classes and metaclasses.
+extern Class firstRealizedClass();
+static inline void
+foreach_realized_class_and_metaclass(void (^code)(Class))
+{
+    for (Class top = firstRealizedClass();
+         top != nil;
+         top = top->data()->nextSiblingClass)
+    {
+        foreach_realized_class_and_subclass_2(top, ^bool(Class cls) {
+            code(cls); return true;
+        });
+    }
+
+}
+
+#endif
diff --git a/runtime/objc-runtime-new.mm b/runtime/objc-runtime-new.mm
new file mode 100644
index 0000000..b366c19
--- /dev/null
+++ b/runtime/objc-runtime-new.mm
@@ -0,0 +1,6573 @@
+/*
+ * Copyright (c) 2005-2009 Apple Inc. All Rights Reserved.
+ *
+ * @APPLE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
+ * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_LICENSE_HEADER_END@
+ */
+
+/***********************************************************************
+* objc-runtime-new.m
+* Support for new-ABI classes and images.
+**********************************************************************/
+
+#if __OBJC2__
+
+#include "objc-private.h"
+#include "objc-runtime-new.h"
+#include "objc-file.h"
+#include "objc-cache.h"
+#include <Block.h>
+#include <objc/message.h>
+#include <mach/shared_region.h>
+
+#define newprotocol(p) ((protocol_t *)p)
+
+static void disableTaggedPointers();
+static void detach_class(Class cls, bool isMeta);
+static void free_class(Class cls);
+static Class setSuperclass(Class cls, Class newSuper);
+static Class realizeClass(Class cls);
+static method_t *getMethodNoSuper_nolock(Class cls, SEL sel);
+static method_t *getMethod_nolock(Class cls, SEL sel);
+static IMP addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace);
+static bool isRRSelector(SEL sel);
+static bool isAWZSelector(SEL sel);
+static bool methodListImplementsRR(const method_list_t *mlist);
+static bool methodListImplementsAWZ(const method_list_t *mlist);
+static void updateCustomRR_AWZ(Class cls, method_t *meth);
+static method_t *search_method_list(const method_list_t *mlist, SEL sel);
+static void flushCaches(Class cls);
+#if SUPPORT_FIXUP
+static void fixupMessageRef(message_ref_t *msg);
+#endif
+
+static bool MetaclassNSObjectAWZSwizzled;
+static bool ClassNSObjectRRSwizzled;
+
+id objc_noop_imp(id self, SEL _cmd __unused) {
+    return self;
+}
+
+
+/***********************************************************************
+* Lock management
+**********************************************************************/
+rwlock_t runtimeLock;
+rwlock_t selLock;
+mutex_t cacheUpdateLock;
+recursive_mutex_t loadMethodLock;
+
+#if SUPPORT_QOS_HACK
+pthread_priority_t BackgroundPriority = 0;
+pthread_priority_t MainPriority = 0;
+# if DEBUG
+static __unused void destroyQOSKey(void *arg) {
+    _objc_fatal("QoS override level at thread exit is %zu instead of zero",
+                (size_t)(uintptr_t)arg);
+}
+# endif
+#endif
+
+void lock_init(void)
+{
+#if SUPPORT_QOS_HACK
+    BackgroundPriority = _pthread_qos_class_encode(QOS_CLASS_BACKGROUND, 0, 0);
+    MainPriority = _pthread_qos_class_encode(qos_class_main(), 0, 0);
+# if DEBUG
+    pthread_key_init_np(QOS_KEY, &destroyQOSKey);
+# endif
+#endif
+}
+
+
+/***********************************************************************
+* Non-pointer isa decoding
+**********************************************************************/
+#if SUPPORT_INDEXED_ISA
+
+// Indexed non-pointer isa.
+
+// These are used to mask the ISA and see if it's got an index or not.
+const uintptr_t objc_debug_indexed_isa_magic_mask = ISA_INDEX_MAGIC_MASK;
+const uintptr_t objc_debug_indexed_isa_magic_value = ISA_INDEX_MAGIC_VALUE;
+
+// die if masks overlap
+STATIC_ASSERT((ISA_INDEX_MASK & ISA_INDEX_MAGIC_MASK) == 0);
+
+// die if magic is wrong
+STATIC_ASSERT((~ISA_INDEX_MAGIC_MASK & ISA_INDEX_MAGIC_VALUE) == 0);
+
+// Then these are used to extract the index from the ISA.
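+// For example, a debugger walking raw isa bits could recover the class
+// roughly as (illustrative sketch only):
+//   index = (isa & objc_debug_indexed_isa_index_mask) >> objc_debug_indexed_isa_index_shift;
+//   cls   = objc_indexed_classes[index];   // only after the magic bits match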
+const uintptr_t objc_debug_indexed_isa_index_mask = ISA_INDEX_MASK; +const uintptr_t objc_debug_indexed_isa_index_shift = ISA_INDEX_SHIFT; + +asm("\n .globl _objc_absolute_indexed_isa_magic_mask" \ + "\n _objc_absolute_indexed_isa_magic_mask = " STRINGIFY2(ISA_INDEX_MAGIC_MASK)); +asm("\n .globl _objc_absolute_indexed_isa_magic_value" \ + "\n _objc_absolute_indexed_isa_magic_value = " STRINGIFY2(ISA_INDEX_MAGIC_VALUE)); +asm("\n .globl _objc_absolute_indexed_isa_index_mask" \ + "\n _objc_absolute_indexed_isa_index_mask = " STRINGIFY2(ISA_INDEX_MASK)); +asm("\n .globl _objc_absolute_indexed_isa_index_shift" \ + "\n _objc_absolute_indexed_isa_index_shift = " STRINGIFY2(ISA_INDEX_SHIFT)); + + +// And then we can use that index to get the class from this array. Note +// the size is provided so that clients can ensure the index they get is in +// bounds and not read off the end of the array. +// Defined in the objc-msg-*.s files +// const Class objc_indexed_classes[] + +// When we don't have enough bits to store a class*, we can instead store an +// index in to this array. Classes are added here when they are realized. +// Note, an index of 0 is illegal. +uintptr_t objc_indexed_classes_count = 0; + +// SUPPORT_INDEXED_ISA +#else +// not SUPPORT_INDEXED_ISA + +// These variables exist but are all set to 0 so that they are ignored. +const uintptr_t objc_debug_indexed_isa_magic_mask = 0; +const uintptr_t objc_debug_indexed_isa_magic_value = 0; +const uintptr_t objc_debug_indexed_isa_index_mask = 0; +const uintptr_t objc_debug_indexed_isa_index_shift = 0; +Class objc_indexed_classes[1] = { nil }; +uintptr_t objc_indexed_classes_count = 0; + +// not SUPPORT_INDEXED_ISA +#endif + + +#if SUPPORT_PACKED_ISA + +// Packed non-pointer isa. + +asm("\n .globl _objc_absolute_packed_isa_class_mask" \ + "\n _objc_absolute_packed_isa_class_mask = " STRINGIFY2(ISA_MASK)); + +const uintptr_t objc_debug_isa_class_mask = ISA_MASK; +const uintptr_t objc_debug_isa_magic_mask = ISA_MAGIC_MASK; +const uintptr_t objc_debug_isa_magic_value = ISA_MAGIC_VALUE; + +// die if masks overlap +STATIC_ASSERT((ISA_MASK & ISA_MAGIC_MASK) == 0); + +// die if magic is wrong +STATIC_ASSERT((~ISA_MAGIC_MASK & ISA_MAGIC_VALUE) == 0); + +// die if virtual address space bound goes up +STATIC_ASSERT((~ISA_MASK & MACH_VM_MAX_ADDRESS) == 0 || + ISA_MASK + sizeof(void*) == MACH_VM_MAX_ADDRESS); + +// SUPPORT_PACKED_ISA +#else +// not SUPPORT_PACKED_ISA + +// These variables exist but enforce pointer alignment only. +const uintptr_t objc_debug_isa_class_mask = (~WORD_MASK); +const uintptr_t objc_debug_isa_magic_mask = WORD_MASK; +const uintptr_t objc_debug_isa_magic_value = 0; + +// not SUPPORT_PACKED_ISA +#endif + + +typedef locstamped_category_list_t category_list; + + +/* + Low two bits of mlist->entsize is used as the fixed-up marker. + PREOPTIMIZED VERSION: + Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted). + (Protocol method lists are not sorted because of their extra parallel data) + Runtime fixed-up method lists get 3. + UN-PREOPTIMIZED VERSION: + Method lists from shared cache are 1 (uniqued) or 3 (uniqued and sorted) + Shared cache's sorting and uniquing are not trusted, but do affect the + location of the selector name string. + Runtime fixed-up method lists get 2. + + High two bits of protocol->flags is used as the fixed-up marker. + PREOPTIMIZED VERSION: + Protocols from shared cache are 1<<30. + Runtime fixed-up protocols get 1<<30. + UN-PREOPTIMIZED VERSION: + Protocols from shared cache are 1<<30. 
+ Shared cache's fixups are not trusted. + Runtime fixed-up protocols get 3<<30. +*/ + +static uint32_t fixed_up_method_list = 3; +static uint32_t fixed_up_protocol = PROTOCOL_FIXED_UP_1; + +void +disableSharedCacheOptimizations(void) +{ + fixed_up_method_list = 2; + fixed_up_protocol = PROTOCOL_FIXED_UP_1 | PROTOCOL_FIXED_UP_2; +} + +bool method_list_t::isFixedUp() const { + return flags() == fixed_up_method_list; +} + +void method_list_t::setFixedUp() { + runtimeLock.assertWriting(); + assert(!isFixedUp()); + entsizeAndFlags = entsize() | fixed_up_method_list; +} + +bool protocol_t::isFixedUp() const { + return (flags & PROTOCOL_FIXED_UP_MASK) == fixed_up_protocol; +} + +void protocol_t::setFixedUp() { + runtimeLock.assertWriting(); + assert(!isFixedUp()); + flags = (flags & ~PROTOCOL_FIXED_UP_MASK) | fixed_up_protocol; +} + + +method_list_t **method_array_t::endCategoryMethodLists(Class cls) +{ + method_list_t **mlists = beginLists(); + method_list_t **mlistsEnd = endLists(); + + if (mlists == mlistsEnd || !cls->data()->ro->baseMethods()) + { + // No methods, or no base methods. + // Everything here is a category method. + return mlistsEnd; + } + + // Have base methods. Category methods are + // everything except the last method list. + return mlistsEnd - 1; +} + +static const char *sel_cname(SEL sel) +{ + return (const char *)(void *)sel; +} + + +static size_t protocol_list_size(const protocol_list_t *plist) +{ + return sizeof(protocol_list_t) + plist->count * sizeof(protocol_t *); +} + + +static void try_free(const void *p) +{ + if (p && malloc_size(p)) free((void *)p); +} + + +static Class +alloc_class_for_subclass(Class supercls, size_t extraBytes) +{ + if (!supercls || !supercls->isSwift()) { + return _calloc_class(sizeof(objc_class) + extraBytes); + } + + // Superclass is a Swift class. New subclass must duplicate its extra bits. + + // Allocate the new class, with space for super's prefix and suffix + // and self's extraBytes. + swift_class_t *swiftSupercls = (swift_class_t *)supercls; + size_t superSize = swiftSupercls->classSize; + void *superBits = swiftSupercls->baseAddress(); + void *bits = malloc(superSize + extraBytes); + + // Copy all of the superclass's data to the new class. + memcpy(bits, superBits, superSize); + + // Erase the objc data and the Swift description in the new class. + swift_class_t *swcls = (swift_class_t *) + ((uint8_t *)bits + swiftSupercls->classAddressOffset); + bzero(swcls, sizeof(objc_class)); + swcls->description = nil; + + // Mark this class as Swift-enhanced. + swcls->bits.setIsSwift(); + + return (Class)swcls; +} + + +/*********************************************************************** +* object_getIndexedIvars. +**********************************************************************/ +void *object_getIndexedIvars(id obj) +{ + uint8_t *base = (uint8_t *)obj; + + if (!obj) return nil; + if (obj->isTaggedPointer()) return nil; + + if (!obj->isClass()) return base + obj->ISA()->alignedInstanceSize(); + + Class cls = (Class)obj; + if (!cls->isSwift()) return base + sizeof(objc_class); + + swift_class_t *swcls = (swift_class_t *)cls; + return base - swcls->classAddressOffset + word_align(swcls->classSize); +} + + +/*********************************************************************** +* make_ro_writeable +* Reallocates rw->ro if necessary to make it writeable. +* Locking: runtimeLock must be held by the caller. 
+**********************************************************************/ +static class_ro_t *make_ro_writeable(class_rw_t *rw) +{ + runtimeLock.assertWriting(); + + if (rw->flags & RW_COPIED_RO) { + // already writeable, do nothing + } else { + class_ro_t *ro = (class_ro_t *) + memdup(rw->ro, sizeof(*rw->ro)); + rw->ro = ro; + rw->flags |= RW_COPIED_RO; + } + return (class_ro_t *)rw->ro; +} + + +/*********************************************************************** +* unattachedCategories +* Returns the class => categories map of unattached categories. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static NXMapTable *unattachedCategories(void) +{ + runtimeLock.assertWriting(); + + static NXMapTable *category_map = nil; + + if (category_map) return category_map; + + // fixme initial map size + category_map = NXCreateMapTable(NXPtrValueMapPrototype, 16); + + return category_map; +} + + +/*********************************************************************** +* addUnattachedCategoryForClass +* Records an unattached category. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void addUnattachedCategoryForClass(category_t *cat, Class cls, + header_info *catHeader) +{ + runtimeLock.assertWriting(); + + // DO NOT use cat->cls! cls may be cat->cls->isa instead + NXMapTable *cats = unattachedCategories(); + category_list *list; + + list = (category_list *)NXMapGet(cats, cls); + if (!list) { + list = (category_list *) + calloc(sizeof(*list) + sizeof(list->list[0]), 1); + } else { + list = (category_list *) + realloc(list, sizeof(*list) + sizeof(list->list[0]) * (list->count + 1)); + } + list->list[list->count++] = (locstamped_category_t){cat, catHeader}; + NXMapInsert(cats, cls, list); +} + + +/*********************************************************************** +* removeUnattachedCategoryForClass +* Removes an unattached category. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void removeUnattachedCategoryForClass(category_t *cat, Class cls) +{ + runtimeLock.assertWriting(); + + // DO NOT use cat->cls! cls may be cat->cls->isa instead + NXMapTable *cats = unattachedCategories(); + category_list *list; + + list = (category_list *)NXMapGet(cats, cls); + if (!list) return; + + uint32_t i; + for (i = 0; i < list->count; i++) { + if (list->list[i].cat == cat) { + // shift entries to preserve list order + memmove(&list->list[i], &list->list[i+1], + (list->count-i-1) * sizeof(list->list[i])); + list->count--; + return; + } + } +} + + +/*********************************************************************** +* unattachedCategoriesForClass +* Returns the list of unattached categories for a class, and +* deletes them from the list. +* The result must be freed by the caller. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static category_list * +unattachedCategoriesForClass(Class cls, bool realizing) +{ + runtimeLock.assertWriting(); + return (category_list *)NXMapRemove(unattachedCategories(), cls); +} + + +/*********************************************************************** +* removeAllUnattachedCategoriesForClass +* Deletes all unattached categories (loaded or not) for a class. +* Locking: runtimeLock must be held by the caller. 
+**********************************************************************/ +static void removeAllUnattachedCategoriesForClass(Class cls) +{ + runtimeLock.assertWriting(); + + void *list = NXMapRemove(unattachedCategories(), cls); + if (list) free(list); +} + + +/*********************************************************************** +* classNSObject +* Returns class NSObject. +* Locking: none +**********************************************************************/ +static Class classNSObject(void) +{ + extern objc_class OBJC_CLASS_$_NSObject; + return (Class)&OBJC_CLASS_$_NSObject; +} + + +/*********************************************************************** +* printReplacements +* Implementation of PrintReplacedMethods / OBJC_PRINT_REPLACED_METHODS. +* Warn about methods from cats that override other methods in cats or cls. +* Assumes no methods from cats have been added to cls yet. +**********************************************************************/ +static void printReplacements(Class cls, category_list *cats) +{ + uint32_t c; + bool isMeta = cls->isMetaClass(); + + if (!cats) return; + + // Newest categories are LAST in cats + // Later categories override earlier ones. + for (c = 0; c < cats->count; c++) { + category_t *cat = cats->list[c].cat; + + method_list_t *mlist = cat->methodsForMeta(isMeta); + if (!mlist) continue; + + for (const auto& meth : *mlist) { + SEL s = sel_registerName(sel_cname(meth.name)); + + // Search for replaced methods in method lookup order. + // Complain about the first duplicate only. + + // Look for method in earlier categories + for (uint32_t c2 = 0; c2 < c; c2++) { + category_t *cat2 = cats->list[c2].cat; + + const method_list_t *mlist2 = cat2->methodsForMeta(isMeta); + if (!mlist2) continue; + + for (const auto& meth2 : *mlist2) { + SEL s2 = sel_registerName(sel_cname(meth2.name)); + if (s == s2) { + logReplacedMethod(cls->nameForLogging(), s, + cls->isMetaClass(), cat->name, + meth2.imp, meth.imp); + goto complained; + } + } + } + + // Look for method in cls + for (const auto& meth2 : cls->data()->methods) { + SEL s2 = sel_registerName(sel_cname(meth2.name)); + if (s == s2) { + logReplacedMethod(cls->nameForLogging(), s, + cls->isMetaClass(), cat->name, + meth2.imp, meth.imp); + goto complained; + } + } + + complained: + ; + } + } +} + + +static bool isBundleClass(Class cls) +{ + return cls->data()->ro->flags & RO_FROM_BUNDLE; +} + + +static void +fixupMethodList(method_list_t *mlist, bool bundleCopy, bool sort) +{ + runtimeLock.assertWriting(); + assert(!mlist->isFixedUp()); + + // fixme lock less in attachMethodLists ? + sel_lock(); + + // Unique selectors in list. + for (auto& meth : *mlist) { + const char *name = sel_cname(meth.name); + meth.name = sel_registerNameNoLock(name, bundleCopy); + } + + sel_unlock(); + + // Sort by selector address. + if (sort) { + method_t::SortBySELAddress sorter; + std::stable_sort(mlist->begin(), mlist->end(), sorter); + } + + // Mark method list as uniqued and sorted + mlist->setFixedUp(); +} + + +static void +prepareMethodLists(Class cls, method_list_t **addedLists, int addedCount, + bool baseMethods, bool methodsFromBundle) +{ + runtimeLock.assertWriting(); + + if (addedCount == 0) return; + + // Don't scan redundantly + bool scanForCustomRR = !cls->hasCustomRR(); + bool scanForCustomAWZ = !cls->hasCustomAWZ(); + + // There exist RR/AWZ special cases for some class's base methods. + // But this code should never need to scan base methods for RR/AWZ: + // default RR/AWZ cannot be set before setInitialized(). 
+ // Therefore we need not handle any special cases here. + if (baseMethods) { + assert(!scanForCustomRR && !scanForCustomAWZ); + } + + // Add method lists to array. + // Reallocate un-fixed method lists. + // The new methods are PREPENDED to the method list array. + + for (int i = 0; i < addedCount; i++) { + method_list_t *mlist = addedLists[i]; + assert(mlist); + + // Fixup selectors if necessary + if (!mlist->isFixedUp()) { + fixupMethodList(mlist, methodsFromBundle, true/*sort*/); + } + + // Scan for method implementations tracked by the class's flags + if (scanForCustomRR && methodListImplementsRR(mlist)) { + cls->setHasCustomRR(); + scanForCustomRR = false; + } + if (scanForCustomAWZ && methodListImplementsAWZ(mlist)) { + cls->setHasCustomAWZ(); + scanForCustomAWZ = false; + } + } +} + + +// Attach method lists and properties and protocols from categories to a class. +// Assumes the categories in cats are all loaded and sorted by load order, +// oldest categories first. +static void +attachCategories(Class cls, category_list *cats, bool flush_caches) +{ + if (!cats) return; + if (PrintReplacedMethods) printReplacements(cls, cats); + + bool isMeta = cls->isMetaClass(); + + // fixme rearrange to remove these intermediate allocations + method_list_t **mlists = (method_list_t **) + malloc(cats->count * sizeof(*mlists)); + property_list_t **proplists = (property_list_t **) + malloc(cats->count * sizeof(*proplists)); + protocol_list_t **protolists = (protocol_list_t **) + malloc(cats->count * sizeof(*protolists)); + + // Count backwards through cats to get newest categories first + int mcount = 0; + int propcount = 0; + int protocount = 0; + int i = cats->count; + bool fromBundle = NO; + while (i--) { + auto& entry = cats->list[i]; + + method_list_t *mlist = entry.cat->methodsForMeta(isMeta); + if (mlist) { + mlists[mcount++] = mlist; + fromBundle |= entry.hi->isBundle(); + } + + property_list_t *proplist = + entry.cat->propertiesForMeta(isMeta, entry.hi); + if (proplist) { + proplists[propcount++] = proplist; + } + + protocol_list_t *protolist = entry.cat->protocols; + if (protolist) { + protolists[protocount++] = protolist; + } + } + + auto rw = cls->data(); + + prepareMethodLists(cls, mlists, mcount, NO, fromBundle); + rw->methods.attachLists(mlists, mcount); + free(mlists); + if (flush_caches && mcount > 0) flushCaches(cls); + + rw->properties.attachLists(proplists, propcount); + free(proplists); + + rw->protocols.attachLists(protolists, protocount); + free(protolists); +} + + +/*********************************************************************** +* methodizeClass +* Fixes up cls's method list, protocol list, and property list. +* Attaches any outstanding categories. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void methodizeClass(Class cls) +{ + runtimeLock.assertWriting(); + + bool isMeta = cls->isMetaClass(); + auto rw = cls->data(); + auto ro = rw->ro; + + // Methodizing for the first time + if (PrintConnecting) { + _objc_inform("CLASS: methodizing class '%s' %s", + cls->nameForLogging(), isMeta ? "(meta)" : ""); + } + + // Install methods and properties that the class implements itself. 
+ method_list_t *list = ro->baseMethods(); + if (list) { + prepareMethodLists(cls, &list, 1, YES, isBundleClass(cls)); + rw->methods.attachLists(&list, 1); + } + + property_list_t *proplist = ro->baseProperties; + if (proplist) { + rw->properties.attachLists(&proplist, 1); + } + + protocol_list_t *protolist = ro->baseProtocols; + if (protolist) { + rw->protocols.attachLists(&protolist, 1); + } + + // Root classes get bonus method implementations if they don't have + // them already. These apply before category replacements. + if (cls->isRootMetaclass()) { + // root metaclass + addMethod(cls, SEL_initialize, (IMP)&objc_noop_imp, "", NO); + } + + // Attach categories. + category_list *cats = unattachedCategoriesForClass(cls, true /*realizing*/); + attachCategories(cls, cats, false /*don't flush caches*/); + + if (PrintConnecting) { + if (cats) { + for (uint32_t i = 0; i < cats->count; i++) { + _objc_inform("CLASS: attached category %c%s(%s)", + isMeta ? '+' : '-', + cls->nameForLogging(), cats->list[i].cat->name); + } + } + } + + if (cats) free(cats); + +#if DEBUG + // Debug: sanity-check all SELs; log method list contents + for (const auto& meth : rw->methods) { + if (PrintConnecting) { + _objc_inform("METHOD %c[%s %s]", isMeta ? '+' : '-', + cls->nameForLogging(), sel_getName(meth.name)); + } + assert(sel_registerName(sel_getName(meth.name)) == meth.name); + } +#endif +} + + +/*********************************************************************** +* remethodizeClass +* Attach outstanding categories to an existing class. +* Fixes up cls's method list, protocol list, and property list. +* Updates method caches for cls and its subclasses. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void remethodizeClass(Class cls) +{ + category_list *cats; + bool isMeta; + + runtimeLock.assertWriting(); + + isMeta = cls->isMetaClass(); + + // Re-methodizing: check for more categories + if ((cats = unattachedCategoriesForClass(cls, false/*not realizing*/))) { + if (PrintConnecting) { + _objc_inform("CLASS: attaching categories to class '%s' %s", + cls->nameForLogging(), isMeta ? "(meta)" : ""); + } + + attachCategories(cls, cats, true /*flush caches*/); + free(cats); + } +} + + +/*********************************************************************** +* nonMetaClasses +* Returns the secondary metaclass => class map +* Used for some cases of +initialize and +resolveClassMethod:. +* This map does not contain all class and metaclass pairs. It only +* contains metaclasses whose classes would be in the runtime-allocated +* named-class table, but are not because some other class with the same name +* is in that table. +* Classes with no duplicates are not included. +* Classes in the preoptimized named-class table are not included. +* Classes whose duplicates are in the preoptimized table are not included. +* Most code should use getNonMetaClass() instead of reading this table. 
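+* For example (hypothetical), if two images each define a class named "Foo",
+* only one "Foo" wins the named-class table; the loser's metaclass => class
+* pair is recorded here so +initialize can still find its non-meta class.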
+* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static NXMapTable *nonmeta_class_map = nil; +static NXMapTable *nonMetaClasses(void) +{ + runtimeLock.assertLocked(); + + if (nonmeta_class_map) return nonmeta_class_map; + + // nonmeta_class_map is typically small + INIT_ONCE_PTR(nonmeta_class_map, + NXCreateMapTable(NXPtrValueMapPrototype, 32), + NXFreeMapTable(v)); + + return nonmeta_class_map; +} + + +/*********************************************************************** +* addNonMetaClass +* Adds metacls => cls to the secondary metaclass map +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void addNonMetaClass(Class cls) +{ + runtimeLock.assertWriting(); + void *old; + old = NXMapInsert(nonMetaClasses(), cls->ISA(), cls); + + assert(!cls->isMetaClass()); + assert(cls->ISA()->isMetaClass()); + assert(!old); +} + + +static void removeNonMetaClass(Class cls) +{ + runtimeLock.assertWriting(); + NXMapRemove(nonMetaClasses(), cls->ISA()); +} + + +static bool scanMangledField(const char *&string, const char *end, + const char *&field, int& length) +{ + // Leading zero not allowed. + if (*string == '0') return false; + + length = 0; + field = string; + while (field < end) { + char c = *field; + if (!isdigit(c)) break; + field++; + if (__builtin_smul_overflow(length, 10, &length)) return false; + if (__builtin_sadd_overflow(length, c - '0', &length)) return false; + } + + string = field + length; + return length > 0 && string <= end; +} + + +/*********************************************************************** +* copySwiftV1DemangledName +* Returns the pretty form of the given Swift-v1-mangled class or protocol name. +* Returns nil if the string doesn't look like a mangled Swift v1 name. +* The result must be freed with free(). +**********************************************************************/ +static char *copySwiftV1DemangledName(const char *string, bool isProtocol = false) +{ + if (!string) return nil; + + // Swift mangling prefix. + if (strncmp(string, isProtocol ? "_TtP" : "_TtC", 4) != 0) return nil; + string += 4; + + const char *end = string + strlen(string); + + // Module name. + const char *prefix; + int prefixLength; + if (string[0] == 's') { + // "s" is the Swift module. + prefix = "Swift"; + prefixLength = 5; + string += 1; + } else { + if (! scanMangledField(string, end, prefix, prefixLength)) return nil; + } + + // Class or protocol name. + const char *suffix; + int suffixLength; + if (! scanMangledField(string, end, suffix, suffixLength)) return nil; + + if (isProtocol) { + // Remainder must be "_". + if (strcmp(string, "_") != 0) return nil; + } else { + // Remainder must be empty. + if (string != end) return nil; + } + + char *result; + asprintf(&result, "%.*s.%.*s", prefixLength,prefix, suffixLength,suffix); + return result; +} + + +/*********************************************************************** +* copySwiftV1MangledName +* Returns the Swift 1.0 mangled form of the given class or protocol name. +* Returns nil if the string doesn't look like an unmangled Swift name. +* The result must be freed with free(). 
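+* For example (hypothetical names): "Swift.String" maps to "_TtCs6String",
+* "MyModule.MyClass" to "_TtC8MyModule7MyClass", and a protocol
+* "MyModule.MyProto" to "_TtP8MyModule7MyProto_".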
+**********************************************************************/ +static char *copySwiftV1MangledName(const char *string, bool isProtocol = false) +{ + if (!string) return nil; + + size_t dotCount = 0; + size_t dotIndex; + const char *s; + for (s = string; *s; s++) { + if (*s == '.') { + dotCount++; + dotIndex = s - string; + } + } + size_t stringLength = s - string; + + if (dotCount != 1 || dotIndex == 0 || dotIndex >= stringLength-1) { + return nil; + } + + const char *prefix = string; + size_t prefixLength = dotIndex; + const char *suffix = string + dotIndex + 1; + size_t suffixLength = stringLength - (dotIndex + 1); + + char *name; + + if (prefixLength == 5 && memcmp(prefix, "Swift", 5) == 0) { + asprintf(&name, "_Tt%cs%zu%.*s%s", + isProtocol ? 'P' : 'C', + suffixLength, (int)suffixLength, suffix, + isProtocol ? "_" : ""); + } else { + asprintf(&name, "_Tt%c%zu%.*s%zu%.*s%s", + isProtocol ? 'P' : 'C', + prefixLength, (int)prefixLength, prefix, + suffixLength, (int)suffixLength, suffix, + isProtocol ? "_" : ""); + } + return name; +} + + +/*********************************************************************** +* getClass +* Looks up a class by name. The class MIGHT NOT be realized. +* Demangled Swift names are recognized. +* Locking: runtimeLock must be read- or write-locked by the caller. +**********************************************************************/ + +// This is a misnomer: gdb_objc_realized_classes is actually a list of +// named classes not in the dyld shared cache, whether realized or not. +NXMapTable *gdb_objc_realized_classes; // exported for debuggers in objc-gdb.h + +static Class getClass_impl(const char *name) +{ + runtimeLock.assertLocked(); + + // allocated in _read_images + assert(gdb_objc_realized_classes); + + // Try runtime-allocated table + Class result = (Class)NXMapGet(gdb_objc_realized_classes, name); + if (result) return result; + + // Try table from dyld shared cache + return getPreoptimizedClass(name); +} + +static Class getClass(const char *name) +{ + runtimeLock.assertLocked(); + + // Try name as-is + Class result = getClass_impl(name); + if (result) return result; + + // Try Swift-mangled equivalent of the given name. + if (char *swName = copySwiftV1MangledName(name)) { + result = getClass_impl(swName); + free(swName); + return result; + } + + return nil; +} + + +/*********************************************************************** +* addNamedClass +* Adds name => cls to the named non-meta class map. +* Warns about duplicate class names and keeps the old mapping. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void addNamedClass(Class cls, const char *name, Class replacing = nil) +{ + runtimeLock.assertWriting(); + Class old; + if ((old = getClass(name)) && old != replacing) { + inform_duplicate(name, old, cls); + + // getNonMetaClass uses name lookups. Classes not found by name + // lookup must be in the secondary meta->nonmeta table. + addNonMetaClass(cls); + } else { + NXMapInsert(gdb_objc_realized_classes, name, cls); + } + assert(!(cls->data()->flags & RO_META)); + + // wrong: constructed classes are already realized when they get here + // assert(!cls->isRealized()); +} + + +/*********************************************************************** +* removeNamedClass +* Removes cls from the name => cls map. 
+* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void removeNamedClass(Class cls, const char *name) +{ + runtimeLock.assertWriting(); + assert(!(cls->data()->flags & RO_META)); + if (cls == NXMapGet(gdb_objc_realized_classes, name)) { + NXMapRemove(gdb_objc_realized_classes, name); + } else { + // cls has a name collision with another class - don't remove the other + // but do remove cls from the secondary metaclass->class map. + removeNonMetaClass(cls); + } +} + + +/*********************************************************************** +* futureNamedClasses +* Returns the classname => future class map for unrealized future classes. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static NXMapTable *future_named_class_map = nil; +static NXMapTable *futureNamedClasses() +{ + runtimeLock.assertWriting(); + + if (future_named_class_map) return future_named_class_map; + + // future_named_class_map is big enough for CF's classes and a few others + future_named_class_map = + NXCreateMapTable(NXStrValueMapPrototype, 32); + + return future_named_class_map; +} + + +static bool haveFutureNamedClasses() { + return future_named_class_map && NXCountMapTable(future_named_class_map); +} + + +/*********************************************************************** +* addFutureNamedClass +* Installs cls as the class structure to use for the named class if it appears. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void addFutureNamedClass(const char *name, Class cls) +{ + void *old; + + runtimeLock.assertWriting(); + + if (PrintFuture) { + _objc_inform("FUTURE: reserving %p for %s", (void*)cls, name); + } + + class_rw_t *rw = (class_rw_t *)calloc(sizeof(class_rw_t), 1); + class_ro_t *ro = (class_ro_t *)calloc(sizeof(class_ro_t), 1); + ro->name = strdupIfMutable(name); + rw->ro = ro; + cls->setData(rw); + cls->data()->flags = RO_FUTURE; + + old = NXMapKeyCopyingInsert(futureNamedClasses(), name, cls); + assert(!old); +} + + +/*********************************************************************** +* popFutureNamedClass +* Removes the named class from the unrealized future class list, +* because it has been realized. +* Returns nil if the name is not used by a future class. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static Class popFutureNamedClass(const char *name) +{ + runtimeLock.assertWriting(); + + Class cls = nil; + + if (future_named_class_map) { + cls = (Class)NXMapKeyFreeingRemove(future_named_class_map, name); + if (cls && NXCountMapTable(future_named_class_map) == 0) { + NXFreeMapTable(future_named_class_map); + future_named_class_map = nil; + } + } + + return cls; +} + + +/*********************************************************************** +* remappedClasses +* Returns the oldClass => newClass map for realized future classes. +* Returns the oldClass => nil map for ignored weak-linked classes. 
+* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static NXMapTable *remappedClasses(bool create) +{ + static NXMapTable *remapped_class_map = nil; + + runtimeLock.assertLocked(); + + if (remapped_class_map) return remapped_class_map; + if (!create) return nil; + + // remapped_class_map is big enough to hold CF's classes and a few others + INIT_ONCE_PTR(remapped_class_map, + NXCreateMapTable(NXPtrValueMapPrototype, 32), + NXFreeMapTable(v)); + + return remapped_class_map; +} + + +/*********************************************************************** +* noClassesRemapped +* Returns YES if no classes have been remapped +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static bool noClassesRemapped(void) +{ + runtimeLock.assertLocked(); + + bool result = (remappedClasses(NO) == nil); +#if DEBUG + // Catch construction of an empty table, which defeats optimization. + NXMapTable *map = remappedClasses(NO); + if (map) assert(NXCountMapTable(map) > 0); +#endif + return result; +} + + +/*********************************************************************** +* addRemappedClass +* newcls is a realized future class, replacing oldcls. +* OR newcls is nil, replacing ignored weak-linked class oldcls. +* Locking: runtimeLock must be write-locked by the caller +**********************************************************************/ +static void addRemappedClass(Class oldcls, Class newcls) +{ + runtimeLock.assertWriting(); + + if (PrintFuture) { + _objc_inform("FUTURE: using %p instead of %p for %s", + (void*)newcls, (void*)oldcls, oldcls->nameForLogging()); + } + + void *old; + old = NXMapInsert(remappedClasses(YES), oldcls, newcls); + assert(!old); +} + + +/*********************************************************************** +* remapClass +* Returns the live class pointer for cls, which may be pointing to +* a class struct that has been reallocated. +* Returns nil if cls is ignored because of weak linking. +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static Class remapClass(Class cls) +{ + runtimeLock.assertLocked(); + + Class c2; + + if (!cls) return nil; + + NXMapTable *map = remappedClasses(NO); + if (!map || NXMapMember(map, cls, (void**)&c2) == NX_MAPNOTAKEY) { + return cls; + } else { + return c2; + } +} + +static Class remapClass(classref_t cls) +{ + return remapClass((Class)cls); +} + +Class _class_remap(Class cls) +{ + rwlock_reader_t lock(runtimeLock); + return remapClass(cls); +} + +/*********************************************************************** +* remapClassRef +* Fix up a class ref, in case the class referenced has been reallocated +* or is an ignored weak-linked class. +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static void remapClassRef(Class *clsref) +{ + runtimeLock.assertLocked(); + + Class newcls = remapClass(*clsref); + if (*clsref != newcls) *clsref = newcls; +} + + +/*********************************************************************** +* getNonMetaClass +* Return the ordinary class for this class or metaclass. +* `inst` is an instance of `cls` or a subclass thereof, or nil. +* Non-nil inst is faster. +* Used by +initialize. 
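+* For example, +initialize dispatched to a metaclass needs the plain class
+* whose ISA() is that metaclass; a non-nil inst lets the code below walk the
+* instance's class chain instead of falling back to the slower by-name,
+* secondary-table, and shared-cache lookups.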
+* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static Class getNonMetaClass(Class metacls, id inst) +{ + static int total, named, secondary, sharedcache; + runtimeLock.assertLocked(); + + realizeClass(metacls); + + total++; + + // return cls itself if it's already a non-meta class + if (!metacls->isMetaClass()) return metacls; + + // metacls really is a metaclass + + // special case for root metaclass + // where inst == inst->ISA() == metacls is possible + if (metacls->ISA() == metacls) { + Class cls = metacls->superclass; + assert(cls->isRealized()); + assert(!cls->isMetaClass()); + assert(cls->ISA() == metacls); + if (cls->ISA() == metacls) return cls; + } + + // use inst if available + if (inst) { + Class cls = (Class)inst; + realizeClass(cls); + // cls may be a subclass - find the real class for metacls + while (cls && cls->ISA() != metacls) { + cls = cls->superclass; + realizeClass(cls); + } + if (cls) { + assert(!cls->isMetaClass()); + assert(cls->ISA() == metacls); + return cls; + } +#if DEBUG + _objc_fatal("cls is not an instance of metacls"); +#else + // release build: be forgiving and fall through to slow lookups +#endif + } + + // try name lookup + { + Class cls = getClass(metacls->mangledName()); + if (cls->ISA() == metacls) { + named++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful by-name metaclass lookups", + named, total, named*100.0/total); + } + + realizeClass(cls); + return cls; + } + } + + // try secondary table + { + Class cls = (Class)NXMapGet(nonMetaClasses(), metacls); + if (cls) { + secondary++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful secondary metaclass lookups", + secondary, total, secondary*100.0/total); + } + + assert(cls->ISA() == metacls); + realizeClass(cls); + return cls; + } + } + + // try any duplicates in the dyld shared cache + { + Class cls = nil; + + int count; + Class *classes = copyPreoptimizedClasses(metacls->mangledName(),&count); + if (classes) { + for (int i = 0; i < count; i++) { + if (classes[i]->ISA() == metacls) { + cls = classes[i]; + break; + } + } + free(classes); + } + + if (cls) { + sharedcache++; + if (PrintInitializing) { + _objc_inform("INITIALIZE: %d/%d (%g%%) " + "successful shared cache metaclass lookups", + sharedcache, total, sharedcache*100.0/total); + } + + realizeClass(cls); + return cls; + } + } + + _objc_fatal("no class for metaclass %p", (void*)metacls); +} + + +/*********************************************************************** +* _class_getNonMetaClass +* Return the ordinary class for this class or metaclass. +* Used by +initialize. +* Locking: acquires runtimeLock +**********************************************************************/ +Class _class_getNonMetaClass(Class cls, id obj) +{ + rwlock_writer_t lock(runtimeLock); + cls = getNonMetaClass(cls, obj); + assert(cls->isRealized()); + return cls; +} + + +/*********************************************************************** +* addRootClass +* Adds cls as a new realized root class. +* Locking: runtimeLock must be held by the caller. 
+**********************************************************************/ +static Class _firstRealizedClass = nil; +Class firstRealizedClass() +{ + runtimeLock.assertLocked(); + return _firstRealizedClass; +} + +static void addRootClass(Class cls) +{ + runtimeLock.assertWriting(); + + assert(cls->isRealized()); + cls->data()->nextSiblingClass = _firstRealizedClass; + _firstRealizedClass = cls; +} + +static void removeRootClass(Class cls) +{ + runtimeLock.assertWriting(); + + Class *classp; + for (classp = &_firstRealizedClass; + *classp != cls; + classp = &(*classp)->data()->nextSiblingClass) + { } + + *classp = (*classp)->data()->nextSiblingClass; +} + + +/*********************************************************************** +* addSubclass +* Adds subcls as a subclass of supercls. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void addSubclass(Class supercls, Class subcls) +{ + runtimeLock.assertWriting(); + + if (supercls && subcls) { + assert(supercls->isRealized()); + assert(subcls->isRealized()); + subcls->data()->nextSiblingClass = supercls->data()->firstSubclass; + supercls->data()->firstSubclass = subcls; + + if (supercls->hasCxxCtor()) { + subcls->setHasCxxCtor(); + } + + if (supercls->hasCxxDtor()) { + subcls->setHasCxxDtor(); + } + + if (supercls->hasCustomRR()) { + subcls->setHasCustomRR(true); + } + + if (supercls->hasCustomAWZ()) { + subcls->setHasCustomAWZ(true); + } + + // Special case: instancesRequireRawIsa does not propagate + // from root class to root metaclass + if (supercls->instancesRequireRawIsa() && supercls->superclass) { + subcls->setInstancesRequireRawIsa(true); + } + } +} + + +/*********************************************************************** +* removeSubclass +* Removes subcls as a subclass of supercls. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void removeSubclass(Class supercls, Class subcls) +{ + runtimeLock.assertWriting(); + assert(supercls->isRealized()); + assert(subcls->isRealized()); + assert(subcls->superclass == supercls); + + Class *cp; + for (cp = &supercls->data()->firstSubclass; + *cp && *cp != subcls; + cp = &(*cp)->data()->nextSiblingClass) + ; + assert(*cp == subcls); + *cp = subcls->data()->nextSiblingClass; +} + + + +/*********************************************************************** +* protocols +* Returns the protocol name => protocol map for protocols. +* Locking: runtimeLock must read- or write-locked by the caller +**********************************************************************/ +static NXMapTable *protocols(void) +{ + static NXMapTable *protocol_map = nil; + + runtimeLock.assertLocked(); + + INIT_ONCE_PTR(protocol_map, + NXCreateMapTable(NXStrValueMapPrototype, 16), + NXFreeMapTable(v) ); + + return protocol_map; +} + + +/*********************************************************************** +* getProtocol +* Looks up a protocol by name. Demangled Swift names are recognized. +* Locking: runtimeLock must be read- or write-locked by the caller. +**********************************************************************/ +static Protocol *getProtocol(const char *name) +{ + runtimeLock.assertLocked(); + + // Try name as-is. + Protocol *result = (Protocol *)NXMapGet(protocols(), name); + if (result) return result; + + // Try Swift-mangled equivalent of the given name. 
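+    // For example, a lookup for a hypothetical "MyModule.MyProto" is retried
+    // below as its Swift v1 mangled form "_TtP8MyModule7MyProto_".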
+ if (char *swName = copySwiftV1MangledName(name, true/*isProtocol*/)) { + result = (Protocol *)NXMapGet(protocols(), swName); + free(swName); + return result; + } + + return nil; +} + + +/*********************************************************************** +* remapProtocol +* Returns the live protocol pointer for proto, which may be pointing to +* a protocol struct that has been reallocated. +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static protocol_t *remapProtocol(protocol_ref_t proto) +{ + runtimeLock.assertLocked(); + + protocol_t *newproto = (protocol_t *) + getProtocol(((protocol_t *)proto)->mangledName); + return newproto ? newproto : (protocol_t *)proto; +} + + +/*********************************************************************** +* remapProtocolRef +* Fix up a protocol ref, in case the protocol referenced has been reallocated. +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static size_t UnfixedProtocolReferences; +static void remapProtocolRef(protocol_t **protoref) +{ + runtimeLock.assertLocked(); + + protocol_t *newproto = remapProtocol((protocol_ref_t)*protoref); + if (*protoref != newproto) { + *protoref = newproto; + UnfixedProtocolReferences++; + } +} + + +/*********************************************************************** +* moveIvars +* Slides a class's ivars to accommodate the given superclass size. +* Ivars are NOT compacted to compensate for a superclass that shrunk. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void moveIvars(class_ro_t *ro, uint32_t superSize) +{ + runtimeLock.assertWriting(); + + uint32_t diff; + + assert(superSize > ro->instanceStart); + diff = superSize - ro->instanceStart; + + if (ro->ivars) { + // Find maximum alignment in this class's ivars + uint32_t maxAlignment = 1; + for (const auto& ivar : *ro->ivars) { + if (!ivar.offset) continue; // anonymous bitfield + + uint32_t alignment = ivar.alignment(); + if (alignment > maxAlignment) maxAlignment = alignment; + } + + // Compute a slide value that preserves that alignment + uint32_t alignMask = maxAlignment - 1; + diff = (diff + alignMask) & ~alignMask; + + // Slide all of this class's ivars en masse + for (const auto& ivar : *ro->ivars) { + if (!ivar.offset) continue; // anonymous bitfield + + uint32_t oldOffset = (uint32_t)*ivar.offset; + uint32_t newOffset = oldOffset + diff; + *ivar.offset = newOffset; + + if (PrintIvars) { + _objc_inform("IVARS: offset %u -> %u for %s " + "(size %u, align %u)", + oldOffset, newOffset, ivar.name, + ivar.size, ivar.alignment()); + } + } + } + + *(uint32_t *)&ro->instanceStart += diff; + *(uint32_t *)&ro->instanceSize += diff; +} + + +static void reconcileInstanceVariables(Class cls, Class supercls, const class_ro_t*& ro) +{ + class_rw_t *rw = cls->data(); + + assert(supercls); + assert(!cls->isMetaClass()); + + /* debug: print them all before sliding + if (ro->ivars) { + for (const auto& ivar : *ro->ivars) { + if (!ivar.offset) continue; // anonymous bitfield + + _objc_inform("IVARS: %s.%s (offset %u, size %u, align %u)", + ro->name, ivar.name, + *ivar.offset, ivar.size, ivar.alignment()); + } + } + */ + + // Non-fragile ivars - reconcile this class with its superclass + const class_ro_t *super_ro = supercls->data()->ro; + + if (DebugNonFragileIvars) { + // Debugging: Force 
non-fragile ivars to slide.
+        // Intended to find compiler, runtime, and program bugs.
+        // If it fails with this and works without, you have a problem.
+
+        // Operation: Reset everything to 0 + misalignment.
+        // Then force the normal sliding logic to push everything back.
+
+        // Exceptions: root classes, metaclasses, *NSCF* classes,
+        // __CF* classes, NSConstantString, NSSimpleCString
+
+        // (already know it's not root because supercls != nil)
+        const char *clsname = cls->mangledName();
+        if (!strstr(clsname, "NSCF")  &&
+            0 != strncmp(clsname, "__CF", 4)  &&
+            0 != strcmp(clsname, "NSConstantString")  &&
+            0 != strcmp(clsname, "NSSimpleCString"))
+        {
+            uint32_t oldStart = ro->instanceStart;
+            class_ro_t *ro_w = make_ro_writeable(rw);
+            ro = rw->ro;
+
+            // Find max ivar alignment in class.
+            // default to word size to simplify ivar update
+            uint32_t alignment = 1<<WORD_SHIFT;
+            if (ro->ivars) {
+                for (const auto& ivar : *ro->ivars) {
+                    if (ivar.alignment() > alignment) {
+                        alignment = ivar.alignment();
+                    }
+                }
+            }
+            uint32_t misalignment = ro->instanceStart % alignment;
+            uint32_t delta = ro->instanceStart - misalignment;
+            ro_w->instanceStart = misalignment;
+            ro_w->instanceSize -= delta;
+
+            if (PrintIvars) {
+                _objc_inform("IVARS: DEBUG: forcing ivars for class '%s' "
+                             "to slide (instanceStart %zu -> %zu)",
+                             cls->nameForLogging(), (size_t)oldStart,
+                             (size_t)ro->instanceStart);
+            }
+
+            if (ro->ivars) {
+                for (const auto& ivar : *ro->ivars) {
+                    if (!ivar.offset) continue;  // anonymous bitfield
+                    *ivar.offset -= delta;
+                }
+            }
+        }
+    }
+
+    if (ro->instanceStart >= super_ro->instanceSize) {
+        // Superclass has not overgrown its space. We're done here.
+        return;
+    }
+    // fixme can optimize for "class has no new ivars", etc
+
+    if (ro->instanceStart < super_ro->instanceSize) {
+        // Superclass has changed size. This class's ivars must move.
+        // Also slide layout bits in parallel.
+        // This code is incapable of compacting the subclass to
+        // compensate for a superclass that shrunk, so don't do that.
+        if (PrintIvars) {
+            _objc_inform("IVARS: sliding ivars for class %s "
+                         "(superclass was %u bytes, now %u)",
+                         cls->nameForLogging(), ro->instanceStart,
+                         super_ro->instanceSize);
+        }
+        class_ro_t *ro_w = make_ro_writeable(rw);
+        ro = rw->ro;
+        moveIvars(ro_w, super_ro->instanceSize);
+        gdb_objc_class_changed(cls, OBJC_CLASS_IVARS_CHANGED, ro->name);
+    }
+}
+
+
+/***********************************************************************
+* realizeClass
+* Performs first-time initialization on class cls,
+* including allocating its read-write data.
+* Returns the real class structure for the class.
+* Locking: runtimeLock must be write-locked by the caller
+**********************************************************************/
+static Class realizeClass(Class cls)
+{
+    runtimeLock.assertWriting();
+
+    const class_ro_t *ro;
+    class_rw_t *rw;
+    Class supercls;
+    Class metacls;
+    bool isMeta;
+
+    if (!cls) return nil;
+    if (cls->isRealized()) return cls;
+    assert(cls == remapClass(cls));
+
+    // fixme verify class is not in an un-dlopened part of the shared cache?
+
+    ro = (const class_ro_t *)cls->data();
+    if (ro->flags & RO_FUTURE) {
+        // This was a future class. rw data is already allocated.
+        rw = cls->data();
+        ro = cls->data()->ro;
+        cls->changeInfo(RW_REALIZED|RW_REALIZING, RW_FUTURE);
+    } else {
+        // Normal class. Allocate writeable class data.
+ rw = (class_rw_t *)calloc(sizeof(class_rw_t), 1); + rw->ro = ro; + rw->flags = RW_REALIZED|RW_REALIZING; + cls->setData(rw); + } + + isMeta = ro->flags & RO_META; + + rw->version = isMeta ? 7 : 0; // old runtime went up to 6 + + + // Choose an index for this class. + // Sets cls->instancesRequireRawIsa if indexes no more indexes are available + cls->chooseClassArrayIndex(); + + if (PrintConnecting) { + _objc_inform("CLASS: realizing class '%s'%s %p %p #%u", + cls->nameForLogging(), isMeta ? " (meta)" : "", + (void*)cls, ro, cls->classArrayIndex()); + } + + // Realize superclass and metaclass, if they aren't already. + // This needs to be done after RW_REALIZED is set above, for root classes. + // This needs to be done after class index is chosen, for root metaclasses. + supercls = realizeClass(remapClass(cls->superclass)); + metacls = realizeClass(remapClass(cls->ISA())); + +#if SUPPORT_NONPOINTER_ISA + // Disable non-pointer isa for some classes and/or platforms. + // Set instancesRequireRawIsa. + bool instancesRequireRawIsa = cls->instancesRequireRawIsa(); + bool rawIsaIsInherited = false; + static bool hackedDispatch = false; + + if (DisableNonpointerIsa) { + // Non-pointer isa disabled by environment or app SDK version + instancesRequireRawIsa = true; + } + else if (!hackedDispatch && !(ro->flags & RO_META) && + 0 == strcmp(ro->name, "OS_object")) + { + // hack for libdispatch et al - isa also acts as vtable pointer + hackedDispatch = true; + instancesRequireRawIsa = true; + } + else if (supercls && supercls->superclass && + supercls->instancesRequireRawIsa()) + { + // This is also propagated by addSubclass() + // but nonpointer isa setup needs it earlier. + // Special case: instancesRequireRawIsa does not propagate + // from root class to root metaclass + instancesRequireRawIsa = true; + rawIsaIsInherited = true; + } + + if (instancesRequireRawIsa) { + cls->setInstancesRequireRawIsa(rawIsaIsInherited); + } +// SUPPORT_NONPOINTER_ISA +#endif + + // Update superclass and metaclass in case of remapping + cls->superclass = supercls; + cls->initClassIsa(metacls); + + // Reconcile instance variable offsets / layout. + // This may reallocate class_ro_t, updating our ro variable. + if (supercls && !isMeta) reconcileInstanceVariables(cls, supercls, ro); + + // Set fastInstanceSize if it wasn't set already. + cls->setInstanceSize(ro->instanceSize); + + // Copy some flags from ro to rw + if (ro->flags & RO_HAS_CXX_STRUCTORS) { + cls->setHasCxxDtor(); + if (! (ro->flags & RO_HAS_CXX_DTOR_ONLY)) { + cls->setHasCxxCtor(); + } + } + + // Connect this class to its superclass's subclass lists + if (supercls) { + addSubclass(supercls, cls); + } else { + addRootClass(cls); + } + + // Attach categories + methodizeClass(cls); + + return cls; +} + + +/*********************************************************************** +* missingWeakSuperclass +* Return YES if some superclass of cls was weak-linked and is missing. +**********************************************************************/ +static bool +missingWeakSuperclass(Class cls) +{ + assert(!cls->isRealized()); + + if (!cls->superclass) { + // superclass nil. This is normal for root classes only. + return (!(cls->data()->flags & RO_ROOT)); + } else { + // superclass not nil. Check if a higher superclass is missing. 
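
Because realizeClass() reconciles ivar layout against the superclass at runtime (reconcileInstanceVariables() and moveIvars() above), compiled-in ivar offsets may be slid before the class is used. A minimal usage sketch, with hypothetical class and ivar names, of reading an offset through the runtime instead of hard-coding it:

    // Offsets are only final after realization, so read them via the runtime.
    Ivar iv = class_getInstanceVariable(objc_getClass("MyClass"), "_value");
    if (iv) {
        ptrdiff_t off = ivar_getOffset(iv);   // reflects any slide applied by moveIvars()
        // use 'off' to locate the ivar inside an instance
    }
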
+ Class supercls = remapClass(cls->superclass); + assert(cls != cls->superclass); + assert(cls != supercls); + if (!supercls) return YES; + if (supercls->isRealized()) return NO; + return missingWeakSuperclass(supercls); + } +} + + +/*********************************************************************** +* realizeAllClassesInImage +* Non-lazily realizes all unrealized classes in the given image. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void realizeAllClassesInImage(header_info *hi) +{ + runtimeLock.assertWriting(); + + size_t count, i; + classref_t *classlist; + + if (hi->areAllClassesRealized()) return; + + classlist = _getObjc2ClassList(hi, &count); + + for (i = 0; i < count; i++) { + realizeClass(remapClass(classlist[i])); + } + + hi->setAllClassesRealized(YES); +} + + +/*********************************************************************** +* realizeAllClasses +* Non-lazily realizes all unrealized classes in all known images. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void realizeAllClasses(void) +{ + runtimeLock.assertWriting(); + + header_info *hi; + for (hi = FirstHeader; hi; hi = hi->getNext()) { + realizeAllClassesInImage(hi); + } +} + + +/*********************************************************************** +* _objc_allocateFutureClass +* Allocate an unresolved future class for the given class name. +* Returns any existing allocation if one was already made. +* Assumes the named class doesn't exist yet. +* Locking: acquires runtimeLock +**********************************************************************/ +Class _objc_allocateFutureClass(const char *name) +{ + rwlock_writer_t lock(runtimeLock); + + Class cls; + NXMapTable *map = futureNamedClasses(); + + if ((cls = (Class)NXMapGet(map, name))) { + // Already have a future class for this name. + return cls; + } + + cls = _calloc_class(sizeof(objc_class)); + addFutureNamedClass(name, cls); + + return cls; +} + + +/*********************************************************************** +* objc_getFutureClass. Return the id of the named class. +* If the class does not exist, return an uninitialized class +* structure that will be used for the class when and if it +* does get loaded. +* Not thread safe. +**********************************************************************/ +Class objc_getFutureClass(const char *name) +{ + Class cls; + + // YES unconnected, NO class handler + // (unconnected is OK because it will someday be the real class) + cls = look_up_class(name, YES, NO); + if (cls) { + if (PrintFuture) { + _objc_inform("FUTURE: found %p already in use for %s", + (void*)cls, name); + } + + return cls; + } + + // No class or future class with that name yet. Make one. + // fixme not thread-safe with respect to + // simultaneous library load or getFutureClass. + return _objc_allocateFutureClass(name); +} + + +BOOL _class_isFutureClass(Class cls) +{ + return cls && cls->isFuture(); +} + + +/*********************************************************************** +* _objc_flush_caches +* Flushes all caches. +* (Historical behavior: flush caches for cls, its metaclass, +* and subclasses thereof. Nil flushes all classes.) 
+* Locking: acquires runtimeLock +**********************************************************************/ +static void flushCaches(Class cls) +{ + runtimeLock.assertWriting(); + + mutex_locker_t lock(cacheUpdateLock); + + if (cls) { + foreach_realized_class_and_subclass(cls, ^(Class c){ + cache_erase_nolock(c); + }); + } + else { + foreach_realized_class_and_metaclass(^(Class c){ + cache_erase_nolock(c); + }); + } +} + + +void _objc_flush_caches(Class cls) +{ + { + rwlock_writer_t lock(runtimeLock); + flushCaches(cls); + if (cls && cls->superclass && cls != cls->getIsa()) { + flushCaches(cls->getIsa()); + } else { + // cls is a root class or root metaclass. Its metaclass is itself + // or a subclass so the metaclass caches were already flushed. + } + } + + if (!cls) { + // collectALot if cls==nil + mutex_locker_t lock(cacheUpdateLock); + cache_collect(true); + } +} + + +/*********************************************************************** +* map_images +* Process the given images which are being mapped in by dyld. +* Calls ABI-agnostic code after taking ABI-specific locks. +* +* Locking: write-locks runtimeLock +**********************************************************************/ +void +map_2_images(unsigned count, const char * const paths[], + const struct mach_header * const mhdrs[]) +{ + rwlock_writer_t lock(runtimeLock); + return map_images_nolock(count, paths, mhdrs); +} + + +/*********************************************************************** +* load_images +* Process +load in the given images which are being mapped in by dyld. +* +* Locking: write-locks runtimeLock and loadMethodLock +**********************************************************************/ +extern bool hasLoadMethods(const headerType *mhdr); +extern void prepare_load_methods(const headerType *mhdr); + +void +load_images(const char *path __unused, const struct mach_header *mh) +{ + // Return without taking locks if there are no +load methods here. + if (!hasLoadMethods((const headerType *)mh)) return; + + recursive_mutex_locker_t lock(loadMethodLock); + + // Discover load methods + { + rwlock_writer_t lock2(runtimeLock); + prepare_load_methods((const headerType *)mh); + } + + // Call +load methods (without runtimeLock - re-entrant) + call_load_methods(); +} + + +/*********************************************************************** +* unmap_image +* Process the given image which is about to be unmapped by dyld. +* +* Locking: write-locks runtimeLock and loadMethodLock +**********************************************************************/ +void +unmap_image(const char *path __unused, const struct mach_header *mh) +{ + recursive_mutex_locker_t lock(loadMethodLock); + rwlock_writer_t lock2(runtimeLock); + unmap_image_nolock(mh); +} + + + + +/*********************************************************************** +* mustReadClasses +* Preflight check in advance of readClass() from an image. +**********************************************************************/ +bool mustReadClasses(header_info *hi) +{ + const char *reason; + + // If the image is not preoptimized then we must read classes. + if (!hi->isPreoptimized()) { + reason = nil; // Don't log this one because it is noisy. + goto readthem; + } + + // If iOS simulator then we must read classes. 
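
load_images() above drives +load: it takes loadMethodLock, discovers the image's non-lazy classes and categories under runtimeLock, and then calls the +load implementations with runtimeLock dropped. A minimal sketch of the kind of method it ends up invoking (class name hypothetical):

    @implementation MyObserver
    + (void)load {
        // Invoked by call_load_methods() when the image containing this class
        // is mapped (before main() for images linked at build time).
        // runtimeLock is not held here; loadMethodLock is held and is re-entrant.
    }
    @end
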
+#if TARGET_OS_SIMULATOR + reason = "the image is for iOS simulator"; + goto readthem; +#endif + + assert(!hi->isBundle()); // no MH_BUNDLE in shared cache + + // If the image may have missing weak superclasses then we must read classes + if (!noMissingWeakSuperclasses()) { + reason = "the image may contain classes with missing weak superclasses"; + goto readthem; + } + + // If there are unresolved future classes then we must read classes. + if (haveFutureNamedClasses()) { + reason = "there are unresolved future classes pending"; + goto readthem; + } + + // readClass() does not need to do anything. + return NO; + + readthem: + if (PrintPreopt && reason) { + _objc_inform("PREOPTIMIZATION: reading classes manually from %s " + "because %s", hi->fname(), reason); + } + return YES; +} + + +/*********************************************************************** +* readClass +* Read a class and metaclass as written by a compiler. +* Returns the new class pointer. This could be: +* - cls +* - nil (cls has a missing weak-linked superclass) +* - something else (space for this class was reserved by a future class) +* +* Note that all work performed by this function is preflighted by +* mustReadClasses(). Do not change this function without updating that one. +* +* Locking: runtimeLock acquired by map_images or objc_readClassPair +**********************************************************************/ +Class readClass(Class cls, bool headerIsBundle, bool headerIsPreoptimized) +{ + const char *mangledName = cls->mangledName(); + + if (missingWeakSuperclass(cls)) { + // No superclass (probably weak-linked). + // Disavow any knowledge of this subclass. + if (PrintConnecting) { + _objc_inform("CLASS: IGNORING class '%s' with " + "missing weak-linked superclass", + cls->nameForLogging()); + } + addRemappedClass(cls, nil); + cls->superclass = nil; + return nil; + } + + // Note: Class __ARCLite__'s hack does not go through here. + // Class structure fixups that apply to it also need to be + // performed in non-lazy realization below. + + // These fields should be set to zero because of the + // binding of _objc_empty_vtable, but OS X 10.8's dyld + // does not bind shared cache absolute symbols as expected. + // This (and the __ARCLite__ hack below) can be removed + // once the simulator drops 10.8 support. +#if TARGET_OS_SIMULATOR + if (cls->cache._mask) cls->cache._mask = 0; + if (cls->cache._occupied) cls->cache._occupied = 0; + if (cls->ISA()->cache._mask) cls->ISA()->cache._mask = 0; + if (cls->ISA()->cache._occupied) cls->ISA()->cache._occupied = 0; +#endif + + Class replacing = nil; + if (Class newCls = popFutureNamedClass(mangledName)) { + // This name was previously allocated as a future class. + // Copy objc_class to future class's struct. + // Preserve future's rw data block. 
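
readClass() above quietly drops a class whose superclass was weak-linked and is missing at runtime, remapping it to nil and never registering its name. Callers observe this only as a failed lookup; a hedged sketch with a hypothetical class name:

    // MyFancyView inherits from a class in a weak-linked framework.
    // If that framework is absent, readClass() disavows MyFancyView entirely.
    Class cls = objc_getClass("MyFancyView");
    if (!cls) {
        // Fall back to some other implementation.
    }
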
+ + if (newCls->isSwift()) { + _objc_fatal("Can't complete future class request for '%s' " + "because the real class is too big.", + cls->nameForLogging()); + } + + class_rw_t *rw = newCls->data(); + const class_ro_t *old_ro = rw->ro; + memcpy(newCls, cls, sizeof(objc_class)); + rw->ro = (class_ro_t *)newCls->data(); + newCls->setData(rw); + freeIfMutable((char *)old_ro->name); + free((void *)old_ro); + + addRemappedClass(cls, newCls); + + replacing = cls; + cls = newCls; + } + + if (headerIsPreoptimized && !replacing) { + // class list built in shared cache + // fixme strict assert doesn't work because of duplicates + // assert(cls == getClass(name)); + assert(getClass(mangledName)); + } else { + addNamedClass(cls, mangledName, replacing); + } + + // for future reference: shared cache never contains MH_BUNDLEs + if (headerIsBundle) { + cls->data()->flags |= RO_FROM_BUNDLE; + cls->ISA()->data()->flags |= RO_FROM_BUNDLE; + } + + return cls; +} + + +/*********************************************************************** +* readProtocol +* Read a protocol as written by a compiler. +**********************************************************************/ +static void +readProtocol(protocol_t *newproto, Class protocol_class, + NXMapTable *protocol_map, + bool headerIsPreoptimized, bool headerIsBundle) +{ + // This is not enough to make protocols in unloaded bundles safe, + // but it does prevent crashes when looking up unrelated protocols. + auto insertFn = headerIsBundle ? NXMapKeyCopyingInsert : NXMapInsert; + + protocol_t *oldproto = (protocol_t *)getProtocol(newproto->mangledName); + + if (oldproto) { + // Some other definition already won. + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s " + "(duplicate of %p)", + newproto, oldproto->nameForLogging(), oldproto); + } + } + else if (headerIsPreoptimized) { + // Shared cache initialized the protocol object itself, + // but in order to allow out-of-cache replacement we need + // to add it to the protocol table now. + + protocol_t *cacheproto = (protocol_t *) + getPreoptimizedProtocol(newproto->mangledName); + protocol_t *installedproto; + if (cacheproto && cacheproto != newproto) { + // Another definition in the shared cache wins (because + // everything in the cache was fixed up to point to it). + installedproto = cacheproto; + } + else { + // This definition wins. + installedproto = newproto; + } + + assert(installedproto->getIsa() == protocol_class); + assert(installedproto->size >= sizeof(protocol_t)); + insertFn(protocol_map, installedproto->mangledName, + installedproto); + + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s", + installedproto, installedproto->nameForLogging()); + if (newproto != installedproto) { + _objc_inform("PROTOCOLS: protocol at %p is %s " + "(duplicate of %p)", + newproto, installedproto->nameForLogging(), + installedproto); + } + } + } + else if (newproto->size >= sizeof(protocol_t)) { + // New protocol from an un-preoptimized image + // with sufficient storage. Fix it up in place. + // fixme duplicate protocols from unloadable bundle + newproto->initIsa(protocol_class); // fixme pinned + insertFn(protocol_map, newproto->mangledName, newproto); + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s", + newproto, newproto->nameForLogging()); + } + } + else { + // New protocol from an un-preoptimized image + // with insufficient storage. Reallocate it. 
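
readProtocol() above keeps a single installed copy per mangled name, whether the winner comes from the shared cache or from the first image that defined it, so a by-name lookup and a compile-time reference always agree. A small sketch using the public lookup API:

    Protocol *byName = objc_getProtocol("NSCopying");
    BOOL same = protocol_isEqual(byName, @protocol(NSCopying));  // YES: one canonical protocol
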
+ // fixme duplicate protocols from unloadable bundle + size_t size = max(sizeof(protocol_t), (size_t)newproto->size); + protocol_t *installedproto = (protocol_t *)calloc(size, 1); + memcpy(installedproto, newproto, newproto->size); + installedproto->size = (typeof(installedproto->size))size; + + installedproto->initIsa(protocol_class); // fixme pinned + insertFn(protocol_map, installedproto->mangledName, installedproto); + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s ", + installedproto, installedproto->nameForLogging()); + _objc_inform("PROTOCOLS: protocol at %p is %s " + "(reallocated to %p)", + newproto, installedproto->nameForLogging(), + installedproto); + } + } +} + +/*********************************************************************** +* _read_images +* Perform initial processing of the headers in the linked +* list beginning with headerList. +* +* Called by: map_images_nolock +* +* Locking: runtimeLock acquired by map_images +**********************************************************************/ +void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClasses) +{ + header_info *hi; + uint32_t hIndex; + size_t count; + size_t i; + Class *resolvedFutureClasses = nil; + size_t resolvedFutureClassCount = 0; + static bool doneOnce; + TimeLogger ts(PrintImageTimes); + + runtimeLock.assertWriting(); + +#define EACH_HEADER \ + hIndex = 0; \ + hIndex < hCount && (hi = hList[hIndex]); \ + hIndex++ + + if (!doneOnce) { + doneOnce = YES; + +#if SUPPORT_NONPOINTER_ISA + // Disable non-pointer isa under some conditions. + +# if SUPPORT_INDEXED_ISA + // Disable nonpointer isa if any image contains old Swift code + for (EACH_HEADER) { + if (hi->info()->containsSwift() && + hi->info()->swiftVersion() < objc_image_info::SwiftVersion3) + { + DisableNonpointerIsa = true; + if (PrintRawIsa) { + _objc_inform("RAW ISA: disabling non-pointer isa because " + "the app or a framework contains Swift code " + "older than Swift 3.0"); + } + break; + } + } +# endif + +# if TARGET_OS_OSX + // Disable non-pointer isa if the app is too old + // (linked before OS X 10.11) + if (dyld_get_program_sdk_version() < DYLD_MACOSX_VERSION_10_11) { + DisableNonpointerIsa = true; + if (PrintRawIsa) { + _objc_inform("RAW ISA: disabling non-pointer isa because " + "the app is too old (SDK version " SDK_FORMAT ")", + FORMAT_SDK(dyld_get_program_sdk_version())); + } + } + + // Disable non-pointer isa if the app has a __DATA,__objc_rawisa section + // New apps that load old extensions may need this. + for (EACH_HEADER) { + if (hi->mhdr()->filetype != MH_EXECUTE) continue; + unsigned long size; + if (getsectiondata(hi->mhdr(), "__DATA", "__objc_rawisa", &size)) { + DisableNonpointerIsa = true; + if (PrintRawIsa) { + _objc_inform("RAW ISA: disabling non-pointer isa because " + "the app has a __DATA,__objc_rawisa section"); + } + } + break; // assume only one MH_EXECUTE image + } +# endif + +#endif + + if (DisableTaggedPointers) { + disableTaggedPointers(); + } + + if (PrintConnecting) { + _objc_inform("CLASS: found %d classes during launch", totalClasses); + } + + // namedClasses + // Preoptimized classes don't go in this table. + // 4/3 is NXMapTable's load factor + int namedClassesSize = + (isPreoptimized() ? unoptimizedTotalClasses : totalClasses) * 4 / 3; + gdb_objc_realized_classes = + NXCreateMapTable(NXStrValueMapPrototype, namedClassesSize); + + ts.log("IMAGE TIMES: first time tasks"); + } + + + // Discover classes. Fix up unresolved future classes. 
Mark bundle classes. + + for (EACH_HEADER) { + if (! mustReadClasses(hi)) { + // Image is sufficiently optimized that we need not call readClass() + continue; + } + + bool headerIsBundle = hi->isBundle(); + bool headerIsPreoptimized = hi->isPreoptimized(); + + classref_t *classlist = _getObjc2ClassList(hi, &count); + for (i = 0; i < count; i++) { + Class cls = (Class)classlist[i]; + Class newCls = readClass(cls, headerIsBundle, headerIsPreoptimized); + + if (newCls != cls && newCls) { + // Class was moved but not deleted. Currently this occurs + // only when the new class resolved a future class. + // Non-lazily realize the class below. + resolvedFutureClasses = (Class *) + realloc(resolvedFutureClasses, + (resolvedFutureClassCount+1) * sizeof(Class)); + resolvedFutureClasses[resolvedFutureClassCount++] = newCls; + } + } + } + + ts.log("IMAGE TIMES: discover classes"); + + // Fix up remapped classes + // Class list and nonlazy class list remain unremapped. + // Class refs and super refs are remapped for message dispatching. + + if (!noClassesRemapped()) { + for (EACH_HEADER) { + Class *classrefs = _getObjc2ClassRefs(hi, &count); + for (i = 0; i < count; i++) { + remapClassRef(&classrefs[i]); + } + // fixme why doesn't test future1 catch the absence of this? + classrefs = _getObjc2SuperRefs(hi, &count); + for (i = 0; i < count; i++) { + remapClassRef(&classrefs[i]); + } + } + } + + ts.log("IMAGE TIMES: remap classes"); + + // Fix up @selector references + static size_t UnfixedSelectors; + sel_lock(); + for (EACH_HEADER) { + if (hi->isPreoptimized()) continue; + + bool isBundle = hi->isBundle(); + SEL *sels = _getObjc2SelectorRefs(hi, &count); + UnfixedSelectors += count; + for (i = 0; i < count; i++) { + const char *name = sel_cname(sels[i]); + sels[i] = sel_registerNameNoLock(name, isBundle); + } + } + sel_unlock(); + + ts.log("IMAGE TIMES: fix up selector references"); + +#if SUPPORT_FIXUP + // Fix up old objc_msgSend_fixup call sites + for (EACH_HEADER) { + message_ref_t *refs = _getObjc2MessageRefs(hi, &count); + if (count == 0) continue; + + if (PrintVtables) { + _objc_inform("VTABLES: repairing %zu unsupported vtable dispatch " + "call sites in %s", count, hi->fname()); + } + for (i = 0; i < count; i++) { + fixupMessageRef(refs+i); + } + } + + ts.log("IMAGE TIMES: fix up objc_msgSend_fixup"); +#endif + + // Discover protocols. Fix up protocol refs. + for (EACH_HEADER) { + extern objc_class OBJC_CLASS_$_Protocol; + Class cls = (Class)&OBJC_CLASS_$_Protocol; + assert(cls); + NXMapTable *protocol_map = protocols(); + bool isPreoptimized = hi->isPreoptimized(); + bool isBundle = hi->isBundle(); + + protocol_t **protolist = _getObjc2ProtocolList(hi, &count); + for (i = 0; i < count; i++) { + readProtocol(protolist[i], cls, protocol_map, + isPreoptimized, isBundle); + } + } + + ts.log("IMAGE TIMES: discover protocols"); + + // Fix up @protocol references + // Preoptimized images may have the right + // answer already but we don't know for sure. 
+ for (EACH_HEADER) { + protocol_t **protolist = _getObjc2ProtocolRefs(hi, &count); + for (i = 0; i < count; i++) { + remapProtocolRef(&protolist[i]); + } + } + + ts.log("IMAGE TIMES: fix up @protocol references"); + + // Realize non-lazy classes (for +load methods and static instances) + for (EACH_HEADER) { + classref_t *classlist = + _getObjc2NonlazyClassList(hi, &count); + for (i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (!cls) continue; + + // hack for class __ARCLite__, which didn't get this above +#if TARGET_OS_SIMULATOR + if (cls->cache._buckets == (void*)&_objc_empty_cache && + (cls->cache._mask || cls->cache._occupied)) + { + cls->cache._mask = 0; + cls->cache._occupied = 0; + } + if (cls->ISA()->cache._buckets == (void*)&_objc_empty_cache && + (cls->ISA()->cache._mask || cls->ISA()->cache._occupied)) + { + cls->ISA()->cache._mask = 0; + cls->ISA()->cache._occupied = 0; + } +#endif + + realizeClass(cls); + } + } + + ts.log("IMAGE TIMES: realize non-lazy classes"); + + // Realize newly-resolved future classes, in case CF manipulates them + if (resolvedFutureClasses) { + for (i = 0; i < resolvedFutureClassCount; i++) { + realizeClass(resolvedFutureClasses[i]); + resolvedFutureClasses[i]->setInstancesRequireRawIsa(false/*inherited*/); + } + free(resolvedFutureClasses); + } + + ts.log("IMAGE TIMES: realize future classes"); + + // Discover categories. + for (EACH_HEADER) { + category_t **catlist = + _getObjc2CategoryList(hi, &count); + bool hasClassProperties = hi->info()->hasCategoryClassProperties(); + + for (i = 0; i < count; i++) { + category_t *cat = catlist[i]; + Class cls = remapClass(cat->cls); + + if (!cls) { + // Category's target class is missing (probably weak-linked). + // Disavow any knowledge of this category. + catlist[i] = nil; + if (PrintConnecting) { + _objc_inform("CLASS: IGNORING category \?\?\?(%s) %p with " + "missing weak-linked target class", + cat->name, cat); + } + continue; + } + + // Process this category. + // First, register the category with its target class. + // Then, rebuild the class's method lists (etc) if + // the class is realized. + bool classExists = NO; + if (cat->instanceMethods || cat->protocols + || cat->instanceProperties) + { + addUnattachedCategoryForClass(cat, cls, hi); + if (cls->isRealized()) { + remethodizeClass(cls); + classExists = YES; + } + if (PrintConnecting) { + _objc_inform("CLASS: found category -%s(%s) %s", + cls->nameForLogging(), cat->name, + classExists ? "on existing class" : ""); + } + } + + if (cat->classMethods || cat->protocols + || (hasClassProperties && cat->_classProperties)) + { + addUnattachedCategoryForClass(cat, cls->ISA(), hi); + if (cls->ISA()->isRealized()) { + remethodizeClass(cls->ISA()); + } + if (PrintConnecting) { + _objc_inform("CLASS: found category +%s(%s)", + cls->nameForLogging(), cat->name); + } + } + } + } + + ts.log("IMAGE TIMES: discover categories"); + + // Category discovery MUST BE LAST to avoid potential races + // when other threads call the new category code before + // this thread finishes its fixups. 
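
The category pass above registers each category with its target class and, if that class is already realized, rebuilds its method lists via remethodizeClass(), so the added methods become visible to lookup and introspection as soon as the image is processed. A hedged sketch (category and selector names hypothetical):

    @interface NSObject (FooAdditions)
    - (NSString *)fooDescription;
    @end

    @implementation NSObject (FooAdditions)
    - (NSString *)fooDescription { return @"foo"; }
    @end

    // After the image containing the category has been processed:
    Method m = class_getInstanceMethod([NSObject class], @selector(fooDescription));
    // m is non-nil and -fooDescription resolves through normal message lookup.
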
+ + // +load handled by prepare_load_methods() + + if (DebugNonFragileIvars) { + realizeAllClasses(); + } + + + // Print preoptimization statistics + if (PrintPreopt) { + static unsigned int PreoptTotalMethodLists; + static unsigned int PreoptOptimizedMethodLists; + static unsigned int PreoptTotalClasses; + static unsigned int PreoptOptimizedClasses; + + for (EACH_HEADER) { + if (hi->isPreoptimized()) { + _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors " + "in %s", hi->fname()); + } + else if (hi->info()->optimizedByDyld()) { + _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors " + "in %s", hi->fname()); + } + + classref_t *classlist = _getObjc2ClassList(hi, &count); + for (i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (!cls) continue; + + PreoptTotalClasses++; + if (hi->isPreoptimized()) { + PreoptOptimizedClasses++; + } + + const method_list_t *mlist; + if ((mlist = ((class_ro_t *)cls->data())->baseMethods())) { + PreoptTotalMethodLists++; + if (mlist->isFixedUp()) { + PreoptOptimizedMethodLists++; + } + } + if ((mlist=((class_ro_t *)cls->ISA()->data())->baseMethods())) { + PreoptTotalMethodLists++; + if (mlist->isFixedUp()) { + PreoptOptimizedMethodLists++; + } + } + } + } + + _objc_inform("PREOPTIMIZATION: %zu selector references not " + "pre-optimized", UnfixedSelectors); + _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) method lists pre-sorted", + PreoptOptimizedMethodLists, PreoptTotalMethodLists, + PreoptTotalMethodLists + ? 100.0*PreoptOptimizedMethodLists/PreoptTotalMethodLists + : 0.0); + _objc_inform("PREOPTIMIZATION: %u/%u (%.3g%%) classes pre-registered", + PreoptOptimizedClasses, PreoptTotalClasses, + PreoptTotalClasses + ? 100.0*PreoptOptimizedClasses/PreoptTotalClasses + : 0.0); + _objc_inform("PREOPTIMIZATION: %zu protocol references not " + "pre-optimized", UnfixedProtocolReferences); + } + +#undef EACH_HEADER +} + + +/*********************************************************************** +* prepare_load_methods +* Schedule +load for classes in this image, any un-+load-ed +* superclasses in other images, and any categories in this image. +**********************************************************************/ +// Recursively schedule +load for cls and any un-+load-ed superclasses. +// cls must already be connected. +static void schedule_class_load(Class cls) +{ + if (!cls) return; + assert(cls->isRealized()); // _read_images should realize + + if (cls->data()->flags & RW_LOADED) return; + + // Ensure superclass-first ordering + schedule_class_load(cls->superclass); + + add_class_to_loadable_list(cls); + cls->setInfo(RW_LOADED); +} + +// Quick scan for +load methods that doesn't take a lock. 
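
schedule_class_load() above recurses into the superclass first, which is what produces the documented +load ordering: a superclass's +load runs before its subclasses', and a category's +load runs after the class's own +load. A sketch with hypothetical class names:

    @implementation Base
    + (void)load { /* runs first */ }
    @end

    @implementation Derived            // Derived : Base
    + (void)load { /* runs after Base's +load */ }
    @end

    @implementation Derived (Extras)
    + (void)load { /* category +load runs after the class's own +load */ }
    @end
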
+bool hasLoadMethods(const headerType *mhdr) +{ + size_t count; + if (_getObjc2NonlazyClassList(mhdr, &count) && count > 0) return true; + if (_getObjc2NonlazyCategoryList(mhdr, &count) && count > 0) return true; + return false; +} + +void prepare_load_methods(const headerType *mhdr) +{ + size_t count, i; + + runtimeLock.assertWriting(); + + classref_t *classlist = + _getObjc2NonlazyClassList(mhdr, &count); + for (i = 0; i < count; i++) { + schedule_class_load(remapClass(classlist[i])); + } + + category_t **categorylist = _getObjc2NonlazyCategoryList(mhdr, &count); + for (i = 0; i < count; i++) { + category_t *cat = categorylist[i]; + Class cls = remapClass(cat->cls); + if (!cls) continue; // category for ignored weak-linked class + realizeClass(cls); + assert(cls->ISA()->isRealized()); + add_category_to_loadable_list(cat); + } +} + + +/*********************************************************************** +* _unload_image +* Only handles MH_BUNDLE for now. +* Locking: write-lock and loadMethodLock acquired by unmap_image +**********************************************************************/ +void _unload_image(header_info *hi) +{ + size_t count, i; + + loadMethodLock.assertLocked(); + runtimeLock.assertWriting(); + + // Unload unattached categories and categories waiting for +load. + + category_t **catlist = _getObjc2CategoryList(hi, &count); + for (i = 0; i < count; i++) { + category_t *cat = catlist[i]; + if (!cat) continue; // category for ignored weak-linked class + Class cls = remapClass(cat->cls); + assert(cls); // shouldn't have live category for dead class + + // fixme for MH_DYLIB cat's class may have been unloaded already + + // unattached list + removeUnattachedCategoryForClass(cat, cls); + + // +load queue + remove_category_from_loadable_list(cat); + } + + // Unload classes. + + // Gather classes from both __DATA,__objc_clslist + // and __DATA,__objc_nlclslist. arclite's hack puts a class in the latter + // only, and we need to unload that class if we unload an arclite image. + + NXHashTable *classes = NXCreateHashTable(NXPtrPrototype, 0, nil); + classref_t *classlist; + + classlist = _getObjc2ClassList(hi, &count); + for (i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (cls) NXHashInsert(classes, cls); + } + + classlist = _getObjc2NonlazyClassList(hi, &count); + for (i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (cls) NXHashInsert(classes, cls); + } + + // First detach classes from each other. Then free each class. + // This avoid bugs where this loop unloads a subclass before its superclass + + NXHashState hs; + Class cls; + + hs = NXInitHashState(classes); + while (NXNextHashState(classes, &hs, (void**)&cls)) { + remove_class_from_loadable_list(cls); + detach_class(cls->ISA(), YES); + detach_class(cls, NO); + } + hs = NXInitHashState(classes); + while (NXNextHashState(classes, &hs, (void**)&cls)) { + free_class(cls->ISA()); + free_class(cls); + } + + NXFreeHashTable(classes); + + // XXX FIXME -- Clean up protocols: + // Support unloading protocols at dylib/image unload time + + // fixme DebugUnload +} + + +/*********************************************************************** +* method_getDescription +* Returns a pointer to this method's objc_method_description. 
+* Locking: none +**********************************************************************/ +struct objc_method_description * +method_getDescription(Method m) +{ + if (!m) return nil; + return (struct objc_method_description *)m; +} + + +IMP +method_getImplementation(Method m) +{ + return m ? m->imp : nil; +} + + +/*********************************************************************** +* method_getName +* Returns this method's selector. +* The method must not be nil. +* The method must already have been fixed-up. +* Locking: none +**********************************************************************/ +SEL +method_getName(Method m) +{ + if (!m) return nil; + + assert(m->name == sel_registerName(sel_getName(m->name))); + return m->name; +} + + +/*********************************************************************** +* method_getTypeEncoding +* Returns this method's old-style type encoding string. +* The method must not be nil. +* Locking: none +**********************************************************************/ +const char * +method_getTypeEncoding(Method m) +{ + if (!m) return nil; + return m->types; +} + + +/*********************************************************************** +* method_setImplementation +* Sets this method's implementation to imp. +* The previous implementation is returned. +**********************************************************************/ +static IMP +_method_setImplementation(Class cls, method_t *m, IMP imp) +{ + runtimeLock.assertWriting(); + + if (!m) return nil; + if (!imp) return nil; + + IMP old = m->imp; + m->imp = imp; + + // Cache updates are slow if cls is nil (i.e. unknown) + // RR/AWZ updates are slow if cls is nil (i.e. unknown) + // fixme build list of classes whose Methods are known externally? + + flushCaches(cls); + + updateCustomRR_AWZ(cls, m); + + return old; +} + +IMP +method_setImplementation(Method m, IMP imp) +{ + // Don't know the class - will be slow if RR/AWZ are affected + // fixme build list of classes whose Methods are known externally? + rwlock_writer_t lock(runtimeLock); + return _method_setImplementation(Nil, m, imp); +} + + +void method_exchangeImplementations(Method m1, Method m2) +{ + if (!m1 || !m2) return; + + rwlock_writer_t lock(runtimeLock); + + IMP m1_imp = m1->imp; + m1->imp = m2->imp; + m2->imp = m1_imp; + + + // RR/AWZ updates are slow because class is unknown + // Cache updates are slow because class is unknown + // fixme build list of classes whose Methods are known externally? 
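
Because method_exchangeImplementations() does not know which classes own the two Methods, it conservatively flushes every cache (the flushCaches(nil) call that follows). A minimal sketch of the typical caller, with a hypothetical class and selectors:

    Method m1 = class_getInstanceMethod([MyClass class], @selector(original));
    Method m2 = class_getInstanceMethod([MyClass class], @selector(replacement));
    if (m1 && m2) {
        method_exchangeImplementations(m1, m2);   // performed under runtimeLock
    }
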
+ + flushCaches(nil); + + updateCustomRR_AWZ(nil, m1); + updateCustomRR_AWZ(nil, m2); +} + + +/*********************************************************************** +* ivar_getOffset +* fixme +* Locking: none +**********************************************************************/ +ptrdiff_t +ivar_getOffset(Ivar ivar) +{ + if (!ivar) return 0; + return *ivar->offset; +} + + +/*********************************************************************** +* ivar_getName +* fixme +* Locking: none +**********************************************************************/ +const char * +ivar_getName(Ivar ivar) +{ + if (!ivar) return nil; + return ivar->name; +} + + +/*********************************************************************** +* ivar_getTypeEncoding +* fixme +* Locking: none +**********************************************************************/ +const char * +ivar_getTypeEncoding(Ivar ivar) +{ + if (!ivar) return nil; + return ivar->type; +} + + + +const char *property_getName(objc_property_t prop) +{ + return prop->name; +} + +const char *property_getAttributes(objc_property_t prop) +{ + return prop->attributes; +} + +objc_property_attribute_t *property_copyAttributeList(objc_property_t prop, + unsigned int *outCount) +{ + if (!prop) { + if (outCount) *outCount = 0; + return nil; + } + + rwlock_reader_t lock(runtimeLock); + return copyPropertyAttributeList(prop->attributes,outCount); +} + +char * property_copyAttributeValue(objc_property_t prop, const char *name) +{ + if (!prop || !name || *name == '\0') return nil; + + rwlock_reader_t lock(runtimeLock); + return copyPropertyAttributeValue(prop->attributes, name); +} + + +/*********************************************************************** +* getExtendedTypesIndexesForMethod +* Returns: +* a is the count of methods in all method lists before m's method list +* b is the index of m in m's method list +* a+b is the index of m's extended types in the extended types array +**********************************************************************/ +static void getExtendedTypesIndexesForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod, uint32_t& a, uint32_t &b) +{ + a = 0; + + if (proto->instanceMethods) { + if (isRequiredMethod && isInstanceMethod) { + b = proto->instanceMethods->indexOfMethod(m); + return; + } + a += proto->instanceMethods->count; + } + + if (proto->classMethods) { + if (isRequiredMethod && !isInstanceMethod) { + b = proto->classMethods->indexOfMethod(m); + return; + } + a += proto->classMethods->count; + } + + if (proto->optionalInstanceMethods) { + if (!isRequiredMethod && isInstanceMethod) { + b = proto->optionalInstanceMethods->indexOfMethod(m); + return; + } + a += proto->optionalInstanceMethods->count; + } + + if (proto->optionalClassMethods) { + if (!isRequiredMethod && !isInstanceMethod) { + b = proto->optionalClassMethods->indexOfMethod(m); + return; + } + a += proto->optionalClassMethods->count; + } +} + + +/*********************************************************************** +* getExtendedTypesIndexForMethod +* Returns the index of m's extended types in proto's extended types array. 
+**********************************************************************/ +static uint32_t getExtendedTypesIndexForMethod(protocol_t *proto, const method_t *m, bool isRequiredMethod, bool isInstanceMethod) +{ + uint32_t a; + uint32_t b; + getExtendedTypesIndexesForMethod(proto, m, isRequiredMethod, + isInstanceMethod, a, b); + return a + b; +} + + +/*********************************************************************** +* fixupProtocolMethodList +* Fixes up a single method list in a protocol. +**********************************************************************/ +static void +fixupProtocolMethodList(protocol_t *proto, method_list_t *mlist, + bool required, bool instance) +{ + runtimeLock.assertWriting(); + + if (!mlist) return; + if (mlist->isFixedUp()) return; + + const char **extTypes = proto->extendedMethodTypes(); + fixupMethodList(mlist, true/*always copy for simplicity*/, + !extTypes/*sort if no extended method types*/); + + if (extTypes) { + // Sort method list and extended method types together. + // fixupMethodList() can't do this. + // fixme COW stomp + uint32_t count = mlist->count; + uint32_t prefix; + uint32_t junk; + getExtendedTypesIndexesForMethod(proto, &mlist->get(0), + required, instance, prefix, junk); + for (uint32_t i = 0; i < count; i++) { + for (uint32_t j = i+1; j < count; j++) { + method_t& mi = mlist->get(i); + method_t& mj = mlist->get(j); + if (mi.name > mj.name) { + std::swap(mi, mj); + std::swap(extTypes[prefix+i], extTypes[prefix+j]); + } + } + } + } +} + + +/*********************************************************************** +* fixupProtocol +* Fixes up all of a protocol's method lists. +**********************************************************************/ +static void +fixupProtocol(protocol_t *proto) +{ + runtimeLock.assertWriting(); + + if (proto->protocols) { + for (uintptr_t i = 0; i < proto->protocols->count; i++) { + protocol_t *sub = remapProtocol(proto->protocols->list[i]); + if (!sub->isFixedUp()) fixupProtocol(sub); + } + } + + fixupProtocolMethodList(proto, proto->instanceMethods, YES, YES); + fixupProtocolMethodList(proto, proto->classMethods, YES, NO); + fixupProtocolMethodList(proto, proto->optionalInstanceMethods, NO, YES); + fixupProtocolMethodList(proto, proto->optionalClassMethods, NO, NO); + + // fixme memory barrier so we can check this with no lock + proto->setFixedUp(); +} + + +/*********************************************************************** +* fixupProtocolIfNeeded +* Fixes up all of a protocol's method lists if they aren't fixed up already. +* Locking: write-locks runtimeLock. 
+**********************************************************************/ +static void +fixupProtocolIfNeeded(protocol_t *proto) +{ + runtimeLock.assertUnlocked(); + assert(proto); + + if (!proto->isFixedUp()) { + rwlock_writer_t lock(runtimeLock); + fixupProtocol(proto); + } +} + + +static method_list_t * +getProtocolMethodList(protocol_t *proto, bool required, bool instance) +{ + method_list_t **mlistp = nil; + if (required) { + if (instance) { + mlistp = &proto->instanceMethods; + } else { + mlistp = &proto->classMethods; + } + } else { + if (instance) { + mlistp = &proto->optionalInstanceMethods; + } else { + mlistp = &proto->optionalClassMethods; + } + } + + return *mlistp; +} + + +/*********************************************************************** +* protocol_getMethod_nolock +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static method_t * +protocol_getMethod_nolock(protocol_t *proto, SEL sel, + bool isRequiredMethod, bool isInstanceMethod, + bool recursive) +{ + runtimeLock.assertLocked(); + + if (!proto || !sel) return nil; + + assert(proto->isFixedUp()); + + method_list_t *mlist = + getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod); + if (mlist) { + method_t *m = search_method_list(mlist, sel); + if (m) return m; + } + + if (recursive && proto->protocols) { + method_t *m; + for (uint32_t i = 0; i < proto->protocols->count; i++) { + protocol_t *realProto = remapProtocol(proto->protocols->list[i]); + m = protocol_getMethod_nolock(realProto, sel, + isRequiredMethod, isInstanceMethod, + true); + if (m) return m; + } + } + + return nil; +} + + +/*********************************************************************** +* protocol_getMethod +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +Method +protocol_getMethod(protocol_t *proto, SEL sel, bool isRequiredMethod, bool isInstanceMethod, bool recursive) +{ + if (!proto) return nil; + fixupProtocolIfNeeded(proto); + + rwlock_reader_t lock(runtimeLock); + return protocol_getMethod_nolock(proto, sel, isRequiredMethod, + isInstanceMethod, recursive); +} + + +/*********************************************************************** +* protocol_getMethodTypeEncoding_nolock +* Return the @encode string for the requested protocol method. +* Returns nil if the compiler did not emit any extended @encode data. +* Locking: runtimeLock must be held for writing by the caller +**********************************************************************/ +const char * +protocol_getMethodTypeEncoding_nolock(protocol_t *proto, SEL sel, + bool isRequiredMethod, + bool isInstanceMethod) +{ + runtimeLock.assertLocked(); + + if (!proto) return nil; + if (!proto->extendedMethodTypes()) return nil; + + assert(proto->isFixedUp()); + + method_t *m = + protocol_getMethod_nolock(proto, sel, + isRequiredMethod, isInstanceMethod, false); + if (m) { + uint32_t i = getExtendedTypesIndexForMethod(proto, m, + isRequiredMethod, + isInstanceMethod); + return proto->extendedMethodTypes()[i]; + } + + // No method with that name. Search incorporated protocols. 
+ if (proto->protocols) { + for (uintptr_t i = 0; i < proto->protocols->count; i++) { + const char *enc = + protocol_getMethodTypeEncoding_nolock(remapProtocol(proto->protocols->list[i]), sel, isRequiredMethod, isInstanceMethod); + if (enc) return enc; + } + } + + return nil; +} + +/*********************************************************************** +* _protocol_getMethodTypeEncoding +* Return the @encode string for the requested protocol method. +* Returns nil if the compiler did not emit any extended @encode data. +* Locking: acquires runtimeLock +**********************************************************************/ +const char * +_protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + protocol_t *proto = newprotocol(proto_gen); + + if (!proto) return nil; + fixupProtocolIfNeeded(proto); + + rwlock_reader_t lock(runtimeLock); + return protocol_getMethodTypeEncoding_nolock(proto, sel, + isRequiredMethod, + isInstanceMethod); +} + + +/*********************************************************************** +* protocol_t::demangledName +* Returns the (Swift-demangled) name of the given protocol. +* Locking: none +**********************************************************************/ +const char * +protocol_t::demangledName() +{ + assert(hasDemangledNameField()); + + if (! _demangledName) { + char *de = copySwiftV1DemangledName(mangledName, true/*isProtocol*/); + if (! OSAtomicCompareAndSwapPtrBarrier(nil, (void*)(de ?: mangledName), + (void**)&_demangledName)) + { + if (de) free(de); + } + } + return _demangledName; +} + +/*********************************************************************** +* protocol_getName +* Returns the (Swift-demangled) name of the given protocol. +* Locking: runtimeLock must not be held by the caller +**********************************************************************/ +const char * +protocol_getName(Protocol *proto) +{ + if (!proto) return "nil"; + else return newprotocol(proto)->demangledName(); +} + + +/*********************************************************************** +* protocol_getInstanceMethodDescription +* Returns the description of a named instance method. +* Locking: runtimeLock must not be held by the caller +**********************************************************************/ +struct objc_method_description +protocol_getMethodDescription(Protocol *p, SEL aSel, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + Method m = + protocol_getMethod(newprotocol(p), aSel, + isRequiredMethod, isInstanceMethod, true); + if (m) return *method_getDescription(m); + else return (struct objc_method_description){nil, nil}; +} + + +/*********************************************************************** +* protocol_conformsToProtocol_nolock +* Returns YES if self conforms to other. +* Locking: runtimeLock must be held by the caller. 
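
protocol_getMethod() and protocol_getMethodDescription() above back the public protocol introspection entry points. A small usage sketch querying a required instance method of the standard NSObject protocol:

    struct objc_method_description desc =
        protocol_getMethodDescription(@protocol(NSObject), @selector(description),
                                      YES /*required*/, YES /*instance*/);
    if (desc.name) {
        // desc.types holds the @encode string for -description.
    }
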
+**********************************************************************/ +static bool +protocol_conformsToProtocol_nolock(protocol_t *self, protocol_t *other) +{ + runtimeLock.assertLocked(); + + if (!self || !other) { + return NO; + } + + // protocols need not be fixed up + + if (0 == strcmp(self->mangledName, other->mangledName)) { + return YES; + } + + if (self->protocols) { + uintptr_t i; + for (i = 0; i < self->protocols->count; i++) { + protocol_t *proto = remapProtocol(self->protocols->list[i]); + if (0 == strcmp(other->mangledName, proto->mangledName)) { + return YES; + } + if (protocol_conformsToProtocol_nolock(proto, other)) { + return YES; + } + } + } + + return NO; +} + + +/*********************************************************************** +* protocol_conformsToProtocol +* Returns YES if self conforms to other. +* Locking: acquires runtimeLock +**********************************************************************/ +BOOL protocol_conformsToProtocol(Protocol *self, Protocol *other) +{ + rwlock_reader_t lock(runtimeLock); + return protocol_conformsToProtocol_nolock(newprotocol(self), + newprotocol(other)); +} + + +/*********************************************************************** +* protocol_isEqual +* Return YES if two protocols are equal (i.e. conform to each other) +* Locking: acquires runtimeLock +**********************************************************************/ +BOOL protocol_isEqual(Protocol *self, Protocol *other) +{ + if (self == other) return YES; + if (!self || !other) return NO; + + if (!protocol_conformsToProtocol(self, other)) return NO; + if (!protocol_conformsToProtocol(other, self)) return NO; + + return YES; +} + + +/*********************************************************************** +* protocol_copyMethodDescriptionList +* Returns descriptions of a protocol's methods. +* Locking: acquires runtimeLock +**********************************************************************/ +struct objc_method_description * +protocol_copyMethodDescriptionList(Protocol *p, + BOOL isRequiredMethod,BOOL isInstanceMethod, + unsigned int *outCount) +{ + protocol_t *proto = newprotocol(p); + struct objc_method_description *result = nil; + unsigned int count = 0; + + if (!proto) { + if (outCount) *outCount = 0; + return nil; + } + + fixupProtocolIfNeeded(proto); + + rwlock_reader_t lock(runtimeLock); + + method_list_t *mlist = + getProtocolMethodList(proto, isRequiredMethod, isInstanceMethod); + + if (mlist) { + result = (struct objc_method_description *) + calloc(mlist->count + 1, sizeof(struct objc_method_description)); + for (const auto& meth : *mlist) { + result[count].name = meth.name; + result[count].types = (char *)meth.types; + count++; + } + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* protocol_getProperty +* fixme +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static property_t * +protocol_getProperty_nolock(protocol_t *proto, const char *name, + bool isRequiredProperty, bool isInstanceProperty) +{ + runtimeLock.assertLocked(); + + if (!isRequiredProperty) { + // Only required properties are currently supported. + return nil; + } + + property_list_t *plist = isInstanceProperty ? 
+        proto->instanceProperties : proto->classProperties();
+    if (plist) {
+        for (auto& prop : *plist) {
+            if (0 == strcmp(name, prop.name)) {
+                return &prop;
+            }
+        }
+    }
+
+    if (proto->protocols) {
+        uintptr_t i;
+        for (i = 0; i < proto->protocols->count; i++) {
+            protocol_t *p = remapProtocol(proto->protocols->list[i]);
+            property_t *prop =
+                protocol_getProperty_nolock(p, name,
+                                            isRequiredProperty,
+                                            isInstanceProperty);
+            if (prop) return prop;
+        }
+    }
+
+    return nil;
+}
+
+objc_property_t protocol_getProperty(Protocol *p, const char *name,
+                              BOOL isRequiredProperty, BOOL isInstanceProperty)
+{
+    if (!p  ||  !name) return nil;
+
+    rwlock_reader_t lock(runtimeLock);
+    return (objc_property_t)
+        protocol_getProperty_nolock(newprotocol(p), name,
+                                    isRequiredProperty, isInstanceProperty);
+}
+
+
+/***********************************************************************
+* protocol_copyPropertyList
+* protocol_copyPropertyList2
+* fixme
+* Locking: acquires runtimeLock
+**********************************************************************/
+static property_t **
+copyPropertyList(property_list_t *plist, unsigned int *outCount)
+{
+    property_t **result = nil;
+    unsigned int count = 0;
+
+    if (plist) {
+        count = plist->count;
+    }
+
+    if (count > 0) {
+        result = (property_t **)malloc((count+1) * sizeof(property_t *));
+
+        count = 0;
+        for (auto& prop : *plist) {
+            result[count++] = &prop;
+        }
+        result[count] = nil;
+    }
+
+    if (outCount) *outCount = count;
+    return result;
+}
+
+objc_property_t *
+protocol_copyPropertyList2(Protocol *proto, unsigned int *outCount,
+                           BOOL isRequiredProperty, BOOL isInstanceProperty)
+{
+    if (!proto  ||  !isRequiredProperty) {
+        // Optional properties are not currently supported.
+        if (outCount) *outCount = 0;
+        return nil;
+    }
+
+    rwlock_reader_t lock(runtimeLock);
+
+    property_list_t *plist = isInstanceProperty
+        ? newprotocol(proto)->instanceProperties
+        : newprotocol(proto)->classProperties();
+    return (objc_property_t *)copyPropertyList(plist, outCount);
+}
+
+objc_property_t *
+protocol_copyPropertyList(Protocol *proto, unsigned int *outCount)
+{
+    return protocol_copyPropertyList2(proto, outCount,
+                                      YES/*required*/, YES/*instance*/);
+}
+
+
+/***********************************************************************
+* protocol_copyProtocolList
+* Copies this protocol's incorporated protocols.
+* Does not copy those protocols' incorporated protocols in turn.
+* Locking: acquires runtimeLock
+**********************************************************************/
+Protocol * __unsafe_unretained *
+protocol_copyProtocolList(Protocol *p, unsigned int *outCount)
+{
+    unsigned int count = 0;
+    Protocol **result = nil;
+    protocol_t *proto = newprotocol(p);
+
+    if (!proto) {
+        if (outCount) *outCount = 0;
+        return nil;
+    }
+
+    rwlock_reader_t lock(runtimeLock);
+
+    if (proto->protocols) {
+        count = (unsigned int)proto->protocols->count;
+    }
+    if (count > 0) {
+        result = (Protocol **)malloc((count+1) * sizeof(Protocol *));
+
+        unsigned int i;
+        for (i = 0; i < count; i++) {
+            result[i] = (Protocol *)remapProtocol(proto->protocols->list[i]);
+        }
+        result[i] = nil;
+    }
+
+    if (outCount) *outCount = count;
+    return result;
+}
+
+
+/***********************************************************************
+* objc_allocateProtocol
+* Creates a new protocol. The protocol may not be used until
+* objc_registerProtocol() is called.
+* Returns nil if a protocol with the same name already exists.
+* Locking: acquires runtimeLock +**********************************************************************/ +Protocol * +objc_allocateProtocol(const char *name) +{ + rwlock_writer_t lock(runtimeLock); + + if (getProtocol(name)) { + return nil; + } + + protocol_t *result = (protocol_t *)calloc(sizeof(protocol_t), 1); + + extern objc_class OBJC_CLASS_$___IncompleteProtocol; + Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol; + result->initProtocolIsa(cls); + result->size = sizeof(protocol_t); + // fixme mangle the name if it looks swift-y? + result->mangledName = strdupIfMutable(name); + + // fixme reserve name without installing + + return (Protocol *)result; +} + + +/*********************************************************************** +* objc_registerProtocol +* Registers a newly-constructed protocol. The protocol is now +* ready for use and immutable. +* Locking: acquires runtimeLock +**********************************************************************/ +void objc_registerProtocol(Protocol *proto_gen) +{ + protocol_t *proto = newprotocol(proto_gen); + + rwlock_writer_t lock(runtimeLock); + + extern objc_class OBJC_CLASS_$___IncompleteProtocol; + Class oldcls = (Class)&OBJC_CLASS_$___IncompleteProtocol; + extern objc_class OBJC_CLASS_$_Protocol; + Class cls = (Class)&OBJC_CLASS_$_Protocol; + + if (proto->ISA() == cls) { + _objc_inform("objc_registerProtocol: protocol '%s' was already " + "registered!", proto->nameForLogging()); + return; + } + if (proto->ISA() != oldcls) { + _objc_inform("objc_registerProtocol: protocol '%s' was not allocated " + "with objc_allocateProtocol!", proto->nameForLogging()); + return; + } + + // NOT initProtocolIsa(). The protocol object may already + // have been retained and we must preserve that count. + proto->changeIsa(cls); + + NXMapKeyCopyingInsert(protocols(), proto->mangledName, proto); +} + + +/*********************************************************************** +* protocol_addProtocol +* Adds an incorporated protocol to another protocol. +* No method enforcement is performed. +* `proto` must be under construction. `addition` must not. +* Locking: acquires runtimeLock +**********************************************************************/ +void +protocol_addProtocol(Protocol *proto_gen, Protocol *addition_gen) +{ + protocol_t *proto = newprotocol(proto_gen); + protocol_t *addition = newprotocol(addition_gen); + + extern objc_class OBJC_CLASS_$___IncompleteProtocol; + Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol; + + if (!proto_gen) return; + if (!addition_gen) return; + + rwlock_writer_t lock(runtimeLock); + + if (proto->ISA() != cls) { + _objc_inform("protocol_addProtocol: modified protocol '%s' is not " + "under construction!", proto->nameForLogging()); + return; + } + if (addition->ISA() == cls) { + _objc_inform("protocol_addProtocol: added protocol '%s' is still " + "under construction!", addition->nameForLogging()); + return; + } + + protocol_list_t *protolist = proto->protocols; + if (!protolist) { + protolist = (protocol_list_t *) + calloc(1, sizeof(protocol_list_t) + + sizeof(protolist->list[0])); + } else { + protolist = (protocol_list_t *) + realloc(protolist, protocol_list_size(protolist) + + sizeof(protolist->list[0])); + } + + protolist->list[protolist->count++] = (protocol_ref_t)addition; + proto->protocols = protolist; +} + + +/*********************************************************************** +* protocol_addMethodDescription +* Adds a method to a protocol. The protocol must be under construction. 
+* Locking: acquires runtimeLock +**********************************************************************/ +static void +protocol_addMethod_nolock(method_list_t*& list, SEL name, const char *types) +{ + if (!list) { + list = (method_list_t *)calloc(sizeof(method_list_t), 1); + list->entsizeAndFlags = sizeof(list->first); + list->setFixedUp(); + } else { + size_t size = list->byteSize() + list->entsize(); + list = (method_list_t *)realloc(list, size); + } + + method_t& meth = list->get(list->count++); + meth.name = name; + meth.types = types ? strdupIfMutable(types) : ""; + meth.imp = nil; +} + +void +protocol_addMethodDescription(Protocol *proto_gen, SEL name, const char *types, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + protocol_t *proto = newprotocol(proto_gen); + + extern objc_class OBJC_CLASS_$___IncompleteProtocol; + Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol; + + if (!proto_gen) return; + + rwlock_writer_t lock(runtimeLock); + + if (proto->ISA() != cls) { + _objc_inform("protocol_addMethodDescription: protocol '%s' is not " + "under construction!", proto->nameForLogging()); + return; + } + + if (isRequiredMethod && isInstanceMethod) { + protocol_addMethod_nolock(proto->instanceMethods, name, types); + } else if (isRequiredMethod && !isInstanceMethod) { + protocol_addMethod_nolock(proto->classMethods, name, types); + } else if (!isRequiredMethod && isInstanceMethod) { + protocol_addMethod_nolock(proto->optionalInstanceMethods, name,types); + } else /* !isRequiredMethod && !isInstanceMethod) */ { + protocol_addMethod_nolock(proto->optionalClassMethods, name, types); + } +} + + +/*********************************************************************** +* protocol_addProperty +* Adds a property to a protocol. The protocol must be under construction. 
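+*
+* Usage sketch (editor's addition, not in the original source), using the
+* standard declared-property attribute codes (T = type, C = copy,
+* N = nonatomic):
+*
+*   objc_property_attribute_t attrs[] = {
+*       { "T", "@\"NSString\"" }, { "C", "" }, { "N", "" }
+*   };
+*   protocol_addProperty(proto, "title", attrs, 3,
+*                        YES/*required*/, YES/*instance*/);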
+* Locking: acquires runtimeLock +**********************************************************************/ +static void +protocol_addProperty_nolock(property_list_t *&plist, const char *name, + const objc_property_attribute_t *attrs, + unsigned int count) +{ + if (!plist) { + plist = (property_list_t *)calloc(sizeof(property_list_t), 1); + plist->entsizeAndFlags = sizeof(property_t); + } else { + plist = (property_list_t *) + realloc(plist, sizeof(property_list_t) + + plist->count * plist->entsize()); + } + + property_t& prop = plist->get(plist->count++); + prop.name = strdupIfMutable(name); + prop.attributes = copyPropertyAttributeString(attrs, count); +} + +void +protocol_addProperty(Protocol *proto_gen, const char *name, + const objc_property_attribute_t *attrs, + unsigned int count, + BOOL isRequiredProperty, BOOL isInstanceProperty) +{ + protocol_t *proto = newprotocol(proto_gen); + + extern objc_class OBJC_CLASS_$___IncompleteProtocol; + Class cls = (Class)&OBJC_CLASS_$___IncompleteProtocol; + + if (!proto) return; + if (!name) return; + + rwlock_writer_t lock(runtimeLock); + + if (proto->ISA() != cls) { + _objc_inform("protocol_addProperty: protocol '%s' is not " + "under construction!", proto->nameForLogging()); + return; + } + + if (isRequiredProperty && isInstanceProperty) { + protocol_addProperty_nolock(proto->instanceProperties, name, attrs, count); + } + else if (isRequiredProperty && !isInstanceProperty) { + protocol_addProperty_nolock(proto->_classProperties, name, attrs, count); + } + //else if (!isRequiredProperty && isInstanceProperty) { + // protocol_addProperty_nolock(proto->optionalInstanceProperties, name, attrs, count); + //} + //else /* !isRequiredProperty && !isInstanceProperty) */ { + // protocol_addProperty_nolock(proto->optionalClassProperties, name, attrs, count); + //} +} + + +/*********************************************************************** +* objc_getClassList +* Returns pointers to all classes. +* This requires all classes be realized, which is regretfully non-lazy. +* Locking: acquires runtimeLock +**********************************************************************/ +int +objc_getClassList(Class *buffer, int bufferLen) +{ + rwlock_writer_t lock(runtimeLock); + + realizeAllClasses(); + + __block int count = 0; + foreach_realized_class_and_metaclass(^(Class cls) { + if (!cls->isMetaClass()) count++; + }); + + if (buffer) { + __block int c = 0; + foreach_realized_class_and_metaclass(^(Class cls) { + if (c < bufferLen && !cls->isMetaClass()) { + buffer[c++] = cls; + } + }); + } + + return count; +} + + +/*********************************************************************** +* objc_copyClassList +* Returns pointers to all classes. +* This requires all classes be realized, which is regretfully non-lazy. +* +* outCount may be nil. *outCount is the number of classes returned. +* If the returned array is not nil, it is nil-terminated and must be +* freed with free(). 
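+*
+* Usage sketch (editor's addition, not in the original source), following the
+* contract above:
+*
+*   unsigned int count;
+*   Class *classes = objc_copyClassList(&count);
+*   for (unsigned int i = 0; i < count; i++) {
+*       // inspect classes[i]
+*   }
+*   free(classes);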
+* Locking: write-locks runtimeLock +**********************************************************************/ +Class * +objc_copyClassList(unsigned int *outCount) +{ + rwlock_writer_t lock(runtimeLock); + + realizeAllClasses(); + + Class *result = nil; + + __block unsigned int count = 0; + foreach_realized_class_and_metaclass(^(Class cls) { + if (!cls->isMetaClass()) count++; + }); + + if (count > 0) { + result = (Class *)malloc((1+count) * sizeof(Class)); + __block unsigned int c = 0; + foreach_realized_class_and_metaclass(^(Class cls) { + if (!cls->isMetaClass()) { + result[c++] = cls; + } + }); + result[c] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* objc_copyProtocolList +* Returns pointers to all protocols. +* Locking: read-locks runtimeLock +**********************************************************************/ +Protocol * __unsafe_unretained * +objc_copyProtocolList(unsigned int *outCount) +{ + rwlock_reader_t lock(runtimeLock); + + NXMapTable *protocol_map = protocols(); + + unsigned int count = NXCountMapTable(protocol_map); + if (count == 0) { + if (outCount) *outCount = 0; + return nil; + } + + Protocol **result = (Protocol **)malloc((count+1) * sizeof(Protocol*)); + + unsigned int i = 0; + Protocol *proto; + const char *name; + NXMapState state = NXInitMapState(protocol_map); + while (NXNextMapState(protocol_map, &state, + (const void **)&name, (const void **)&proto)) + { + result[i++] = proto; + } + + result[i++] = nil; + assert(i == count+1); + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* objc_getProtocol +* Get a protocol by name, or return nil +* Locking: read-locks runtimeLock +**********************************************************************/ +Protocol *objc_getProtocol(const char *name) +{ + rwlock_reader_t lock(runtimeLock); + return getProtocol(name); +} + + +/*********************************************************************** +* class_copyMethodList +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +Method * +class_copyMethodList(Class cls, unsigned int *outCount) +{ + unsigned int count = 0; + Method *result = nil; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + rwlock_reader_t lock(runtimeLock); + + assert(cls->isRealized()); + + count = cls->data()->methods.count(); + + if (count > 0) { + result = (Method *)malloc((count + 1) * sizeof(Method)); + + count = 0; + for (auto& meth : cls->data()->methods) { + result[count++] = &meth; + } + result[count] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* class_copyIvarList +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +Ivar * +class_copyIvarList(Class cls, unsigned int *outCount) +{ + const ivar_list_t *ivars; + Ivar *result = nil; + unsigned int count = 0; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + rwlock_reader_t lock(runtimeLock); + + assert(cls->isRealized()); + + if ((ivars = cls->data()->ro->ivars) && ivars->count) { + result = (Ivar *)malloc((ivars->count+1) * sizeof(Ivar)); + + for (auto& ivar : *ivars) { + if (!ivar.offset) continue; // anonymous bitfield + result[count++] = &ivar; + } + result[count] = nil; + } + + if (outCount) 
*outCount = count;
+    return result;
+}
+
+
+/***********************************************************************
+* class_copyPropertyList. Returns a heap block containing the
+* properties declared in the class, or nil if the class
+* declares no properties. Caller must free the block.
+* Does not copy any superclass's properties.
+* Locking: read-locks runtimeLock
+**********************************************************************/
+objc_property_t *
+class_copyPropertyList(Class cls, unsigned int *outCount)
+{
+    if (!cls) {
+        if (outCount) *outCount = 0;
+        return nil;
+    }
+
+    rwlock_reader_t lock(runtimeLock);
+
+    assert(cls->isRealized());
+    auto rw = cls->data();
+
+    property_t **result = nil;
+    unsigned int count = rw->properties.count();
+    if (count > 0) {
+        result = (property_t **)malloc((count + 1) * sizeof(property_t *));
+
+        count = 0;
+        for (auto& prop : rw->properties) {
+            result[count++] = &prop;
+        }
+        result[count] = nil;
+    }
+
+    if (outCount) *outCount = count;
+    return (objc_property_t *)result;
+}
+
+
+/***********************************************************************
+* objc_class::getLoadMethod
+* fixme
+* Called only from add_class_to_loadable_list.
+* Locking: runtimeLock must be read- or write-locked by the caller.
+**********************************************************************/
+IMP
+objc_class::getLoadMethod()
+{
+    runtimeLock.assertLocked();
+
+    const method_list_t *mlist;
+
+    assert(isRealized());
+    assert(ISA()->isRealized());
+    assert(!isMetaClass());
+    assert(ISA()->isMetaClass());
+
+    mlist = ISA()->data()->ro->baseMethods();
+    if (mlist) {
+        for (const auto& meth : *mlist) {
+            const char *name = sel_cname(meth.name);
+            if (0 == strcmp(name, "load")) {
+                return meth.imp;
+            }
+        }
+    }
+
+    return nil;
+}
+
+
+/***********************************************************************
+* _category_getName
+* Returns a category's name.
+* Locking: none
+**********************************************************************/
+const char *
+_category_getName(Category cat)
+{
+    return cat->name;
+}
+
+
+/***********************************************************************
+* _category_getClassName
+* Returns a category's class's name
+* Called only from add_category_to_loadable_list and
+* remove_category_from_loadable_list for logging purposes.
+* Locking: runtimeLock must be read- or write-locked by the caller
+**********************************************************************/
+const char *
+_category_getClassName(Category cat)
+{
+    runtimeLock.assertLocked();
+    return remapClass(cat->cls)->nameForLogging();
+}
+
+
+/***********************************************************************
+* _category_getClass
+* Returns a category's class
+* Called only by call_category_loads.
+* Locking: read-locks runtimeLock +**********************************************************************/ +Class +_category_getClass(Category cat) +{ + rwlock_reader_t lock(runtimeLock); + Class result = remapClass(cat->cls); + assert(result->isRealized()); // ok for call_category_loads' usage + return result; +} + + +/*********************************************************************** +* _category_getLoadMethod +* fixme +* Called only from add_category_to_loadable_list +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +IMP +_category_getLoadMethod(Category cat) +{ + runtimeLock.assertLocked(); + + const method_list_t *mlist; + + mlist = cat->classMethods; + if (mlist) { + for (const auto& meth : *mlist) { + const char *name = sel_cname(meth.name); + if (0 == strcmp(name, "load")) { + return meth.imp; + } + } + } + + return nil; +} + + +/*********************************************************************** +* category_t::propertiesForMeta +* Return a category's instance or class properties. +* hi is the image containing the category. +**********************************************************************/ +property_list_t * +category_t::propertiesForMeta(bool isMeta, struct header_info *hi) +{ + if (!isMeta) return instanceProperties; + else if (hi->info()->hasCategoryClassProperties()) return _classProperties; + else return nil; +} + + +/*********************************************************************** +* class_copyProtocolList +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +Protocol * __unsafe_unretained * +class_copyProtocolList(Class cls, unsigned int *outCount) +{ + unsigned int count = 0; + Protocol **result = nil; + + if (!cls) { + if (outCount) *outCount = 0; + return nil; + } + + rwlock_reader_t lock(runtimeLock); + + assert(cls->isRealized()); + + count = cls->data()->protocols.count(); + + if (count > 0) { + result = (Protocol **)malloc((count+1) * sizeof(Protocol *)); + + count = 0; + for (const auto& proto : cls->data()->protocols) { + result[count++] = (Protocol *)remapProtocol(proto); + } + result[count] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* _objc_copyClassNamesForImage +* fixme +* Locking: write-locks runtimeLock +**********************************************************************/ +const char ** +_objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) +{ + size_t count, i, shift; + classref_t *classlist; + const char **names; + + // Need to write-lock in case demangledName() needs to realize a class. + rwlock_writer_t lock(runtimeLock); + + classlist = _getObjc2ClassList(hi, &count); + names = (const char **)malloc((count+1) * sizeof(const char *)); + + shift = 0; + for (i = 0; i < count; i++) { + Class cls = remapClass(classlist[i]); + if (cls) { + names[i-shift] = cls->demangledName(true/*realize*/); + } else { + shift++; // ignored weak-linked class + } + } + count -= shift; + names[count] = nil; + + if (outCount) *outCount = (unsigned int)count; + return names; +} + + +/*********************************************************************** +* saveTemporaryString +* Save a string in a thread-local FIFO buffer. +* This is suitable for temporary strings generated for logging purposes. 
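+* Editor's note (not in the original source): because the buffer is a small
+* fixed-size FIFO, a pointer saved here (e.g. one returned by nameForLogging()
+* below) is only valid until a handful of later calls rotate it out and free
+* it, so callers must print or copy it immediately.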
+**********************************************************************/ +static void +saveTemporaryString(char *str) +{ + // Fixed-size FIFO. We free the first string, shift + // the rest, and add the new string to the end. + _objc_pthread_data *data = _objc_fetch_pthread_data(true); + if (data->printableNames[0]) { + free(data->printableNames[0]); + } + int last = countof(data->printableNames) - 1; + for (int i = 0; i < last; i++) { + data->printableNames[i] = data->printableNames[i+1]; + } + data->printableNames[last] = str; +} + + +/*********************************************************************** +* objc_class::nameForLogging +* Returns the class's name, suitable for display. +* The returned memory is TEMPORARY. Print it or copy it immediately. +* Locking: none +**********************************************************************/ +const char * +objc_class::nameForLogging() +{ + // Handle the easy case directly. + if (isRealized() || isFuture()) { + if (data()->demangledName) return data()->demangledName; + } + + char *result; + + const char *name = mangledName(); + char *de = copySwiftV1DemangledName(name); + if (de) result = de; + else result = strdup(name); + + saveTemporaryString(result); + return result; +} + + +/*********************************************************************** +* objc_class::demangledName +* If realize=false, the class must already be realized or future. +* Locking: If realize=true, runtimeLock must be held for writing by the caller. +**********************************************************************/ +static mutex_t DemangleCacheLock; +static NXHashTable *DemangleCache; +const char * +objc_class::demangledName(bool realize) +{ + // Return previously demangled name if available. + if (isRealized() || isFuture()) { + if (data()->demangledName) return data()->demangledName; + } + + // Try demangling the mangled name. + const char *mangled = mangledName(); + char *de = copySwiftV1DemangledName(mangled); + if (isRealized() || isFuture()) { + // Class is already realized or future. + // Save demangling result in rw data. + // We may not own rwlock for writing so use an atomic operation instead. + if (! OSAtomicCompareAndSwapPtrBarrier(nil, (void*)(de ?: mangled), + (void**)&data()->demangledName)) + { + if (de) free(de); + } + return data()->demangledName; + } + + // Class is not yet realized. + if (!de) { + // Name is not mangled. Return it without caching. + return mangled; + } + + // Class is not yet realized and name is mangled. Realize the class. + // Only objc_copyClassNamesForImage() should get here. + + // fixme lldb's calls to class_getName() can also get here when + // interrogating the dyld shared cache. (rdar://27258517) + // fixme runtimeLock.assertWriting(); + // fixme assert(realize); + + if (realize) { + runtimeLock.assertWriting(); + realizeClass((Class)this); + data()->demangledName = de; + return de; + } + else { + // Save the string to avoid leaks. 
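+    // Editor's note (not in the original source): the class is not realized,
+    // so there is no class_rw_t in which to store the demangled name.
+    // Intern the string in a global hash table instead; if an equal string
+    // was already inserted, free this copy and return the cached one.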
+ char *cached; + { + mutex_locker_t lock(DemangleCacheLock); + if (!DemangleCache) { + DemangleCache = NXCreateHashTable(NXStrPrototype, 0, nil); + } + cached = (char *)NXHashInsertIfAbsent(DemangleCache, de); + } + if (cached != de) free(de); + return cached; + } +} + + +/*********************************************************************** +* class_getName +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +const char *class_getName(Class cls) +{ + if (!cls) return "nil"; + // fixme lldb calls class_getName() on unrealized classes (rdar://27258517) + // assert(cls->isRealized() || cls->isFuture()); + return cls->demangledName(); +} + + +/*********************************************************************** +* class_getVersion +* fixme +* Locking: none +**********************************************************************/ +int +class_getVersion(Class cls) +{ + if (!cls) return 0; + assert(cls->isRealized()); + return cls->data()->version; +} + + +/*********************************************************************** +* class_setVersion +* fixme +* Locking: none +**********************************************************************/ +void +class_setVersion(Class cls, int version) +{ + if (!cls) return; + assert(cls->isRealized()); + cls->data()->version = version; +} + + +static method_t *findMethodInSortedMethodList(SEL key, const method_list_t *list) +{ + assert(list); + + const method_t * const first = &list->first; + const method_t *base = first; + const method_t *probe; + uintptr_t keyValue = (uintptr_t)key; + uint32_t count; + + for (count = list->count; count != 0; count >>= 1) { + probe = base + (count >> 1); + + uintptr_t probeValue = (uintptr_t)probe->name; + + if (keyValue == probeValue) { + // `probe` is a match. + // Rewind looking for the *first* occurrence of this value. + // This is required for correct category overrides. + while (probe > first && keyValue == (uintptr_t)probe[-1].name) { + probe--; + } + return (method_t *)probe; + } + + if (keyValue > probeValue) { + base = probe + 1; + count--; + } + } + + return nil; +} + +/*********************************************************************** +* getMethodNoSuper_nolock +* fixme +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static method_t *search_method_list(const method_list_t *mlist, SEL sel) +{ + int methodListIsFixedUp = mlist->isFixedUp(); + int methodListHasExpectedSize = mlist->entsize() == sizeof(method_t); + + if (__builtin_expect(methodListIsFixedUp && methodListHasExpectedSize, 1)) { + return findMethodInSortedMethodList(sel, mlist); + } else { + // Linear search of unsorted method list + for (auto& meth : *mlist) { + if (meth.name == sel) return &meth; + } + } + +#if DEBUG + // sanity-check negative results + if (mlist->isFixedUp()) { + for (auto& meth : *mlist) { + if (meth.name == sel) { + _objc_fatal("linear search worked when binary search did not"); + } + } + } +#endif + + return nil; +} + +static method_t * +getMethodNoSuper_nolock(Class cls, SEL sel) +{ + runtimeLock.assertLocked(); + + assert(cls->isRealized()); + // fixme nil cls? + // fixme nil sel? 
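+    // Editor's note (not in the original source): walk every method list in
+    // this class's list array and return the first match. attachLists() adds
+    // newer lists (e.g. from categories) at the front, so they are searched
+    // first and override earlier definitions.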
+ + for (auto mlists = cls->data()->methods.beginLists(), + end = cls->data()->methods.endLists(); + mlists != end; + ++mlists) + { + method_t *m = search_method_list(*mlists, sel); + if (m) return m; + } + + return nil; +} + + +/*********************************************************************** +* getMethod_nolock +* fixme +* Locking: runtimeLock must be read- or write-locked by the caller +**********************************************************************/ +static method_t * +getMethod_nolock(Class cls, SEL sel) +{ + method_t *m = nil; + + runtimeLock.assertLocked(); + + // fixme nil cls? + // fixme nil sel? + + assert(cls->isRealized()); + + while (cls && ((m = getMethodNoSuper_nolock(cls, sel))) == nil) { + cls = cls->superclass; + } + + return m; +} + + +/*********************************************************************** +* _class_getMethod +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +static Method _class_getMethod(Class cls, SEL sel) +{ + rwlock_reader_t lock(runtimeLock); + return getMethod_nolock(cls, sel); +} + + +/*********************************************************************** +* class_getInstanceMethod. Return the instance method for the +* specified class and selector. +**********************************************************************/ +Method class_getInstanceMethod(Class cls, SEL sel) +{ + if (!cls || !sel) return nil; + + // This deliberately avoids +initialize because it historically did so. + + // This implementation is a bit weird because it's the only place that + // wants a Method instead of an IMP. + +#warning fixme build and search caches + + // Search method lists, try method resolver, etc. + lookUpImpOrNil(cls, sel, nil, + NO/*initialize*/, NO/*cache*/, YES/*resolver*/); + +#warning fixme build and search caches + + return _class_getMethod(cls, sel); +} + + +/*********************************************************************** +* log_and_fill_cache +* Log this method call. If the logger permits it, fill the method cache. +* cls is the method whose cache should be filled. +* implementer is the class that owns the implementation in question. +**********************************************************************/ +static void +log_and_fill_cache(Class cls, IMP imp, SEL sel, id receiver, Class implementer) +{ +#if SUPPORT_MESSAGE_LOGGING + if (objcMsgLogEnabled) { + bool cacheIt = logMessageSend(implementer->isMetaClass(), + cls->nameForLogging(), + implementer->nameForLogging(), + sel); + if (!cacheIt) return; + } +#endif + cache_fill (cls, sel, imp, receiver); +} + + +/*********************************************************************** +* _class_lookupMethodAndLoadCache. +* Method lookup for dispatchers ONLY. OTHER CODE SHOULD USE lookUpImp(). +* This lookup avoids optimistic cache scan because the dispatcher +* already tried that. +**********************************************************************/ +IMP _class_lookupMethodAndLoadCache3(id obj, SEL sel, Class cls) +{ + return lookUpImpOrForward(cls, sel, obj, + YES/*initialize*/, NO/*cache*/, YES/*resolver*/); +} + + +/*********************************************************************** +* lookUpImpOrForward. +* The standard IMP lookup. +* initialize==NO tries to avoid +initialize (but sometimes fails) +* cache==NO skips optimistic unlocked lookup (but uses cache elsewhere) +* Most callers should use initialize==YES and cache==YES. 
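+* Editor's note (not in the original source): the search order implemented
+* below is: this class's cache, this class's method lists, each superclass's
+* cache and method lists, then the method resolver (tried once), and finally
+* the _objc_msgForward_impcache forwarding entry.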
+* inst is an instance of cls or a subclass thereof, or nil if none is known. +* If cls is an un-initialized metaclass then a non-nil inst is faster. +* May return _objc_msgForward_impcache. IMPs destined for external use +* must be converted to _objc_msgForward or _objc_msgForward_stret. +* If you don't want forwarding at all, use lookUpImpOrNil() instead. +**********************************************************************/ +IMP lookUpImpOrForward(Class cls, SEL sel, id inst, + bool initialize, bool cache, bool resolver) +{ + Class curClass; + IMP imp = nil; + Method meth; + bool triedResolver = NO; + + runtimeLock.assertUnlocked(); + + // Optimistic cache lookup + if (cache) { + imp = cache_getImp(cls, sel); + if (imp) return imp; + } + + if (!cls->isRealized()) { + rwlock_writer_t lock(runtimeLock); + realizeClass(cls); + } + + if (initialize && !cls->isInitialized()) { + _class_initialize (_class_getNonMetaClass(cls, inst)); + // If sel == initialize, _class_initialize will send +initialize and + // then the messenger will send +initialize again after this + // procedure finishes. Of course, if this is not being called + // from the messenger then it won't happen. 2778172 + } + + // The lock is held to make method-lookup + cache-fill atomic + // with respect to method addition. Otherwise, a category could + // be added but ignored indefinitely because the cache was re-filled + // with the old value after the cache flush on behalf of the category. + retry: + runtimeLock.read(); + + // Try this class's cache. + + imp = cache_getImp(cls, sel); + if (imp) goto done; + + // Try this class's method lists. + + meth = getMethodNoSuper_nolock(cls, sel); + if (meth) { + log_and_fill_cache(cls, meth->imp, sel, inst, cls); + imp = meth->imp; + goto done; + } + + // Try superclass caches and method lists. + + curClass = cls; + while ((curClass = curClass->superclass)) { + // Superclass cache. + imp = cache_getImp(curClass, sel); + if (imp) { + if (imp != (IMP)_objc_msgForward_impcache) { + // Found the method in a superclass. Cache it in this class. + log_and_fill_cache(cls, imp, sel, inst, curClass); + goto done; + } + else { + // Found a forward:: entry in a superclass. + // Stop searching, but don't cache yet; call method + // resolver for this class first. + break; + } + } + + // Superclass method list. + meth = getMethodNoSuper_nolock(curClass, sel); + if (meth) { + log_and_fill_cache(cls, meth->imp, sel, inst, curClass); + imp = meth->imp; + goto done; + } + } + + // No implementation found. Try method resolver once. + + if (resolver && !triedResolver) { + runtimeLock.unlockRead(); + _class_resolveMethod(cls, sel, inst); + // Don't cache the result; we don't hold the lock so it may have + // changed already. Re-do the search from scratch instead. + triedResolver = YES; + goto retry; + } + + // No implementation found, and method resolver didn't help. + // Use forwarding. + + imp = (IMP)_objc_msgForward_impcache; + cache_fill(cls, sel, imp, inst); + + done: + runtimeLock.unlockRead(); + + return imp; +} + + +/*********************************************************************** +* lookUpImpOrNil. 
+* Like lookUpImpOrForward, but returns nil instead of _objc_msgForward_impcache
+**********************************************************************/
+IMP lookUpImpOrNil(Class cls, SEL sel, id inst,
+                   bool initialize, bool cache, bool resolver)
+{
+    IMP imp = lookUpImpOrForward(cls, sel, inst, initialize, cache, resolver);
+    if (imp == _objc_msgForward_impcache) return nil;
+    else return imp;
+}
+
+
+/***********************************************************************
+* lookupMethodInClassAndLoadCache.
+* Like _class_lookupMethodAndLoadCache, but does not search superclasses.
+* Caches and returns objc_msgForward if the method is not found in the class.
+**********************************************************************/
+IMP lookupMethodInClassAndLoadCache(Class cls, SEL sel)
+{
+    Method meth;
+    IMP imp;
+
+    // fixme this is incomplete - no resolver, +initialize -
+    // but it's only used for .cxx_construct/destruct so we don't care
+    assert(sel == SEL_cxx_construct || sel == SEL_cxx_destruct);
+
+    // Search cache first.
+    imp = cache_getImp(cls, sel);
+    if (imp) return imp;
+
+    // Cache miss. Search method list.
+
+    rwlock_reader_t lock(runtimeLock);
+
+    meth = getMethodNoSuper_nolock(cls, sel);
+
+    if (meth) {
+        // Hit in method list. Cache it.
+        cache_fill(cls, sel, meth->imp, nil);
+        return meth->imp;
+    } else {
+        // Miss in method list. Cache objc_msgForward.
+        cache_fill(cls, sel, _objc_msgForward_impcache, nil);
+        return _objc_msgForward_impcache;
+    }
+}
+
+
+/***********************************************************************
+* class_getProperty
+* fixme
+* Locking: read-locks runtimeLock
+**********************************************************************/
+objc_property_t class_getProperty(Class cls, const char *name)
+{
+    if (!cls || !name) return nil;
+
+    rwlock_reader_t lock(runtimeLock);
+
+    assert(cls->isRealized());
+
+    for ( ; cls; cls = cls->superclass) {
+        for (auto& prop : cls->data()->properties) {
+            if (0 == strcmp(name, prop.name)) {
+                return (objc_property_t)&prop;
+            }
+        }
+    }
+
+    return nil;
+}
+
+
+/***********************************************************************
+* Locking: fixme
+**********************************************************************/
+
+Class gdb_class_getClass(Class cls)
+{
+    const char *className = cls->mangledName();
+    if(!className || !strlen(className)) return Nil;
+    Class rCls = look_up_class(className, NO, NO);
+    return rCls;
+}
+
+Class gdb_object_getClass(id obj)
+{
+    if (!obj) return nil;
+    return gdb_class_getClass(obj->getIsa());
+}
+
+
+/***********************************************************************
+* Locking: write-locks runtimeLock
+**********************************************************************/
+void
+objc_class::setInitialized()
+{
+    Class metacls;
+    Class cls;
+
+    assert(!isMetaClass());
+
+    cls = (Class)this;
+    metacls = cls->ISA();
+
+    rwlock_reader_t lock(runtimeLock);
+
+    // Scan metaclass for custom AWZ.
+    // Scan metaclass for custom RR.
+    // Scan class for custom RR.
+    // Also print custom RR/AWZ because we probably haven't done it yet.
+
+    // Special cases:
+    // NSObject AWZ class methods are default.
+    // NSObject RR instance methods are default.
+    // updateCustomRR_AWZ() also knows these special cases.
+    // attachMethodLists() also knows these special cases.
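+    // Editor's note (not in the original source): "AWZ" is the
+    // +alloc/+allocWithZone: family and "RR" is the retain/release/
+    // autorelease/retainCount family; see isAWZSelector() and isRRSelector()
+    // below.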
+ + bool inherited; + bool metaCustomAWZ = NO; + if (MetaclassNSObjectAWZSwizzled) { + // Somebody already swizzled NSObject's methods + metaCustomAWZ = YES; + inherited = NO; + } + else if (metacls == classNSObject()->ISA()) { + // NSObject's metaclass AWZ is default, but we still need to check cats + auto& methods = metacls->data()->methods; + for (auto mlists = methods.beginCategoryMethodLists(), + end = methods.endCategoryMethodLists(metacls); + mlists != end; + ++mlists) + { + if (methodListImplementsAWZ(*mlists)) { + metaCustomAWZ = YES; + inherited = NO; + break; + } + } + } + else if (metacls->superclass->hasCustomAWZ()) { + // Superclass is custom AWZ, therefore we are too. + metaCustomAWZ = YES; + inherited = YES; + } + else { + // Not metaclass NSObject. + auto& methods = metacls->data()->methods; + for (auto mlists = methods.beginLists(), + end = methods.endLists(); + mlists != end; + ++mlists) + { + if (methodListImplementsAWZ(*mlists)) { + metaCustomAWZ = YES; + inherited = NO; + break; + } + } + } + if (!metaCustomAWZ) metacls->setHasDefaultAWZ(); + + if (PrintCustomAWZ && metaCustomAWZ) metacls->printCustomAWZ(inherited); + // metacls->printCustomRR(); + + + bool clsCustomRR = NO; + if (ClassNSObjectRRSwizzled) { + // Somebody already swizzled NSObject's methods + clsCustomRR = YES; + inherited = NO; + } + if (cls == classNSObject()) { + // NSObject's RR is default, but we still need to check categories + auto& methods = cls->data()->methods; + for (auto mlists = methods.beginCategoryMethodLists(), + end = methods.endCategoryMethodLists(cls); + mlists != end; + ++mlists) + { + if (methodListImplementsRR(*mlists)) { + clsCustomRR = YES; + inherited = NO; + break; + } + } + } + else if (!cls->superclass) { + // Custom root class + clsCustomRR = YES; + inherited = NO; + } + else if (cls->superclass->hasCustomRR()) { + // Superclass is custom RR, therefore we are too. + clsCustomRR = YES; + inherited = YES; + } + else { + // Not class NSObject. + auto& methods = cls->data()->methods; + for (auto mlists = methods.beginLists(), + end = methods.endLists(); + mlists != end; + ++mlists) + { + if (methodListImplementsRR(*mlists)) { + clsCustomRR = YES; + inherited = NO; + break; + } + } + } + if (!clsCustomRR) cls->setHasDefaultRR(); + + // cls->printCustomAWZ(); + if (PrintCustomRR && clsCustomRR) cls->printCustomRR(inherited); + + // Update the +initialize flags. + // Do this last. 
+ metacls->changeInfo(RW_INITIALIZED, RW_INITIALIZING); +} + + +/*********************************************************************** +* Return YES if sel is used by retain/release implementors +**********************************************************************/ +static bool +isRRSelector(SEL sel) +{ + return (sel == SEL_retain || sel == SEL_release || + sel == SEL_autorelease || sel == SEL_retainCount || + sel == SEL_tryRetain || sel == SEL_retainWeakReference || + sel == SEL_isDeallocating || sel == SEL_allowsWeakReference); +} + + +/*********************************************************************** +* Return YES if mlist implements one of the isRRSelector() methods +**********************************************************************/ +static bool +methodListImplementsRR(const method_list_t *mlist) +{ + return (search_method_list(mlist, SEL_retain) || + search_method_list(mlist, SEL_release) || + search_method_list(mlist, SEL_autorelease) || + search_method_list(mlist, SEL_retainCount) || + search_method_list(mlist, SEL_tryRetain) || + search_method_list(mlist, SEL_isDeallocating) || + search_method_list(mlist, SEL_retainWeakReference) || + search_method_list(mlist, SEL_allowsWeakReference)); +} + + +/*********************************************************************** +* Return YES if sel is used by alloc or allocWithZone implementors +**********************************************************************/ +static bool +isAWZSelector(SEL sel) +{ + return (sel == SEL_allocWithZone || sel == SEL_alloc); +} + + +/*********************************************************************** +* Return YES if mlist implements one of the isAWZSelector() methods +**********************************************************************/ +static bool +methodListImplementsAWZ(const method_list_t *mlist) +{ + return (search_method_list(mlist, SEL_allocWithZone) || + search_method_list(mlist, SEL_alloc)); +} + + +void +objc_class::printCustomRR(bool inherited) +{ + assert(PrintCustomRR); + assert(hasCustomRR()); + _objc_inform("CUSTOM RR: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + +void +objc_class::printCustomAWZ(bool inherited) +{ + assert(PrintCustomAWZ); + assert(hasCustomAWZ()); + _objc_inform("CUSTOM AWZ: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + +void +objc_class::printInstancesRequireRawIsa(bool inherited) +{ + assert(PrintRawIsa); + assert(instancesRequireRawIsa()); + _objc_inform("RAW ISA: %s%s%s", nameForLogging(), + isMetaClass() ? " (meta)" : "", + inherited ? " (inherited)" : ""); +} + + +/*********************************************************************** +* Mark this class and all of its subclasses as implementors or +* inheritors of custom RR (retain/release/autorelease/retainCount) +**********************************************************************/ +void objc_class::setHasCustomRR(bool inherited) +{ + Class cls = (Class)this; + runtimeLock.assertWriting(); + + if (hasCustomRR()) return; + + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c != cls && !c->isInitialized()) { + // Subclass not yet initialized. Wait for setInitialized() to do it + // fixme short circuit recursion? + return; + } + if (c->hasCustomRR()) { + // fixme short circuit recursion? 
+ return; + } + + c->bits.setHasCustomRR(); + + if (PrintCustomRR) c->printCustomRR(inherited || c != cls); + }); +} + +/*********************************************************************** +* Mark this class and all of its subclasses as implementors or +* inheritors of custom alloc/allocWithZone: +**********************************************************************/ +void objc_class::setHasCustomAWZ(bool inherited) +{ + Class cls = (Class)this; + runtimeLock.assertWriting(); + + if (hasCustomAWZ()) return; + + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c != cls && !c->isInitialized()) { + // Subclass not yet initialized. Wait for setInitialized() to do it + // fixme short circuit recursion? + return; + } + if (c->hasCustomAWZ()) { + // fixme short circuit recursion? + return; + } + + c->bits.setHasCustomAWZ(); + + if (PrintCustomAWZ) c->printCustomAWZ(inherited || c != cls); + }); +} + + +/*********************************************************************** +* Mark this class and all of its subclasses as requiring raw isa pointers +**********************************************************************/ +void objc_class::setInstancesRequireRawIsa(bool inherited) +{ + Class cls = (Class)this; + runtimeLock.assertWriting(); + + if (instancesRequireRawIsa()) return; + + foreach_realized_class_and_subclass(cls, ^(Class c){ + if (c->instancesRequireRawIsa()) { + // fixme short circuit recursion? + return; + } + + c->bits.setInstancesRequireRawIsa(); + + if (PrintRawIsa) c->printInstancesRequireRawIsa(inherited || c != cls); + }); +} + + +/*********************************************************************** +* Choose a class index. +* Set instancesRequireRawIsa if no more class indexes are available. +**********************************************************************/ +void objc_class::chooseClassArrayIndex() +{ +#if SUPPORT_INDEXED_ISA + Class cls = (Class)this; + runtimeLock.assertWriting(); + + if (objc_indexed_classes_count >= ISA_INDEX_COUNT) { + // No more indexes available. + assert(cls->classArrayIndex() == 0); + cls->setInstancesRequireRawIsa(false/*not inherited*/); + return; + } + + unsigned index = objc_indexed_classes_count++; + if (index == 0) index = objc_indexed_classes_count++; // index 0 is unused + classForIndex(index) = cls; + cls->setClassArrayIndex(index); +#endif +} + + +/*********************************************************************** +* Update custom RR and AWZ when a method changes its IMP +**********************************************************************/ +static void +updateCustomRR_AWZ(Class cls, method_t *meth) +{ + // In almost all cases, IMP swizzling does not affect custom RR/AWZ bits. + // Custom RR/AWZ search will already find the method whether or not + // it is swizzled, so it does not transition from non-custom to custom. + // + // The only cases where IMP swizzling can affect the RR/AWZ bits is + // if the swizzled method is one of the methods that is assumed to be + // non-custom. These special cases are listed in setInitialized(). + // We look for such cases here. + + if (isRRSelector(meth->name)) { + + if ((classNSObject()->isInitialized() && + classNSObject()->hasCustomRR()) + || + ClassNSObjectRRSwizzled) + { + // already custom, nothing would change + return; + } + + bool swizzlingNSObject = NO; + if (cls == classNSObject()) { + swizzlingNSObject = YES; + } else { + // Don't know the class. + // The only special case is class NSObject. 
+ for (const auto& meth2 : classNSObject()->data()->methods) { + if (meth == &meth2) { + swizzlingNSObject = YES; + break; + } + } + } + if (swizzlingNSObject) { + if (classNSObject()->isInitialized()) { + classNSObject()->setHasCustomRR(); + } else { + // NSObject not yet +initialized, so custom RR has not yet + // been checked, and setInitialized() will not notice the + // swizzle. + ClassNSObjectRRSwizzled = YES; + } + } + } + else if (isAWZSelector(meth->name)) { + Class metaclassNSObject = classNSObject()->ISA(); + + if ((metaclassNSObject->isInitialized() && + metaclassNSObject->hasCustomAWZ()) + || + MetaclassNSObjectAWZSwizzled) + { + // already custom, nothing would change + return; + } + + bool swizzlingNSObject = NO; + if (cls == metaclassNSObject) { + swizzlingNSObject = YES; + } else { + // Don't know the class. + // The only special case is metaclass NSObject. + for (const auto& meth2 : metaclassNSObject->data()->methods) { + if (meth == &meth2) { + swizzlingNSObject = YES; + break; + } + } + } + if (swizzlingNSObject) { + if (metaclassNSObject->isInitialized()) { + metaclassNSObject->setHasCustomAWZ(); + } else { + // NSObject not yet +initialized, so custom RR has not yet + // been checked, and setInitialized() will not notice the + // swizzle. + MetaclassNSObjectAWZSwizzled = YES; + } + } + } +} + + +/*********************************************************************** +* class_getIvarLayout +* Called by the garbage collector. +* The class must be nil or already realized. +* Locking: none +**********************************************************************/ +const uint8_t * +class_getIvarLayout(Class cls) +{ + if (cls) return cls->data()->ro->ivarLayout; + else return nil; +} + + +/*********************************************************************** +* class_getWeakIvarLayout +* Called by the garbage collector. +* The class must be nil or already realized. +* Locking: none +**********************************************************************/ +const uint8_t * +class_getWeakIvarLayout(Class cls) +{ + if (cls) return cls->data()->ro->weakIvarLayout; + else return nil; +} + + +/*********************************************************************** +* class_setIvarLayout +* Changes the class's ivar layout. +* nil layout means no unscanned ivars +* The class must be under construction. +* fixme: sanity-check layout vs instance size? +* fixme: sanity-check layout vs superclass? +* Locking: acquires runtimeLock +**********************************************************************/ +void +class_setIvarLayout(Class cls, const uint8_t *layout) +{ + if (!cls) return; + + rwlock_writer_t lock(runtimeLock); + + // Can only change layout of in-construction classes. + // note: if modifications to post-construction classes were + // allowed, there would be a race below (us vs. concurrent object_setIvar) + if (!(cls->data()->flags & RW_CONSTRUCTING)) { + _objc_inform("*** Can't set ivar layout for already-registered " + "class '%s'", cls->nameForLogging()); + return; + } + + class_ro_t *ro_w = make_ro_writeable(cls->data()); + + try_free(ro_w->ivarLayout); + ro_w->ivarLayout = ustrdupMaybeNil(layout); +} + +// SPI: Instance-specific object layout. + +void +_class_setIvarLayoutAccessor(Class cls, const uint8_t* (*accessor) (id object)) { + if (!cls) return; + + rwlock_writer_t lock(runtimeLock); + + class_ro_t *ro_w = make_ro_writeable(cls->data()); + + // FIXME: this really isn't safe to free if there are instances of this class already. 
+ if (!(cls->data()->flags & RW_HAS_INSTANCE_SPECIFIC_LAYOUT)) try_free(ro_w->ivarLayout); + ro_w->ivarLayout = (uint8_t *)accessor; + cls->setInfo(RW_HAS_INSTANCE_SPECIFIC_LAYOUT); +} + +const uint8_t * +_object_getIvarLayout(Class cls, id object) +{ + if (cls) { + const uint8_t* layout = cls->data()->ro->ivarLayout; + if (cls->data()->flags & RW_HAS_INSTANCE_SPECIFIC_LAYOUT) { + const uint8_t* (*accessor) (id object) = (const uint8_t* (*)(id))layout; + layout = accessor(object); + } + return layout; + } + return nil; +} + +/*********************************************************************** +* class_setWeakIvarLayout +* Changes the class's weak ivar layout. +* nil layout means no weak ivars +* The class must be under construction. +* fixme: sanity-check layout vs instance size? +* fixme: sanity-check layout vs superclass? +* Locking: acquires runtimeLock +**********************************************************************/ +void +class_setWeakIvarLayout(Class cls, const uint8_t *layout) +{ + if (!cls) return; + + rwlock_writer_t lock(runtimeLock); + + // Can only change layout of in-construction classes. + // note: if modifications to post-construction classes were + // allowed, there would be a race below (us vs. concurrent object_setIvar) + if (!(cls->data()->flags & RW_CONSTRUCTING)) { + _objc_inform("*** Can't set weak ivar layout for already-registered " + "class '%s'", cls->nameForLogging()); + return; + } + + class_ro_t *ro_w = make_ro_writeable(cls->data()); + + try_free(ro_w->weakIvarLayout); + ro_w->weakIvarLayout = ustrdupMaybeNil(layout); +} + + +/*********************************************************************** +* getIvar +* Look up an ivar by name. +* Locking: runtimeLock must be read- or write-locked by the caller. +**********************************************************************/ +static ivar_t *getIvar(Class cls, const char *name) +{ + runtimeLock.assertLocked(); + + const ivar_list_t *ivars; + assert(cls->isRealized()); + if ((ivars = cls->data()->ro->ivars)) { + for (auto& ivar : *ivars) { + if (!ivar.offset) continue; // anonymous bitfield + + // ivar.name may be nil for anonymous bitfields etc. + if (ivar.name && 0 == strcmp(name, ivar.name)) { + return &ivar; + } + } + } + + return nil; +} + + +/*********************************************************************** +* _class_getClassForIvar +* Given a class and an ivar that is in it or one of its superclasses, +* find the actual class that defined the ivar. 
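+*
+* Usage sketch (editor's addition, not in the original source), assuming an
+* ivar named "_value" exists somewhere in cls's hierarchy:
+*
+*   Ivar iv = class_getInstanceVariable(cls, "_value");
+*   Class owner = _class_getClassForIvar(cls, iv);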
+**********************************************************************/ +Class _class_getClassForIvar(Class cls, Ivar ivar) +{ + rwlock_reader_t lock(runtimeLock); + + for ( ; cls; cls = cls->superclass) { + if (auto ivars = cls->data()->ro->ivars) { + if (ivars->containsIvar(ivar)) { + return cls; + } + } + } + + return nil; +} + + +/*********************************************************************** +* _class_getVariable +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +Ivar +_class_getVariable(Class cls, const char *name) +{ + rwlock_reader_t lock(runtimeLock); + + for ( ; cls; cls = cls->superclass) { + ivar_t *ivar = getIvar(cls, name); + if (ivar) { + return ivar; + } + } + + return nil; +} + + +/*********************************************************************** +* class_conformsToProtocol +* fixme +* Locking: read-locks runtimeLock +**********************************************************************/ +BOOL class_conformsToProtocol(Class cls, Protocol *proto_gen) +{ + protocol_t *proto = newprotocol(proto_gen); + + if (!cls) return NO; + if (!proto_gen) return NO; + + rwlock_reader_t lock(runtimeLock); + + assert(cls->isRealized()); + + for (const auto& proto_ref : cls->data()->protocols) { + protocol_t *p = remapProtocol(proto_ref); + if (p == proto || protocol_conformsToProtocol_nolock(p, proto)) { + return YES; + } + } + + return NO; +} + + +/********************************************************************** +* addMethod +* fixme +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static IMP +addMethod(Class cls, SEL name, IMP imp, const char *types, bool replace) +{ + IMP result = nil; + + runtimeLock.assertWriting(); + + assert(types); + assert(cls->isRealized()); + + method_t *m; + if ((m = getMethodNoSuper_nolock(cls, name))) { + // already exists + if (!replace) { + result = m->imp; + } else { + result = _method_setImplementation(cls, m, imp); + } + } else { + // fixme optimize + method_list_t *newlist; + newlist = (method_list_t *)calloc(sizeof(*newlist), 1); + newlist->entsizeAndFlags = + (uint32_t)sizeof(method_t) | fixed_up_method_list; + newlist->count = 1; + newlist->first.name = name; + newlist->first.types = strdupIfMutable(types); + newlist->first.imp = imp; + + prepareMethodLists(cls, &newlist, 1, NO, NO); + cls->data()->methods.attachLists(&newlist, 1); + flushCaches(cls); + + result = nil; + } + + return result; +} + + +BOOL +class_addMethod(Class cls, SEL name, IMP imp, const char *types) +{ + if (!cls) return NO; + + rwlock_writer_t lock(runtimeLock); + return ! addMethod(cls, name, imp, types ?: "", NO); +} + + +IMP +class_replaceMethod(Class cls, SEL name, IMP imp, const char *types) +{ + if (!cls) return nil; + + rwlock_writer_t lock(runtimeLock); + return addMethod(cls, name, imp, types ?: "", YES); +} + + +/*********************************************************************** +* class_addIvar +* Adds an ivar to a class. 
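+* The class must be under construction, i.e. between objc_allocateClassPair()
+* and objc_registerClassPair().
+*
+* Usage sketch (editor's addition, not in the original source); note that the
+* `alignment` parameter is log2 of the byte alignment:
+*
+*   Class cls = objc_allocateClassPair(objc_getClass("NSObject"), "MyBox", 0);
+*   class_addIvar(cls, "_value", sizeof(id), 3 /*log2(8)*/, "@");
+*   objc_registerClassPair(cls);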
+* Locking: acquires runtimeLock
+**********************************************************************/
+BOOL
+class_addIvar(Class cls, const char *name, size_t size,
+              uint8_t alignment, const char *type)
+{
+    if (!cls) return NO;
+
+    if (!type) type = "";
+    if (name && 0 == strcmp(name, "")) name = nil;
+
+    rwlock_writer_t lock(runtimeLock);
+
+    assert(cls->isRealized());
+
+    // No class variables
+    if (cls->isMetaClass()) {
+        return NO;
+    }
+
+    // Can only add ivars to in-construction classes.
+    if (!(cls->data()->flags & RW_CONSTRUCTING)) {
+        return NO;
+    }
+
+    // Check for existing ivar with this name, unless it's anonymous.
+    // Check for too-big ivar.
+    // fixme check for superclass ivar too?
+    if ((name && getIvar(cls, name)) || size > UINT32_MAX) {
+        return NO;
+    }
+
+    class_ro_t *ro_w = make_ro_writeable(cls->data());
+
+    // fixme allocate less memory here
+
+    ivar_list_t *oldlist, *newlist;
+    if ((oldlist = (ivar_list_t *)cls->data()->ro->ivars)) {
+        size_t oldsize = oldlist->byteSize();
+        newlist = (ivar_list_t *)calloc(oldsize + oldlist->entsize(), 1);
+        memcpy(newlist, oldlist, oldsize);
+        free(oldlist);
+    } else {
+        newlist = (ivar_list_t *)calloc(sizeof(ivar_list_t), 1);
+        newlist->entsizeAndFlags = (uint32_t)sizeof(ivar_t);
+    }
+
+    uint32_t offset = cls->unalignedInstanceSize();
+    uint32_t alignMask = (1<<alignment)-1;
+    offset = (offset + alignMask) & ~alignMask;
+
+    ivar_t& ivar = newlist->get(newlist->count++);
+#if __x86_64__
+    // Deliberately over-allocate the ivar offset variable.
+    // Use calloc() to clear all 64 bits. See the note in struct ivar_t.
+    ivar.offset = (int32_t *)(int64_t *)calloc(sizeof(int64_t), 1);
+#else
+    ivar.offset = (int32_t *)malloc(sizeof(int32_t));
+#endif
+    *ivar.offset = offset;
+    ivar.name = name ? strdupIfMutable(name) : nil;
+    ivar.type = strdupIfMutable(type);
+    ivar.alignment_raw = alignment;
+    ivar.size = (uint32_t)size;
+
+    ro_w->ivars = newlist;
+    cls->setInstanceSize((uint32_t)(offset + size));
+
+    // Ivar layout updated in registerClass.
+
+    return YES;
+}
+
+
+/***********************************************************************
+* class_addProtocol
+* Adds a protocol to a class.
+* Locking: acquires runtimeLock
+**********************************************************************/
+BOOL class_addProtocol(Class cls, Protocol *protocol_gen)
+{
+    protocol_t *protocol = newprotocol(protocol_gen);
+
+    if (!cls) return NO;
+    if (class_conformsToProtocol(cls, protocol_gen)) return NO;
+
+    rwlock_writer_t lock(runtimeLock);
+
+    assert(cls->isRealized());
+
+    // fixme optimize
+    protocol_list_t *protolist = (protocol_list_t *)
+        malloc(sizeof(protocol_list_t) + sizeof(protocol_t *));
+    protolist->count = 1;
+    protolist->list[0] = (protocol_ref_t)protocol;
+
+    cls->data()->protocols.attachLists(&protolist, 1);
+
+    // fixme metaclass?
+
+    return YES;
+}
+
+
+/***********************************************************************
+* class_addProperty
+* Adds a property to a class.
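+*
+* Usage sketch (editor's addition, not in the original source); "q" is the
+* type encoding for long long and "N" marks the property nonatomic:
+*
+*   objc_property_attribute_t attrs[] = { { "T", "q" }, { "N", "" } };
+*   class_addProperty(cls, "count", attrs, 2);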
+* Locking: acquires runtimeLock +**********************************************************************/ +static bool +_class_addProperty(Class cls, const char *name, + const objc_property_attribute_t *attrs, unsigned int count, + bool replace) +{ + if (!cls) return NO; + if (!name) return NO; + + property_t *prop = class_getProperty(cls, name); + if (prop && !replace) { + // already exists, refuse to replace + return NO; + } + else if (prop) { + // replace existing + rwlock_writer_t lock(runtimeLock); + try_free(prop->attributes); + prop->attributes = copyPropertyAttributeString(attrs, count); + return YES; + } + else { + rwlock_writer_t lock(runtimeLock); + + assert(cls->isRealized()); + + property_list_t *proplist = (property_list_t *) + malloc(sizeof(*proplist)); + proplist->count = 1; + proplist->entsizeAndFlags = sizeof(proplist->first); + proplist->first.name = strdupIfMutable(name); + proplist->first.attributes = copyPropertyAttributeString(attrs, count); + + cls->data()->properties.attachLists(&proplist, 1); + + return YES; + } +} + +BOOL +class_addProperty(Class cls, const char *name, + const objc_property_attribute_t *attrs, unsigned int n) +{ + return _class_addProperty(cls, name, attrs, n, NO); +} + +void +class_replaceProperty(Class cls, const char *name, + const objc_property_attribute_t *attrs, unsigned int n) +{ + _class_addProperty(cls, name, attrs, n, YES); +} + + +/*********************************************************************** +* look_up_class +* Look up a class by name, and realize it. +* Locking: acquires runtimeLock +**********************************************************************/ +Class +look_up_class(const char *name, + bool includeUnconnected __attribute__((unused)), + bool includeClassHandler __attribute__((unused))) +{ + if (!name) return nil; + + Class result; + bool unrealized; + { + rwlock_reader_t lock(runtimeLock); + result = getClass(name); + unrealized = result && !result->isRealized(); + } + if (unrealized) { + rwlock_writer_t lock(runtimeLock); + realizeClass(result); + } + return result; +} + + +/*********************************************************************** +* objc_duplicateClass +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +Class +objc_duplicateClass(Class original, const char *name, + size_t extraBytes) +{ + Class duplicate; + + rwlock_writer_t lock(runtimeLock); + + assert(original->isRealized()); + assert(!original->isMetaClass()); + + duplicate = alloc_class_for_subclass(original, extraBytes); + + duplicate->initClassIsa(original->ISA()); + duplicate->superclass = original->superclass; + + duplicate->cache.initializeToEmpty(); + + class_rw_t *rw = (class_rw_t *)calloc(sizeof(*original->data()), 1); + rw->flags = (original->data()->flags | RW_COPIED_RO | RW_REALIZING); + rw->version = original->data()->version; + rw->firstSubclass = nil; + rw->nextSiblingClass = nil; + + duplicate->bits = original->bits; + duplicate->setData(rw); + + rw->ro = (class_ro_t *) + memdup(original->data()->ro, sizeof(*original->data()->ro)); + *(char **)&rw->ro->name = strdupIfMutable(name); + + rw->methods = original->data()->methods.duplicate(); + + // fixme dies when categories are added to the base + rw->properties = original->data()->properties; + rw->protocols = original->data()->protocols; + + duplicate->chooseClassArrayIndex(); + + if (duplicate->superclass) { + addSubclass(duplicate->superclass, duplicate); + // duplicate->isa == original->isa so don't addSubclass() 
for it + } else { + addRootClass(duplicate); + } + + // Don't methodize class - construction above is correct + + addNamedClass(duplicate, duplicate->data()->ro->name); + + if (PrintConnecting) { + _objc_inform("CLASS: realizing class '%s' (duplicate of %s) %p %p", + name, original->nameForLogging(), + (void*)duplicate, duplicate->data()->ro); + } + + duplicate->clearInfo(RW_REALIZING); + + return duplicate; +} + +/*********************************************************************** +* objc_initializeClassPair +* Locking: runtimeLock must be write-locked by the caller +**********************************************************************/ + +// &UnsetLayout is the default ivar layout during class construction +static const uint8_t UnsetLayout = 0; + +static void objc_initializeClassPair_internal(Class superclass, const char *name, Class cls, Class meta) +{ + runtimeLock.assertWriting(); + + class_ro_t *cls_ro_w, *meta_ro_w; + + cls->setData((class_rw_t *)calloc(sizeof(class_rw_t), 1)); + meta->setData((class_rw_t *)calloc(sizeof(class_rw_t), 1)); + cls_ro_w = (class_ro_t *)calloc(sizeof(class_ro_t), 1); + meta_ro_w = (class_ro_t *)calloc(sizeof(class_ro_t), 1); + cls->data()->ro = cls_ro_w; + meta->data()->ro = meta_ro_w; + + // Set basic info + + cls->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED | RW_REALIZING; + meta->data()->flags = RW_CONSTRUCTING | RW_COPIED_RO | RW_REALIZED | RW_REALIZING; + cls->data()->version = 0; + meta->data()->version = 7; + + cls_ro_w->flags = 0; + meta_ro_w->flags = RO_META; + if (!superclass) { + cls_ro_w->flags |= RO_ROOT; + meta_ro_w->flags |= RO_ROOT; + } + if (superclass) { + cls_ro_w->instanceStart = superclass->unalignedInstanceSize(); + meta_ro_w->instanceStart = superclass->ISA()->unalignedInstanceSize(); + cls->setInstanceSize(cls_ro_w->instanceStart); + meta->setInstanceSize(meta_ro_w->instanceStart); + } else { + cls_ro_w->instanceStart = 0; + meta_ro_w->instanceStart = (uint32_t)sizeof(objc_class); + cls->setInstanceSize((uint32_t)sizeof(id)); // just an isa + meta->setInstanceSize(meta_ro_w->instanceStart); + } + + cls_ro_w->name = strdupIfMutable(name); + meta_ro_w->name = strdupIfMutable(name); + + cls_ro_w->ivarLayout = &UnsetLayout; + cls_ro_w->weakIvarLayout = &UnsetLayout; + + meta->chooseClassArrayIndex(); + cls->chooseClassArrayIndex(); + + // Connect to superclasses and metaclasses + cls->initClassIsa(meta); + if (superclass) { + meta->initClassIsa(superclass->ISA()->ISA()); + cls->superclass = superclass; + meta->superclass = superclass->ISA(); + addSubclass(superclass, cls); + addSubclass(superclass->ISA(), meta); + } else { + meta->initClassIsa(meta); + cls->superclass = Nil; + meta->superclass = cls; + addRootClass(cls); + addSubclass(cls, meta); + } + + cls->cache.initializeToEmpty(); + meta->cache.initializeToEmpty(); +} + + +/*********************************************************************** +* verifySuperclass +* Sanity-check the superclass provided to +* objc_allocateClassPair, objc_initializeClassPair, or objc_readClassPair. +**********************************************************************/ +bool +verifySuperclass(Class superclass, bool rootOK) +{ + if (!superclass) { + // Superclass does not exist. + // If subclass may be a root class, this is OK. + // If subclass must not be a root class, this is bad. + return rootOK; + } + + // Superclass must be realized. + if (! superclass->isRealized()) return false; + + // Superclass must not be under construction. 
+ if (superclass->data()->flags & RW_CONSTRUCTING) return false; + + return true; +} + + +/*********************************************************************** +* objc_initializeClassPair +**********************************************************************/ +Class objc_initializeClassPair(Class superclass, const char *name, Class cls, Class meta) +{ + rwlock_writer_t lock(runtimeLock); + + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. + if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) { + return nil; + } + + objc_initializeClassPair_internal(superclass, name, cls, meta); + + return cls; +} + + +/*********************************************************************** +* objc_allocateClassPair +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +Class objc_allocateClassPair(Class superclass, const char *name, + size_t extraBytes) +{ + Class cls, meta; + + rwlock_writer_t lock(runtimeLock); + + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. + if (getClass(name) || !verifySuperclass(superclass, true/*rootOK*/)) { + return nil; + } + + // Allocate new classes. + cls = alloc_class_for_subclass(superclass, extraBytes); + meta = alloc_class_for_subclass(superclass, extraBytes); + + // fixme mangle the name if it looks swift-y? + objc_initializeClassPair_internal(superclass, name, cls, meta); + + return cls; +} + + +/*********************************************************************** +* objc_registerClassPair +* fixme +* Locking: acquires runtimeLock +**********************************************************************/ +void objc_registerClassPair(Class cls) +{ + rwlock_writer_t lock(runtimeLock); + + if ((cls->data()->flags & RW_CONSTRUCTED) || + (cls->ISA()->data()->flags & RW_CONSTRUCTED)) + { + _objc_inform("objc_registerClassPair: class '%s' was already " + "registered!", cls->data()->ro->name); + return; + } + + if (!(cls->data()->flags & RW_CONSTRUCTING) || + !(cls->ISA()->data()->flags & RW_CONSTRUCTING)) + { + _objc_inform("objc_registerClassPair: class '%s' was not " + "allocated with objc_allocateClassPair!", + cls->data()->ro->name); + return; + } + + // Clear "under construction" bit, set "done constructing" bit + cls->ISA()->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING); + cls->changeInfo(RW_CONSTRUCTED, RW_CONSTRUCTING | RW_REALIZING); + + // Add to named class table. + addNamedClass(cls, cls->data()->ro->name); +} + + +/*********************************************************************** +* objc_readClassPair() +* Read a class and metaclass as written by a compiler. +* Assumes the class and metaclass are not referenced by other things +* that might need to be fixed up (such as categories and subclasses). +* Does not call +load. +* Returns the class pointer, or nil. +* +* Locking: runtimeLock acquired by map_images +**********************************************************************/ +Class objc_readClassPair(Class bits, const struct objc_image_info *info) +{ + rwlock_writer_t lock(runtimeLock); + + // No info bits are significant yet. + (void)info; + + // Fail if the class name is in use. + // Fail if the superclass isn't kosher. 
+ const char *name = bits->mangledName(); + bool rootOK = bits->data()->flags & RO_ROOT; + if (getClass(name) || !verifySuperclass(bits->superclass, rootOK)){ + return nil; + } + + Class cls = readClass(bits, false/*bundle*/, false/*shared cache*/); + if (cls != bits) { + // This function isn't allowed to remap anything. + _objc_fatal("objc_readClassPair for class %s changed %p to %p", + cls->nameForLogging(), bits, cls); + } + realizeClass(cls); + + return cls; +} + + +/*********************************************************************** +* detach_class +* Disconnect a class from other data structures. +* Exception: does not remove the class from the +load list +* Call this before free_class. +* Locking: runtimeLock must be held by the caller. +**********************************************************************/ +static void detach_class(Class cls, bool isMeta) +{ + runtimeLock.assertWriting(); + + // categories not yet attached to this class + removeAllUnattachedCategoriesForClass(cls); + + // superclass's subclass list + if (cls->isRealized()) { + Class supercls = cls->superclass; + if (supercls) { + removeSubclass(supercls, cls); + } else { + removeRootClass(cls); + } + } + + // class tables and +load queue + if (!isMeta) { + removeNamedClass(cls, cls->mangledName()); + } +} + + +/*********************************************************************** +* free_class +* Frees a class's data structures. +* Call this after detach_class. +* Locking: runtimeLock must be held by the caller +**********************************************************************/ +static void free_class(Class cls) +{ + runtimeLock.assertWriting(); + + if (! cls->isRealized()) return; + + auto rw = cls->data(); + auto ro = rw->ro; + + cache_delete(cls); + + for (auto& meth : rw->methods) { + try_free(meth.types); + } + rw->methods.tryFree(); + + const ivar_list_t *ivars = ro->ivars; + if (ivars) { + for (auto& ivar : *ivars) { + try_free(ivar.offset); + try_free(ivar.name); + try_free(ivar.type); + } + try_free(ivars); + } + + for (auto& prop : rw->properties) { + try_free(prop.name); + try_free(prop.attributes); + } + rw->properties.tryFree(); + + rw->protocols.tryFree(); + + try_free(ro->ivarLayout); + try_free(ro->weakIvarLayout); + try_free(ro->name); + try_free(ro); + try_free(rw); + try_free(cls); +} + + +void objc_disposeClassPair(Class cls) +{ + rwlock_writer_t lock(runtimeLock); + + if (!(cls->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING)) || + !(cls->ISA()->data()->flags & (RW_CONSTRUCTED|RW_CONSTRUCTING))) + { + // class not allocated with objc_allocateClassPair + // disposing still-unregistered class is OK! + _objc_inform("objc_disposeClassPair: class '%s' was not " + "allocated with objc_allocateClassPair!", + cls->data()->ro->name); + return; + } + + if (cls->isMetaClass()) { + _objc_inform("objc_disposeClassPair: class '%s' is a metaclass, " + "not a class!", cls->data()->ro->name); + return; + } + + // Shouldn't have any live subclasses. 
+ if (cls->data()->firstSubclass) { + _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " + "including '%s'!", cls->data()->ro->name, + cls->data()->firstSubclass->nameForLogging()); + } + if (cls->ISA()->data()->firstSubclass) { + _objc_inform("objc_disposeClassPair: class '%s' still has subclasses, " + "including '%s'!", cls->data()->ro->name, + cls->ISA()->data()->firstSubclass->nameForLogging()); + } + + // don't remove_class_from_loadable_list() + // - it's not there and we don't have the lock + detach_class(cls->ISA(), YES); + detach_class(cls, NO); + free_class(cls->ISA()); + free_class(cls); +} + + +/*********************************************************************** +* objc_constructInstance +* Creates an instance of `cls` at the location pointed to by `bytes`. +* `bytes` must point to at least class_getInstanceSize(cls) bytes of +* well-aligned zero-filled memory. +* The new object's isa is set. Any C++ constructors are called. +* Returns `bytes` if successful. Returns nil if `cls` or `bytes` is +* nil, or if C++ constructors fail. +* Note: class_createInstance() and class_createInstances() preflight this. +**********************************************************************/ +id +objc_constructInstance(Class cls, void *bytes) +{ + if (!cls || !bytes) return nil; + + id obj = (id)bytes; + + // Read class's info bits all at once for performance + bool hasCxxCtor = cls->hasCxxCtor(); + bool hasCxxDtor = cls->hasCxxDtor(); + bool fast = cls->canAllocNonpointer(); + + if (fast) { + obj->initInstanceIsa(cls, hasCxxDtor); + } else { + obj->initIsa(cls); + } + + if (hasCxxCtor) { + return object_cxxConstructFromClass(obj, cls); + } else { + return obj; + } +} + + +/*********************************************************************** +* class_createInstance +* fixme +* Locking: none +**********************************************************************/ + +static __attribute__((always_inline)) +id +_class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone, + bool cxxConstruct = true, + size_t *outAllocatedSize = nil) +{ + if (!cls) return nil; + + assert(cls->isRealized()); + + // Read class's info bits all at once for performance + bool hasCxxCtor = cls->hasCxxCtor(); + bool hasCxxDtor = cls->hasCxxDtor(); + bool fast = cls->canAllocNonpointer(); + + size_t size = cls->instanceSize(extraBytes); + if (outAllocatedSize) *outAllocatedSize = size; + + id obj; + if (!zone && fast) { + obj = (id)calloc(1, size); + if (!obj) return nil; + obj->initInstanceIsa(cls, hasCxxDtor); + } + else { + if (zone) { + obj = (id)malloc_zone_calloc ((malloc_zone_t *)zone, 1, size); + } else { + obj = (id)calloc(1, size); + } + if (!obj) return nil; + + // Use raw pointer isa on the assumption that they might be + // doing something weird with the zone or RR. 
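+        // ("Raw pointer isa" means a plain Class pointer with no inline
+        // retain count; retain/release state for such objects is kept
+        // in the side tables instead.)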
+ obj->initIsa(cls); + } + + if (cxxConstruct && hasCxxCtor) { + obj = _objc_constructOrFree(obj, cls); + } + + return obj; +} + + +id +class_createInstance(Class cls, size_t extraBytes) +{ + return _class_createInstanceFromZone(cls, extraBytes, nil); +} + + +/*********************************************************************** +* class_createInstances +* fixme +* Locking: none +**********************************************************************/ +#if SUPPORT_NONPOINTER_ISA +#warning fixme optimize class_createInstances +#endif +unsigned +class_createInstances(Class cls, size_t extraBytes, + id *results, unsigned num_requested) +{ + return _class_createInstancesFromZone(cls, extraBytes, nil, + results, num_requested); +} + +/*********************************************************************** +* object_copyFromZone +* fixme +* Locking: none +**********************************************************************/ +static id +_object_copyFromZone(id oldObj, size_t extraBytes, void *zone) +{ + if (!oldObj) return nil; + if (oldObj->isTaggedPointer()) return oldObj; + + // fixme this doesn't handle C++ ivars correctly (#4619414) + + Class cls = oldObj->ISA(); + size_t size; + id obj = _class_createInstanceFromZone(cls, extraBytes, zone, false, &size); + if (!obj) return nil; + + // Copy everything except the isa, which was already set above. + uint8_t *copyDst = (uint8_t *)obj + sizeof(Class); + uint8_t *copySrc = (uint8_t *)oldObj + sizeof(Class); + size_t copySize = size - sizeof(Class); + memmove(copyDst, copySrc, copySize); + + fixupCopiedIvars(obj, oldObj); + + return obj; +} + + +/*********************************************************************** +* object_copy +* fixme +* Locking: none +**********************************************************************/ +id +object_copy(id oldObj, size_t extraBytes) +{ + return _object_copyFromZone(oldObj, extraBytes, malloc_default_zone()); +} + + +#if !(TARGET_OS_EMBEDDED || TARGET_OS_IPHONE) + +/*********************************************************************** +* class_createInstanceFromZone +* fixme +* Locking: none +**********************************************************************/ +id +class_createInstanceFromZone(Class cls, size_t extraBytes, void *zone) +{ + return _class_createInstanceFromZone(cls, extraBytes, zone); +} + +/*********************************************************************** +* object_copyFromZone +* fixme +* Locking: none +**********************************************************************/ +id +object_copyFromZone(id oldObj, size_t extraBytes, void *zone) +{ + return _object_copyFromZone(oldObj, extraBytes, zone); +} + +#endif + + +/*********************************************************************** +* objc_destructInstance +* Destroys an instance without freeing memory. +* Calls C++ destructors. +* Calls ARC ivar cleanup. +* Removes associative references. +* Returns `obj`. Does nothing if `obj` is nil. +**********************************************************************/ +void *objc_destructInstance(id obj) +{ + if (obj) { + // Read all of the flags at once for performance. + bool cxx = obj->hasCxxDtor(); + bool assoc = obj->hasAssociatedObjects(); + + // This order is important. 
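+        // (C++ destructors and associated-object release can still run code
+        // that uses the object, which is presumably why the weak-reference
+        // and side table cleanup in clearDeallocating comes last.)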
+ if (cxx) object_cxxDestruct(obj); + if (assoc) _object_remove_assocations(obj); + obj->clearDeallocating(); + } + + return obj; +} + + +/*********************************************************************** +* object_dispose +* fixme +* Locking: none +**********************************************************************/ +id +object_dispose(id obj) +{ + if (!obj) return nil; + + objc_destructInstance(obj); + free(obj); + + return nil; +} + + +/*********************************************************************** +* _objc_getFreedObjectClass +* fixme +* Locking: none +**********************************************************************/ +Class _objc_getFreedObjectClass (void) +{ + return nil; +} + + + +/*********************************************************************** +* Tagged pointer objects. +* +* Tagged pointer objects store the class and the object value in the +* object pointer; the "pointer" does not actually point to anything. +* +* Tagged pointer objects currently use this representation: +* (LSB) +* 1 bit set if tagged, clear if ordinary object pointer +* 3 bits tag index +* 60 bits payload +* (MSB) +* The tag index defines the object's class. +* The payload format is defined by the object's class. +* +* If the tag index is 0b111, the tagged pointer object uses an +* "extended" representation, allowing more classes but with smaller payloads: +* (LSB) +* 1 bit set if tagged, clear if ordinary object pointer +* 3 bits 0b111 +* 8 bits extended tag index +* 52 bits payload +* (MSB) +* +* Some architectures reverse the MSB and LSB in these representations. +* +* This representation is subject to change. Representation-agnostic SPI is: +* objc-internal.h for class implementers. +* objc-gdb.h for debuggers. +**********************************************************************/ +#if !SUPPORT_TAGGED_POINTERS + +// These variables are always provided for debuggers. +uintptr_t objc_debug_taggedpointer_mask = 0; +unsigned objc_debug_taggedpointer_slot_shift = 0; +uintptr_t objc_debug_taggedpointer_slot_mask = 0; +unsigned objc_debug_taggedpointer_payload_lshift = 0; +unsigned objc_debug_taggedpointer_payload_rshift = 0; +Class objc_debug_taggedpointer_classes[1] = { nil }; + +uintptr_t objc_debug_taggedpointer_ext_mask = 0; +unsigned objc_debug_taggedpointer_ext_slot_shift = 0; +uintptr_t objc_debug_taggedpointer_ext_slot_mask = 0; +unsigned objc_debug_taggedpointer_ext_payload_lshift = 0; +unsigned objc_debug_taggedpointer_ext_payload_rshift = 0; +Class objc_debug_taggedpointer_ext_classes[1] = { nil }; + +static void +disableTaggedPointers() { } + +#else + +// The "slot" used in the class table and given to the debugger +// includes the is-tagged bit. This makes objc_msgSend faster. +// The "ext" representation doesn't do that. 
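+//
+// Illustrative sketch, assuming the LSB layout described above: a basic
+// tagged pointer with tag index 3 and 60-bit payload P is encoded as
+//     ptr = ((uintptr_t)P << 4) | (3 << 1) | 1
+// so its class slot is the low 4 bits of the pointer, (3 << 1) | 1 == 7,
+// i.e. objc_tag_classes[7] (see classSlotForBasicTagIndex below).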
+ +uintptr_t objc_debug_taggedpointer_mask = _OBJC_TAG_MASK; +unsigned objc_debug_taggedpointer_slot_shift = _OBJC_TAG_SLOT_SHIFT; +uintptr_t objc_debug_taggedpointer_slot_mask = _OBJC_TAG_SLOT_MASK; +unsigned objc_debug_taggedpointer_payload_lshift = _OBJC_TAG_PAYLOAD_LSHIFT; +unsigned objc_debug_taggedpointer_payload_rshift = _OBJC_TAG_PAYLOAD_RSHIFT; +// objc_debug_taggedpointer_classes is defined in objc-msg-*.s + +uintptr_t objc_debug_taggedpointer_ext_mask = _OBJC_TAG_EXT_MASK; +unsigned objc_debug_taggedpointer_ext_slot_shift = _OBJC_TAG_EXT_SLOT_SHIFT; +uintptr_t objc_debug_taggedpointer_ext_slot_mask = _OBJC_TAG_EXT_SLOT_MASK; +unsigned objc_debug_taggedpointer_ext_payload_lshift = _OBJC_TAG_EXT_PAYLOAD_LSHIFT; +unsigned objc_debug_taggedpointer_ext_payload_rshift = _OBJC_TAG_EXT_PAYLOAD_RSHIFT; +// objc_debug_taggedpointer_ext_classes is defined in objc-msg-*.s + +static void +disableTaggedPointers() +{ + objc_debug_taggedpointer_mask = 0; + objc_debug_taggedpointer_slot_shift = 0; + objc_debug_taggedpointer_slot_mask = 0; + objc_debug_taggedpointer_payload_lshift = 0; + objc_debug_taggedpointer_payload_rshift = 0; + + objc_debug_taggedpointer_ext_mask = 0; + objc_debug_taggedpointer_ext_slot_shift = 0; + objc_debug_taggedpointer_ext_slot_mask = 0; + objc_debug_taggedpointer_ext_payload_lshift = 0; + objc_debug_taggedpointer_ext_payload_rshift = 0; +} + + +// Returns a pointer to the class's storage in the tagged class arrays. +// Assumes the tag is a valid basic tag. +static Class * +classSlotForBasicTagIndex(objc_tag_index_t tag) +{ + // Array index in objc_tag_classes includes the tagged bit itself +#if SUPPORT_MSB_TAGGED_POINTERS + return &objc_tag_classes[0x8 | tag]; +#else + return &objc_tag_classes[(tag << 1) | 1]; +#endif +} + + +// Returns a pointer to the class's storage in the tagged class arrays, +// or nil if the tag is out of range. +static Class * +classSlotForTagIndex(objc_tag_index_t tag) +{ + if (tag >= OBJC_TAG_First60BitPayload && tag <= OBJC_TAG_Last60BitPayload) { + return classSlotForBasicTagIndex(tag); + } + + if (tag >= OBJC_TAG_First52BitPayload && tag <= OBJC_TAG_Last52BitPayload) { + return &objc_tag_ext_classes[tag - OBJC_TAG_First52BitPayload]; + } + + return nil; +} + + +/*********************************************************************** +* _objc_registerTaggedPointerClass +* Set the class to use for the given tagged pointer index. +* Aborts if the tag is out of range, or if the tag is already +* used by some other class. +**********************************************************************/ +void +_objc_registerTaggedPointerClass(objc_tag_index_t tag, Class cls) +{ + if (objc_debug_taggedpointer_mask == 0) { + _objc_fatal("tagged pointers are disabled"); + } + + Class *slot = classSlotForTagIndex(tag); + if (!slot) { + _objc_fatal("tag index %u is invalid", (unsigned int)tag); + } + + Class oldCls = *slot; + + if (cls && oldCls && cls != oldCls) { + _objc_fatal("tag index %u used for two different classes " + "(was %p %s, now %p %s)", tag, + oldCls, oldCls->nameForLogging(), + cls, cls->nameForLogging()); + } + + *slot = cls; + + // Store a placeholder class in the basic tag slot that is + // reserved for the extended tag space, if it isn't set already. + // Do this lazily when the first extended tag is registered so + // that old debuggers characterize bogus pointers correctly more often. 
+ if (tag < OBJC_TAG_First60BitPayload || tag > OBJC_TAG_Last60BitPayload) { + Class *extSlot = classSlotForBasicTagIndex(OBJC_TAG_RESERVED_7); + if (*extSlot == nil) { + extern objc_class OBJC_CLASS_$___NSUnrecognizedTaggedPointer; + *extSlot = (Class)&OBJC_CLASS_$___NSUnrecognizedTaggedPointer; + } + } +} + + +/*********************************************************************** +* _objc_getClassForTag +* Returns the class that is using the given tagged pointer tag. +* Returns nil if no class is using that tag or the tag is out of range. +**********************************************************************/ +Class +_objc_getClassForTag(objc_tag_index_t tag) +{ + Class *slot = classSlotForTagIndex(tag); + if (slot) return *slot; + else return nil; +} + +#endif + + +#if SUPPORT_FIXUP + +OBJC_EXTERN void objc_msgSend_fixup(void); +OBJC_EXTERN void objc_msgSendSuper2_fixup(void); +OBJC_EXTERN void objc_msgSend_stret_fixup(void); +OBJC_EXTERN void objc_msgSendSuper2_stret_fixup(void); +#if defined(__i386__) || defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fpret_fixup(void); +#endif +#if defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fp2ret_fixup(void); +#endif + +OBJC_EXTERN void objc_msgSend_fixedup(void); +OBJC_EXTERN void objc_msgSendSuper2_fixedup(void); +OBJC_EXTERN void objc_msgSend_stret_fixedup(void); +OBJC_EXTERN void objc_msgSendSuper2_stret_fixedup(void); +#if defined(__i386__) || defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fpret_fixedup(void); +#endif +#if defined(__x86_64__) +OBJC_EXTERN void objc_msgSend_fp2ret_fixedup(void); +#endif + +/*********************************************************************** +* fixupMessageRef +* Repairs an old vtable dispatch call site. +* vtable dispatch itself is not supported. +**********************************************************************/ +static void +fixupMessageRef(message_ref_t *msg) +{ + msg->sel = sel_registerName((const char *)msg->sel); + + if (msg->imp == &objc_msgSend_fixup) { + if (msg->sel == SEL_alloc) { + msg->imp = (IMP)&objc_alloc; + } else if (msg->sel == SEL_allocWithZone) { + msg->imp = (IMP)&objc_allocWithZone; + } else if (msg->sel == SEL_retain) { + msg->imp = (IMP)&objc_retain; + } else if (msg->sel == SEL_release) { + msg->imp = (IMP)&objc_release; + } else if (msg->sel == SEL_autorelease) { + msg->imp = (IMP)&objc_autorelease; + } else { + msg->imp = &objc_msgSend_fixedup; + } + } + else if (msg->imp == &objc_msgSendSuper2_fixup) { + msg->imp = &objc_msgSendSuper2_fixedup; + } + else if (msg->imp == &objc_msgSend_stret_fixup) { + msg->imp = &objc_msgSend_stret_fixedup; + } + else if (msg->imp == &objc_msgSendSuper2_stret_fixup) { + msg->imp = &objc_msgSendSuper2_stret_fixedup; + } +#if defined(__i386__) || defined(__x86_64__) + else if (msg->imp == &objc_msgSend_fpret_fixup) { + msg->imp = &objc_msgSend_fpret_fixedup; + } +#endif +#if defined(__x86_64__) + else if (msg->imp == &objc_msgSend_fp2ret_fixup) { + msg->imp = &objc_msgSend_fp2ret_fixedup; + } +#endif +} + +// SUPPORT_FIXUP +#endif + + +// ProKit SPI +static Class setSuperclass(Class cls, Class newSuper) +{ + Class oldSuper; + + runtimeLock.assertWriting(); + + assert(cls->isRealized()); + assert(newSuper->isRealized()); + + oldSuper = cls->superclass; + removeSubclass(oldSuper, cls); + removeSubclass(oldSuper->ISA(), cls->ISA()); + + cls->superclass = newSuper; + cls->ISA()->superclass = newSuper->ISA(); + addSubclass(newSuper, cls); + addSubclass(newSuper->ISA(), cls->ISA()); + + // Flush subclass's method caches. 
+ flushCaches(cls); + flushCaches(cls->ISA()); + + return oldSuper; +} + + +Class class_setSuperclass(Class cls, Class newSuper) +{ + rwlock_writer_t lock(runtimeLock); + return setSuperclass(cls, newSuper); +} + + +// __OBJC2__ +#endif diff --git a/runtime/objc-runtime-old.h b/runtime/objc-runtime-old.h new file mode 100644 index 0000000..5f0ff02 --- /dev/null +++ b/runtime/objc-runtime-old.h @@ -0,0 +1,399 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_RUNTIME_OLD_H +#define _OBJC_RUNTIME_OLD_H + +#include "objc-private.h" + +#define CLS_CLASS 0x1 +#define CLS_META 0x2 +#define CLS_INITIALIZED 0x4 +#define CLS_POSING 0x8 +#define CLS_MAPPED 0x10 +#define CLS_FLUSH_CACHE 0x20 +#define CLS_GROW_CACHE 0x40 +#define CLS_NEED_BIND 0x80 +#define CLS_METHOD_ARRAY 0x100 +// the JavaBridge constructs classes with these markers +#define CLS_JAVA_HYBRID 0x200 +#define CLS_JAVA_CLASS 0x400 +// thread-safe +initialize +#define CLS_INITIALIZING 0x800 +// bundle unloading +#define CLS_FROM_BUNDLE 0x1000 +// C++ ivar support +#define CLS_HAS_CXX_STRUCTORS 0x2000 +// Lazy method list arrays +#define CLS_NO_METHOD_ARRAY 0x4000 +// +load implementation +#define CLS_HAS_LOAD_METHOD 0x8000 +// objc_allocateClassPair API +#define CLS_CONSTRUCTING 0x10000 +// visibility=hidden +#define CLS_HIDDEN 0x20000 +// available for use; was CLS_FINALIZE_ON_MAIN_THREAD +#define CLS_40000 0x40000 +// Lazy property list arrays +#define CLS_NO_PROPERTY_ARRAY 0x80000 +// +load implementation +#define CLS_CONNECTED 0x100000 +#define CLS_LOADED 0x200000 +// objc_allocateClassPair API +#define CLS_CONSTRUCTED 0x400000 +// class is leaf for cache flushing +#define CLS_LEAF 0x800000 +// class instances may have associative references +#define CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS 0x1000000 +// class has instance-specific GC layout +#define CLS_HAS_INSTANCE_SPECIFIC_LAYOUT 0x2000000 +// class compiled with ARC +#define CLS_IS_ARC 0x4000000 +// class is not ARC but has ARC-style weak ivar layout +#define CLS_HAS_WEAK_WITHOUT_ARC 0x8000000 + + +// Terminator for array of method lists +#define END_OF_METHODS_LIST ((struct old_method_list*)-1) + +#define ISCLASS(cls) (((cls)->info & CLS_CLASS) != 0) +#define ISMETA(cls) (((cls)->info & CLS_META) != 0) +#define GETMETA(cls) (ISMETA(cls) ? 
(cls) : (cls)->ISA()) + + +struct old_class_ext { + uint32_t size; + const uint8_t *weak_ivar_layout; + struct old_property_list **propertyLists; +}; + +struct old_category { + char *category_name; + char *class_name; + struct old_method_list *instance_methods; + struct old_method_list *class_methods; + struct old_protocol_list *protocols; + // Fields below this point are in version 7 or later only. + uint32_t size; + struct old_property_list *instance_properties; + // Check size for fields below this point. + struct old_property_list *class_properties; + + bool hasClassPropertiesField() const { + return size >= offsetof(old_category, class_properties) + sizeof(class_properties); + } +}; + +struct old_ivar { + char *ivar_name; + char *ivar_type; + int ivar_offset; +#ifdef __LP64__ + int space; +#endif +}; + +struct old_ivar_list { + int ivar_count; +#ifdef __LP64__ + int space; +#endif + /* variable length structure */ + struct old_ivar ivar_list[1]; +}; + + +struct old_method { + SEL method_name; + char *method_types; + IMP method_imp; +}; + +struct old_method_list { + void *obsolete; + + int method_count; +#ifdef __LP64__ + int space; +#endif + /* variable length structure */ + struct old_method method_list[1]; +}; + +struct old_protocol { + Class isa; + const char *protocol_name; + struct old_protocol_list *protocol_list; + struct objc_method_description_list *instance_methods; + struct objc_method_description_list *class_methods; +}; + +struct old_protocol_list { + struct old_protocol_list *next; + long count; + struct old_protocol *list[1]; +}; + +struct old_protocol_ext { + uint32_t size; + struct objc_method_description_list *optional_instance_methods; + struct objc_method_description_list *optional_class_methods; + struct old_property_list *instance_properties; + const char **extendedMethodTypes; + struct old_property_list *class_properties; + + bool hasClassPropertiesField() const { + return size >= offsetof(old_protocol_ext, class_properties) + sizeof(class_properties); + } +}; + + +struct old_property { + const char *name; + const char *attributes; +}; + +struct old_property_list { + uint32_t entsize; + uint32_t count; + struct old_property first; +}; + + +struct objc_class : objc_object { + Class superclass; + const char *name; + uint32_t version; + uint32_t info; + uint32_t instance_size; + struct old_ivar_list *ivars; + struct old_method_list **methodLists; + Cache cache; + struct old_protocol_list *protocols; + // CLS_EXT only + const uint8_t *ivar_layout; + struct old_class_ext *ext; + + void setInfo(uint32_t set) { + OSAtomicOr32Barrier(set, (volatile uint32_t *)&info); + } + + void clearInfo(uint32_t clear) { + OSAtomicXor32Barrier(clear, (volatile uint32_t *)&info); + } + + + // set and clear must not overlap + void changeInfo(uint32_t set, uint32_t clear) { + assert((set & clear) == 0); + + uint32_t oldf, newf; + do { + oldf = this->info; + newf = (oldf | set) & ~clear; + } while (!OSAtomicCompareAndSwap32Barrier(oldf, newf, (volatile int32_t *)&info)); + } + + bool hasCxxCtor() { + // set_superclass propagates the flag from the superclass. + return info & CLS_HAS_CXX_STRUCTORS; + } + + bool hasCxxDtor() { + return hasCxxCtor(); // one bit for both ctor and dtor + } + + // Return YES if the class's ivars are managed by ARC, + // or the class is MRC but has ARC-style weak ivars. + bool hasAutomaticIvars() { + return info & (CLS_IS_ARC | CLS_HAS_WEAK_WITHOUT_ARC); + } + + // Return YES if the class's ivars are managed by ARC. 
+ bool isARC() { + return info & CLS_IS_ARC; + } + + bool hasCustomRR() { + return true; + } + void setHasCustomRR(bool = false) { } + void setHasDefaultRR() { } + void printCustomRR(bool) { } + + bool hasCustomAWZ() { + return true; + } + void setHasCustomAWZ(bool = false) { } + void setHasDefaultAWZ() { } + void printCustomAWZ(bool) { } + + bool instancesHaveAssociatedObjects() { + return info & CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS; + } + + void setInstancesHaveAssociatedObjects() { + setInfo(CLS_INSTANCES_HAVE_ASSOCIATED_OBJECTS); + } + + bool shouldGrowCache() { + return info & CLS_GROW_CACHE; + } + + void setShouldGrowCache(bool grow) { + if (grow) setInfo(CLS_GROW_CACHE); + else clearInfo(CLS_GROW_CACHE); + } + + // +initialize bits are stored on the metaclass only + bool isInitializing() { + return getMeta()->info & CLS_INITIALIZING; + } + + // +initialize bits are stored on the metaclass only + void setInitializing() { + getMeta()->setInfo(CLS_INITIALIZING); + } + + // +initialize bits are stored on the metaclass only + bool isInitialized() { + return getMeta()->info & CLS_INITIALIZED; + } + + // +initialize bits are stored on the metaclass only + void setInitialized() { + getMeta()->changeInfo(CLS_INITIALIZED, CLS_INITIALIZING); + } + + bool isLoadable() { + // A class registered for +load is ready for +load to be called + // if it is connected. + return isConnected(); + } + + IMP getLoadMethod(); + + bool isFuture(); + + bool isConnected(); + + const char *mangledName() { return name; } + const char *demangledName() { return name; } + const char *nameForLogging() { return name; } + + bool isMetaClass() { + return info & CLS_META; + } + + // NOT identical to this->ISA() when this is a metaclass + Class getMeta() { + if (isMetaClass()) return (Class)this; + else return this->ISA(); + } + + // May be unaligned depending on class's ivars. + uint32_t unalignedInstanceStart() { + // This is not simply superclass->instance_size. + // superclass->instance_size is padded to its sizeof() boundary, + // which may envelop one of this class's ivars. + // That in turn would break ARC-style ivar layouts. + // Instead, we use the address of this class's first ivar when possible. + if (!superclass) return 0; + if (!ivars || ivars->ivar_count == 0) return superclass->instance_size; + return ivars->ivar_list[0].ivar_offset; + } + + // Class's instance start rounded up to a pointer-size boundary. + // This is used for ARC layout bitmaps. + uint32_t alignedInstanceStart() { + return word_align(unalignedInstanceStart()); + } + + + // May be unaligned depending on class's ivars. + uint32_t unalignedInstanceSize() { + return instance_size; + } + + // Class's ivar size rounded up to a pointer-size boundary. + uint32_t alignedInstanceSize() { + return word_align(unalignedInstanceSize()); + } + + size_t instanceSize(size_t extraBytes) { + size_t size = alignedInstanceSize() + extraBytes; + // CF requires all objects be at least 16 bytes. 
+ if (size < 16) size = 16; + return size; + } + +}; + + +#include "hashtable2.h" + +__BEGIN_DECLS + +#define oldprotocol(proto) ((struct old_protocol *)proto) +#define oldmethod(meth) ((struct old_method *)meth) +#define oldcategory(cat) ((struct old_category *)cat) +#define oldivar(ivar) ((struct old_ivar *)ivar) +#define oldproperty(prop) ((struct old_property *)prop) + +extern NXHashTable *class_hash; + +extern void unload_class(Class cls); + +extern IMP lookupNamedMethodInMethodList(struct old_method_list *mlist, const char *meth_name); +extern void _objc_insertMethods(Class cls, struct old_method_list *mlist, struct old_category *cat); +extern void _objc_removeMethods(Class cls, struct old_method_list *mlist); +extern void _objc_flush_caches (Class cls); +extern bool _class_addProperties(Class cls, struct old_property_list *additions); +extern bool _class_hasLoadMethod(Class cls); +extern void change_class_references(Class imposter, Class original, Class copy, bool changeSuperRefs); +extern void flush_marked_caches(void); +extern void set_superclass(Class cls, Class supercls, bool cls_is_new); +extern void try_free(const void *p); + +extern struct old_property *property_list_nth(const struct old_property_list *plist, uint32_t i); +extern struct old_property **copyPropertyList(struct old_property_list *plist, unsigned int *outCount); + +extern struct objc_method_description * lookup_protocol_method(struct old_protocol *proto, SEL aSel, bool isRequiredMethod, bool isInstanceMethod, bool recursive); + +// used by flush_caches outside objc-cache.m +extern void _cache_flush(Class cls); +#ifdef OBJC_INSTRUMENTED +extern unsigned int LinearFlushCachesCount; +extern unsigned int LinearFlushCachesVisitedCount; +extern unsigned int MaxLinearFlushCachesVisitedCount; +extern unsigned int NonlinearFlushCachesCount; +extern unsigned int NonlinearFlushCachesClassCount; +extern unsigned int NonlinearFlushCachesVisitedCount; +extern unsigned int MaxNonlinearFlushCachesVisitedCount; +extern unsigned int IdealFlushCachesCount; +extern unsigned int MaxIdealFlushCachesCount; +#endif + +__END_DECLS + +#endif diff --git a/runtime/objc-runtime-old.mm b/runtime/objc-runtime-old.mm new file mode 100644 index 0000000..d6b393e --- /dev/null +++ b/runtime/objc-runtime-old.mm @@ -0,0 +1,3233 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-runtime-old.m +* Support for old-ABI classes and images. 
+**********************************************************************/ + +/*********************************************************************** + * Class loading and connecting (GrP 2004-2-11) + * + * When images are loaded (during program startup or otherwise), the + * runtime needs to load classes and categories from the images, connect + * classes to superclasses and categories to parent classes, and call + * +load methods. + * + * The Objective-C runtime can cope with classes arriving in any order. + * That is, a class may be discovered by the runtime before some + * superclass is known. To handle out-of-order class loads, the + * runtime uses a "pending class" system. + * + * (Historical note) + * Panther and earlier: many classes arrived out-of-order because of + * the poorly-ordered callback from dyld. However, the runtime's + * pending mechanism only handled "missing superclass" and not + * "present superclass but missing higher class". See Radar #3225652. + * Tiger: The runtime's pending mechanism was augmented to handle + * arbitrary missing classes. In addition, dyld was rewritten and + * now sends the callbacks in strictly bottom-up link order. + * The pending mechanism may now be needed only for rare and + * hard to construct programs. + * (End historical note) + * + * A class when first seen in an image is considered "unconnected". + * It is stored in `unconnected_class_hash`. If all of the class's + * superclasses exist and are already "connected", then the new class + * can be connected to its superclasses and moved to `class_hash` for + * normal use. Otherwise, the class waits in `unconnected_class_hash` + * until the superclasses finish connecting. + * + * A "connected" class is + * (1) in `class_hash`, + * (2) connected to its superclasses, + * (3) has no unconnected superclasses, + * (4) is otherwise initialized and ready for use, and + * (5) is eligible for +load if +load has not already been called. + * + * An "unconnected" class is + * (1) in `unconnected_class_hash`, + * (2) not connected to its superclasses, + * (3) has an immediate superclass which is either missing or unconnected, + * (4) is not ready for use, and + * (5) is not yet eligible for +load. + * + * Image mapping is NOT CURRENTLY THREAD-SAFE with respect to just about + * anything. Image mapping IS RE-ENTRANT in several places: superclass + * lookup may cause ZeroLink to load another image, and +load calls may + * cause dyld to load another image. + * + * Image mapping sequence: + * + * Read all classes in all new images. + * Add them all to unconnected_class_hash. + * Note any +load implementations before categories are attached. + * Attach any pending categories. + * Read all categories in all new images. + * Attach categories whose parent class exists (connected or not), + * and pend the rest. + * Mark them all eligible for +load (if implemented), even if the + * parent class is missing. + * Try to connect all classes in all new images. + * If the superclass is missing, pend the class + * If the superclass is unconnected, try to recursively connect it + * If the superclass is connected: + * connect the class + * mark the class eligible for +load, if implemented + * fix up any pended classrefs referring to the class + * connect any pended subclasses of the class + * Resolve selector refs and class refs in all new images. + * Class refs whose classes still do not exist are pended. + * Fix up protocol objects in all new images. + * Call +load for classes and categories. 
+ * May include classes or categories that are not in these images,
+ * but are newly eligible because of these images.
+ * Class +loads will be called superclass-first because of the
+ * superclass-first nature of the connecting process.
+ * Category +load needs to be deferred until the parent class is
+ * connected and has had its +load called.
+ *
+ * Performance: all classes are read before any categories are read.
+ * Fewer categories need be pended for lack of a parent class.
+ *
+ * Performance: all categories are attempted to be attached before
+ * any classes are connected. Fewer class caches need be flushed.
+ * (Unconnected classes and their respective subclasses are guaranteed
+ * to be un-messageable, so their caches will be empty.)
+ *
+ * Performance: all classes are read before any classes are connected.
+ * Fewer classes need be pended for lack of a superclass.
+ *
+ * Correctness: all selector and class refs are fixed before any
+ * protocol fixups or +load methods. libobjc itself contains selector
+ * and class refs which are used in protocol fixup and +load.
+ *
+ * Correctness: +load methods are scheduled in bottom-up link order.
+ * This constraint is in addition to superclass order. Some +load
+ * implementations expect to use another class in a linked-to library,
+ * even if the two classes don't share a direct superclass relationship.
+ *
+ * Correctness: all classes are scanned for +load before any categories
+ * are attached. Otherwise, if a category implements +load and its class
+ * has no class methods, the class's +load scan would find the category's
+ * +load method, which would then be called twice.
+ *
+ * Correctness: pended class refs are not fixed up until the class is
+ * connected. Classes with missing weak superclasses remain unconnected.
+ * Class refs to classes with missing weak superclasses must be nil.
+ * Therefore class refs to unconnected classes must remain un-fixed.
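+ *
+ * Worked example (class names are hypothetical): if class B, a subclass
+ * of A, is read from an image before A is known, B sits in
+ * unconnected_class_hash and is recorded in pendingSubclassesMap under
+ * the key "A"; any class refs to B are pended in pendingClassRefsMap.
+ * When A is connected, resolve_subclasses_of_class() connects B, and
+ * B's own connection then fixes up the pended refs via
+ * resolve_references_to_class().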
+ * + **********************************************************************/ + +#if !__OBJC2__ + +#include "objc-private.h" +#include "objc-runtime-old.h" +#include "objc-file-old.h" +#include "objc-cache-old.h" +#include "objc-loadmethod.h" + + +typedef struct _objc_unresolved_category +{ + struct _objc_unresolved_category *next; + old_category *cat; // may be nil + long version; +} _objc_unresolved_category; + +typedef struct _PendingSubclass +{ + Class subclass; // subclass to finish connecting; may be nil + struct _PendingSubclass *next; +} PendingSubclass; + +typedef struct _PendingClassRef +{ + Class *ref; // class reference to fix up; may be nil + // (ref & 1) is a metaclass reference + struct _PendingClassRef *next; +} PendingClassRef; + + +static uintptr_t classHash(void *info, Class data); +static int classIsEqual(void *info, Class name, Class cls); +static int _objc_defaultClassHandler(const char *clsName); +static inline NXMapTable *pendingClassRefsMapTable(void); +static inline NXMapTable *pendingSubclassesMapTable(void); +static void pendClassInstallation(Class cls, const char *superName); +static void pendClassReference(Class *ref, const char *className, bool isMeta); +static void resolve_references_to_class(Class cls); +static void resolve_subclasses_of_class(Class cls); +static void really_connect_class(Class cls, Class supercls); +static bool connect_class(Class cls); +static void map_method_descs (struct objc_method_description_list * methods, bool copy); +static void _objcTweakMethodListPointerForClass(Class cls); +static inline void _objc_add_category(Class cls, old_category *category, int version); +static bool _objc_add_category_flush_caches(Class cls, old_category *category, int version); +static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat); +static void resolve_categories_for_class(Class cls); +static bool _objc_register_category(old_category *cat, int version); + + +// Function called when a class is loaded from an image +void (*callbackFunction)(Class, Category) = 0; + +// Hash table of classes +NXHashTable * class_hash = 0; +static NXHashTablePrototype classHashPrototype = +{ + (uintptr_t (*) (const void *, const void *)) classHash, + (int (*)(const void *, const void *, const void *)) classIsEqual, + NXNoEffectFree, 0 +}; + +// Hash table of unconnected classes +static NXHashTable *unconnected_class_hash = nil; + +// Exported copy of class_hash variable (hook for debugging tools) +NXHashTable *_objc_debug_class_hash = nil; + +// Category and class registries +// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles +// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove +static NXMapTable * category_hash = nil; + +// Keys are COPIES of strings, to prevent stale pointers with unloaded bundles +// Use NXMapKeyCopyingInsert and NXMapKeyFreeingRemove +static NXMapTable * pendingClassRefsMap = nil; +static NXMapTable * pendingSubclassesMap = nil; + +// Protocols +static NXMapTable *protocol_map = nil; // name -> protocol +static NXMapTable *protocol_ext_map = nil; // protocol -> protocol ext + +// Function pointer objc_getClass calls through when class is not found +static int (*objc_classHandler) (const char *) = _objc_defaultClassHandler; + +// Function pointer called by objc_getClass and objc_lookupClass when +// class is not found. _objc_classLoader is called before objc_classHandler. 
+static BOOL (*_objc_classLoader)(const char *) = nil; + + +/*********************************************************************** +* objc_dump_class_hash. Log names of all known classes. +**********************************************************************/ +void objc_dump_class_hash(void) +{ + NXHashTable *table; + unsigned count; + Class data; + NXHashState state; + + table = class_hash; + count = 0; + state = NXInitHashState (table); + while (NXNextHashState (table, &state, (void **) &data)) + printf ("class %d: %s\n", ++count, data->nameForLogging()); +} + + +/*********************************************************************** +* _objc_init_class_hash. Return the class lookup table, create it if +* necessary. +**********************************************************************/ +void _objc_init_class_hash(void) +{ + // Do nothing if class hash table already exists + if (class_hash) + return; + + // class_hash starts small, with only enough capacity for libobjc itself. + // If a second library is found by map_images(), class_hash is immediately + // resized to capacity 1024 to cut down on rehashes. + // Old numbers: A smallish Foundation+AppKit program will have + // about 520 classes. Larger apps (like IB or WOB) have more like + // 800 classes. Some customers have massive quantities of classes. + // Foundation-only programs aren't likely to notice the ~6K loss. + class_hash = NXCreateHashTable(classHashPrototype, 16, nil); + _objc_debug_class_hash = class_hash; +} + + +/*********************************************************************** +* objc_getClassList. Return the known classes. +**********************************************************************/ +int objc_getClassList(Class *buffer, int bufferLen) +{ + NXHashState state; + Class cls; + int cnt, num; + + mutex_locker_t lock(classLock); + if (!class_hash) return 0; + + num = NXCountHashTable(class_hash); + if (nil == buffer) return num; + + cnt = 0; + state = NXInitHashState(class_hash); + while (cnt < bufferLen && + NXNextHashState(class_hash, &state, (void **)&cls)) + { + buffer[cnt++] = cls; + } + + return num; +} + + +/*********************************************************************** +* objc_copyClassList +* Returns pointers to all classes. +* This requires all classes be realized, which is regretfully non-lazy. +* +* outCount may be nil. *outCount is the number of classes returned. +* If the returned array is not nil, it is nil-terminated and must be +* freed with free(). +* Locking: acquires classLock +**********************************************************************/ +Class * +objc_copyClassList(unsigned int *outCount) +{ + Class *result; + unsigned int count; + + mutex_locker_t lock(classLock); + result = nil; + count = class_hash ? NXCountHashTable(class_hash) : 0; + + if (count > 0) { + Class cls; + NXHashState state = NXInitHashState(class_hash); + result = (Class *)malloc((1+count) * sizeof(Class)); + count = 0; + while (NXNextHashState(class_hash, &state, (void **)&cls)) { + result[count++] = cls; + } + result[count] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* objc_copyProtocolList +* Returns pointers to all protocols. 
+* Locking: acquires classLock +**********************************************************************/ +Protocol * __unsafe_unretained * +objc_copyProtocolList(unsigned int *outCount) +{ + int count, i; + Protocol *proto; + const char *name; + NXMapState state; + Protocol **result; + + mutex_locker_t lock(classLock); + + count = NXCountMapTable(protocol_map); + if (count == 0) { + if (outCount) *outCount = 0; + return nil; + } + + result = (Protocol **)calloc(1 + count, sizeof(Protocol *)); + + i = 0; + state = NXInitMapState(protocol_map); + while (NXNextMapState(protocol_map, &state, + (const void **)&name, (const void **)&proto)) + { + result[i++] = proto; + } + + result[i++] = nil; + assert(i == count+1); + + if (outCount) *outCount = count; + return result; +} + + +/*********************************************************************** +* objc_getClasses. Return class lookup table. +* +* NOTE: This function is very dangerous, since you cannot safely use +* the hashtable without locking it, and the lock is private! +**********************************************************************/ +void *objc_getClasses(void) +{ + OBJC_WARN_DEPRECATED; + + // Return the class lookup hash table + return class_hash; +} + + +/*********************************************************************** +* classHash. +**********************************************************************/ +static uintptr_t classHash(void *info, Class data) +{ + // Nil classes hash to zero + if (!data) + return 0; + + // Call through to real hash function + return _objc_strhash (data->mangledName()); +} + +/*********************************************************************** +* classIsEqual. Returns whether the class names match. If we ever +* check more than the name, routines like objc_lookUpClass have to +* change as well. +**********************************************************************/ +static int classIsEqual(void *info, Class name, Class cls) +{ + // Standard string comparison + return strcmp(name->mangledName(), cls->mangledName()) == 0; +} + + +// Unresolved future classes +static NXHashTable *future_class_hash = nil; + +// Resolved future<->original classes +static NXMapTable *future_class_to_original_class_map = nil; +static NXMapTable *original_class_to_future_class_map = nil; + +// CF requests about 20 future classes; HIToolbox requests one. +#define FUTURE_COUNT 32 + + +/*********************************************************************** +* setOriginalClassForFutureClass +* Record resolution of a future class. +**********************************************************************/ +static void setOriginalClassForFutureClass(Class futureClass, + Class originalClass) +{ + if (!future_class_to_original_class_map) { + future_class_to_original_class_map = + NXCreateMapTable(NXPtrValueMapPrototype, FUTURE_COUNT); + original_class_to_future_class_map = + NXCreateMapTable(NXPtrValueMapPrototype, FUTURE_COUNT); + } + + NXMapInsert (future_class_to_original_class_map, + futureClass, originalClass); + NXMapInsert (original_class_to_future_class_map, + originalClass, futureClass); + + if (PrintFuture) { + _objc_inform("FUTURE: using %p instead of %p for %s", (void*)futureClass, (void*)originalClass, originalClass->name); + } +} + +/*********************************************************************** +* getOriginalClassForFutureClass +* getFutureClassForOriginalClass +* Switch between a future class and its corresponding original class. +* The future class is the one actually in use. 
+* The original class is the one from disk. +**********************************************************************/ +/* +static Class +getOriginalClassForFutureClass(Class futureClass) +{ + if (!future_class_to_original_class_map) return Nil; + return NXMapGet (future_class_to_original_class_map, futureClass); +} +*/ +static Class +getFutureClassForOriginalClass(Class originalClass) +{ + if (!original_class_to_future_class_map) return Nil; + return (Class)NXMapGet(original_class_to_future_class_map, originalClass); +} + + +/*********************************************************************** +* makeFutureClass +* Initialize the memory in *cls with an unresolved future class with the +* given name. The memory is recorded in future_class_hash. +**********************************************************************/ +static void makeFutureClass(Class cls, const char *name) +{ + // CF requests about 20 future classes, plus HIToolbox has one. + if (!future_class_hash) { + future_class_hash = + NXCreateHashTable(classHashPrototype, FUTURE_COUNT, nil); + } + + cls->name = strdup(name); + NXHashInsert(future_class_hash, cls); + + if (PrintFuture) { + _objc_inform("FUTURE: reserving %p for %s", (void*)cls, name); + } +} + + +/*********************************************************************** +* _objc_allocateFutureClass +* Allocate an unresolved future class for the given class name. +* Returns any existing allocation if one was already made. +* Assumes the named class doesn't exist yet. +* Not thread safe. +**********************************************************************/ +Class _objc_allocateFutureClass(const char *name) +{ + Class cls; + + if (future_class_hash) { + objc_class query; + query.name = name; + if ((cls = (Class)NXHashGet(future_class_hash, &query))) { + // Already have a future class for this name. + return cls; + } + } + + cls = _calloc_class(sizeof(objc_class)); + makeFutureClass(cls, name); + return cls; +} + + +/*********************************************************************** +* objc_getFutureClass. Return the id of the named class. +* If the class does not exist, return an uninitialized class +* structure that will be used for the class when and if it +* does get loaded. +* Not thread safe. +**********************************************************************/ +Class objc_getFutureClass(const char *name) +{ + Class cls; + + // YES unconnected, NO class handler + // (unconnected is OK because it will someday be the real class) + cls = look_up_class(name, YES, NO); + if (cls) { + if (PrintFuture) { + _objc_inform("FUTURE: found %p already in use for %s", + (void*)cls, name); + } + return cls; + } + + // No class or future class with that name yet. Make one. + // fixme not thread-safe with respect to + // simultaneous library load or getFutureClass. + return _objc_allocateFutureClass(name); +} + + +BOOL _class_isFutureClass(Class cls) +{ + return cls && cls->isFuture(); +} + +bool objc_class::isFuture() +{ + return future_class_hash && NXHashGet(future_class_hash, this); +} + + +/*********************************************************************** +* _objc_defaultClassHandler. Default objc_classHandler. Does nothing. +**********************************************************************/ +static int _objc_defaultClassHandler(const char *clsName) +{ + // Return zero so objc_getClass doesn't bother re-searching + return 0; +} + +/*********************************************************************** +* objc_setClassHandler. 
Set objc_classHandler to the specified value. +* +* NOTE: This should probably deal with userSuppliedHandler being nil, +* because the objc_classHandler caller does not check... it would bus +* error. It would make sense to handle nil by restoring the default +* handler. Is anyone hacking with this, though? +**********************************************************************/ +void objc_setClassHandler(int (*userSuppliedHandler)(const char *)) +{ + OBJC_WARN_DEPRECATED; + + objc_classHandler = userSuppliedHandler; +} + + +/*********************************************************************** +* _objc_setClassLoader +* Similar to objc_setClassHandler, but objc_classLoader is used for +* both objc_getClass() and objc_lookupClass(), and objc_classLoader +* pre-empts objc_classHandler. +**********************************************************************/ +void _objc_setClassLoader(BOOL (*newClassLoader)(const char *)) +{ + _objc_classLoader = newClassLoader; +} + + +/*********************************************************************** +* objc_getProtocol +* Get a protocol by name, or nil. +**********************************************************************/ +Protocol *objc_getProtocol(const char *name) +{ + mutex_locker_t lock(classLock); + if (!protocol_map) return nil; + return (Protocol *)NXMapGet(protocol_map, name); +} + + +/*********************************************************************** +* look_up_class +* Map a class name to a class using various methods. +* This is the common implementation of objc_lookUpClass and objc_getClass, +* and is also used internally to get additional search options. +* Sequence: +* 1. class_hash +* 2. unconnected_class_hash (optional) +* 3. classLoader callback +* 4. classHandler callback (optional) +**********************************************************************/ +Class look_up_class(const char *aClassName, bool includeUnconnected, + bool includeClassHandler) +{ + bool includeClassLoader = YES; // class loader cannot be skipped + Class result = nil; + struct objc_class query; + + query.name = aClassName; + + retry: + + if (!result && class_hash) { + // Check ordinary classes + mutex_locker_t lock(classLock); + result = (Class)NXHashGet(class_hash, &query); + } + + if (!result && includeUnconnected && unconnected_class_hash) { + // Check not-yet-connected classes + mutex_locker_t lock(classLock); + result = (Class)NXHashGet(unconnected_class_hash, &query); + } + + if (!result && includeClassLoader && _objc_classLoader) { + // Try class loader callback + if ((*_objc_classLoader)(aClassName)) { + // Re-try lookup without class loader + includeClassLoader = NO; + goto retry; + } + } + + if (!result && includeClassHandler && objc_classHandler) { + // Try class handler callback + if ((*objc_classHandler)(aClassName)) { + // Re-try lookup without class handler or class loader + includeClassLoader = NO; + includeClassHandler = NO; + goto retry; + } + } + + return result; +} + + +/*********************************************************************** +* objc_class::isConnected +* Returns TRUE if class cls is connected. +* A connected class has either a connected superclass or a nil superclass, +* and is present in class_hash. +**********************************************************************/ +bool objc_class::isConnected() +{ + mutex_locker_t lock(classLock); + return NXHashMember(class_hash, this); +} + + +/*********************************************************************** +* pendingClassRefsMapTable. 
Return a pointer to the lookup table for +* pending class refs. +**********************************************************************/ +static inline NXMapTable *pendingClassRefsMapTable(void) +{ + // Allocate table if needed + if (!pendingClassRefsMap) { + pendingClassRefsMap = NXCreateMapTable(NXStrValueMapPrototype, 10); + } + + // Return table pointer + return pendingClassRefsMap; +} + + +/*********************************************************************** +* pendingSubclassesMapTable. Return a pointer to the lookup table for +* pending subclasses. +**********************************************************************/ +static inline NXMapTable *pendingSubclassesMapTable(void) +{ + // Allocate table if needed + if (!pendingSubclassesMap) { + pendingSubclassesMap = NXCreateMapTable(NXStrValueMapPrototype, 10); + } + + // Return table pointer + return pendingSubclassesMap; +} + + +/*********************************************************************** +* pendClassInstallation +* Finish connecting class cls when its superclass becomes connected. +* Check for multiple pends of the same class because connect_class does not. +**********************************************************************/ +static void pendClassInstallation(Class cls, const char *superName) +{ + NXMapTable *table; + PendingSubclass *pending; + PendingSubclass *oldList; + PendingSubclass *l; + + // Create and/or locate pending class lookup table + table = pendingSubclassesMapTable (); + + // Make sure this class isn't already in the pending list. + oldList = (PendingSubclass *)NXMapGet(table, superName); + for (l = oldList; l != nil; l = l->next) { + if (l->subclass == cls) return; // already here, nothing to do + } + + // Create entry referring to this class + pending = (PendingSubclass *)malloc(sizeof(PendingSubclass)); + pending->subclass = cls; + + // Link new entry into head of list of entries for this class + pending->next = oldList; + + // (Re)place entry list in the table + NXMapKeyCopyingInsert (table, superName, pending); +} + + +/*********************************************************************** +* pendClassReference +* Fix up a class ref when the class with the given name becomes connected. +**********************************************************************/ +static void pendClassReference(Class *ref, const char *className, bool isMeta) +{ + NXMapTable *table; + PendingClassRef *pending; + + // Create and/or locate pending class lookup table + table = pendingClassRefsMapTable (); + + // Create entry containing the class reference + pending = (PendingClassRef *)malloc(sizeof(PendingClassRef)); + pending->ref = ref; + if (isMeta) { + pending->ref = (Class *)((uintptr_t)pending->ref | 1); + } + + // Link new entry into head of list of entries for this class + pending->next = (PendingClassRef *)NXMapGet(table, className); + + // (Re)place entry list in the table + NXMapKeyCopyingInsert (table, className, pending); + + if (PrintConnecting) { + _objc_inform("CONNECT: pended reference to class '%s%s' at %p", + className, isMeta ? " (meta)" : "", (void *)ref); + } +} + + +/*********************************************************************** +* resolve_references_to_class +* Fix up any pending class refs to this class. 
+**********************************************************************/ +static void resolve_references_to_class(Class cls) +{ + PendingClassRef *pending; + + if (!pendingClassRefsMap) return; // no unresolved refs for any class + + pending = (PendingClassRef *)NXMapGet(pendingClassRefsMap, cls->name); + if (!pending) return; // no unresolved refs for this class + + NXMapKeyFreeingRemove(pendingClassRefsMap, cls->name); + + if (PrintConnecting) { + _objc_inform("CONNECT: resolving references to class '%s'", cls->name); + } + + while (pending) { + PendingClassRef *next = pending->next; + if (pending->ref) { + bool isMeta = (uintptr_t)pending->ref & 1; + Class *ref = + (Class *)((uintptr_t)pending->ref & ~(uintptr_t)1); + *ref = isMeta ? cls->ISA() : cls; + } + free(pending); + pending = next; + } + + if (NXCountMapTable(pendingClassRefsMap) == 0) { + NXFreeMapTable(pendingClassRefsMap); + pendingClassRefsMap = nil; + } +} + + +/*********************************************************************** +* resolve_subclasses_of_class +* Fix up any pending subclasses of this class. +**********************************************************************/ +static void resolve_subclasses_of_class(Class cls) +{ + PendingSubclass *pending; + + if (!pendingSubclassesMap) return; // no unresolved subclasses + + pending = (PendingSubclass *)NXMapGet(pendingSubclassesMap, cls->name); + if (!pending) return; // no unresolved subclasses for this class + + NXMapKeyFreeingRemove(pendingSubclassesMap, cls->name); + + // Destroy the pending table if it's now empty, to save memory. + if (NXCountMapTable(pendingSubclassesMap) == 0) { + NXFreeMapTable(pendingSubclassesMap); + pendingSubclassesMap = nil; + } + + if (PrintConnecting) { + _objc_inform("CONNECT: resolving subclasses of class '%s'", cls->name); + } + + while (pending) { + PendingSubclass *next = pending->next; + if (pending->subclass) connect_class(pending->subclass); + free(pending); + pending = next; + } +} + + +/*********************************************************************** +* really_connect_class +* Connect cls to superclass supercls unconditionally. +* Also adjust the class hash tables and handle pended subclasses. +* +* This should be called from connect_class() ONLY. +**********************************************************************/ +static void really_connect_class(Class cls, + Class supercls) +{ + Class oldCls; + + // Connect superclass pointers. + set_superclass(cls, supercls, YES); + + // Done! + cls->info |= CLS_CONNECTED; + + { + mutex_locker_t lock(classLock); + + // Update hash tables. + NXHashRemove(unconnected_class_hash, cls); + oldCls = (Class)NXHashInsert(class_hash, cls); + + // Delete unconnected_class_hash if it is now empty. + if (NXCountHashTable(unconnected_class_hash) == 0) { + NXFreeHashTable(unconnected_class_hash); + unconnected_class_hash = nil; + } + + // No duplicate classes allowed. + // Duplicates should have been rejected by _objc_read_classes_from_image + assert(!oldCls); + } + + // Fix up pended class refs to this class, if any + resolve_references_to_class(cls); + + // Connect newly-connectable subclasses + resolve_subclasses_of_class(cls); + + // Debugging: if this class has ivars, make sure this class's ivars don't + // overlap with its super's. This catches some broken fragile base classes. + // Do not use super->instance_size vs. self->ivar[0] to check this. + // Ivars may be packed across instance_size boundaries. 
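+    // Illustrative (hypothetical) example of the overlap checked below: if a
+    // superclass is recompiled with an extra ivar, its last ivar may now sit
+    // at offset 12 while a subclass compiled against the old layout still
+    // places its first ivar at offset 8; the offset comparison catches this.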
+ if (DebugFragileSuperclasses && cls->ivars && cls->ivars->ivar_count) { + Class ivar_cls = supercls; + + // Find closest superclass that has some ivars, if one exists. + while (ivar_cls && + (!ivar_cls->ivars || ivar_cls->ivars->ivar_count == 0)) + { + ivar_cls = ivar_cls->superclass; + } + + if (ivar_cls) { + // Compare superclass's last ivar to this class's first ivar + old_ivar *super_ivar = + &ivar_cls->ivars->ivar_list[ivar_cls->ivars->ivar_count - 1]; + old_ivar *self_ivar = + &cls->ivars->ivar_list[0]; + + // fixme could be smarter about super's ivar size + if (self_ivar->ivar_offset <= super_ivar->ivar_offset) { + _objc_inform("WARNING: ivars of superclass '%s' and " + "subclass '%s' overlap; superclass may have " + "changed since subclass was compiled", + ivar_cls->name, cls->name); + } + } + } +} + + +/*********************************************************************** +* connect_class +* Connect class cls to its superclasses, if possible. +* If cls becomes connected, move it from unconnected_class_hash +* to connected_class_hash. +* Returns TRUE if cls is connected. +* Returns FALSE if cls could not be connected for some reason +* (missing superclass or still-unconnected superclass) +**********************************************************************/ +static bool connect_class(Class cls) +{ + if (cls->isConnected()) { + // This class is already connected to its superclass. + // Do nothing. + return TRUE; + } + else if (cls->superclass == nil) { + // This class is a root class. + // Connect it to itself. + + if (PrintConnecting) { + _objc_inform("CONNECT: class '%s' now connected (root class)", + cls->name); + } + + really_connect_class(cls, nil); + return TRUE; + } + else { + // This class is not a root class and is not yet connected. + // Connect it if its superclass and root class are already connected. + // Otherwise, add this class to the to-be-connected list, + // pending the completion of its superclass and root class. + + // At this point, cls->superclass and cls->ISA()->ISA() are still STRINGS + char *supercls_name = (char *)cls->superclass; + Class supercls; + + // YES unconnected, YES class handler + if (nil == (supercls = look_up_class(supercls_name, YES, YES))) { + // Superclass does not exist yet. + // pendClassInstallation will handle duplicate pends of this class + pendClassInstallation(cls, supercls_name); + + if (PrintConnecting) { + _objc_inform("CONNECT: class '%s' NOT connected (missing super)", cls->name); + } + return FALSE; + } + + if (! connect_class(supercls)) { + // Superclass exists but is not yet connected. + // pendClassInstallation will handle duplicate pends of this class + pendClassInstallation(cls, supercls_name); + + if (PrintConnecting) { + _objc_inform("CONNECT: class '%s' NOT connected (unconnected super)", cls->name); + } + return FALSE; + } + + // Superclass exists and is connected. + // Connect this class to the superclass. + + if (PrintConnecting) { + _objc_inform("CONNECT: class '%s' now connected", cls->name); + } + + really_connect_class(cls, supercls); + return TRUE; + } +} + + +/*********************************************************************** +* _objc_read_categories_from_image. +* Read all categories from the given image. +* Install them on their parent classes, or register them for later +* installation. +* Returns YES if some method caches now need to be flushed. 
+**********************************************************************/ +static bool _objc_read_categories_from_image (header_info * hi) +{ + Module mods; + size_t midx; + bool needFlush = NO; + + if (hi->info()->isReplacement()) { + // Ignore any categories in this image + return NO; + } + + // Major loop - process all modules in the header + mods = hi->mod_ptr; + + // NOTE: The module and category lists are traversed backwards + // to preserve the pre-10.4 processing order. Changing the order + // would have a small chance of introducing binary compatibility bugs. + midx = hi->mod_count; + while (midx-- > 0) { + unsigned int index; + unsigned int total; + + // Nothing to do for a module without a symbol table + if (mods[midx].symtab == nil) + continue; + + // Total entries in symbol table (class entries followed + // by category entries) + total = mods[midx].symtab->cls_def_cnt + + mods[midx].symtab->cat_def_cnt; + + // Minor loop - register all categories from given module + index = total; + while (index-- > mods[midx].symtab->cls_def_cnt) { + old_category *cat = (old_category *)mods[midx].symtab->defs[index]; + needFlush |= _objc_register_category(cat, (int)mods[midx].version); + } + } + + return needFlush; +} + + +/*********************************************************************** +* _objc_read_classes_from_image. +* Read classes from the given image, perform assorted minor fixups, +* scan for +load implementation. +* Does not connect classes to superclasses. +* Does attach pended categories to the classes. +* Adds all classes to unconnected_class_hash. class_hash is unchanged. +**********************************************************************/ +static void _objc_read_classes_from_image(header_info *hi) +{ + unsigned int index; + unsigned int midx; + Module mods; + int isBundle = headerIsBundle(hi); + + if (hi->info()->isReplacement()) { + // Ignore any classes in this image + return; + } + + // class_hash starts small, enough only for libobjc itself. + // If other Objective-C libraries are found, immediately resize + // class_hash, assuming that Foundation and AppKit are about + // to add lots of classes. + { + mutex_locker_t lock(classLock); + if (hi->mhdr() != libobjc_header && _NXHashCapacity(class_hash) < 1024) { + _NXHashRehashToCapacity(class_hash, 1024); + } + } + + // Major loop - process all modules in the image + mods = hi->mod_ptr; + for (midx = 0; midx < hi->mod_count; midx += 1) + { + // Skip module containing no classes + if (mods[midx].symtab == nil) + continue; + + // Minor loop - process all the classes in given module + for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1) + { + Class newCls, oldCls; + bool rejected; + + // Locate the class description pointer + newCls = (Class)mods[midx].symtab->defs[index]; + + // Classes loaded from Mach-O bundles can be unloaded later. + // Nothing uses this class yet, so cls->setInfo is not needed. 
+ if (isBundle) newCls->info |= CLS_FROM_BUNDLE; + if (isBundle) newCls->ISA()->info |= CLS_FROM_BUNDLE; + + // Use common static empty cache instead of nil + if (newCls->cache == nil) + newCls->cache = (Cache) &_objc_empty_cache; + if (newCls->ISA()->cache == nil) + newCls->ISA()->cache = (Cache) &_objc_empty_cache; + + // Set metaclass version + newCls->ISA()->version = mods[midx].version; + + // methodLists is nil or a single list, not an array + newCls->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY; + newCls->ISA()->info |= CLS_NO_METHOD_ARRAY|CLS_NO_PROPERTY_ARRAY; + + // class has no subclasses for cache flushing + newCls->info |= CLS_LEAF; + newCls->ISA()->info |= CLS_LEAF; + + if (mods[midx].version >= 6) { + // class structure has ivar_layout and ext fields + newCls->info |= CLS_EXT; + newCls->ISA()->info |= CLS_EXT; + } + + // Check for +load implementation before categories are attached + if (_class_hasLoadMethod(newCls)) { + newCls->ISA()->info |= CLS_HAS_LOAD_METHOD; + } + + // Install into unconnected_class_hash. + { + mutex_locker_t lock(classLock); + + if (future_class_hash) { + Class futureCls = (Class) + NXHashRemove(future_class_hash, newCls); + if (futureCls) { + // Another class structure for this class was already + // prepared by objc_getFutureClass(). Use it instead. + free((char *)futureCls->name); + memcpy(futureCls, newCls, sizeof(objc_class)); + setOriginalClassForFutureClass(futureCls, newCls); + newCls = futureCls; + + if (NXCountHashTable(future_class_hash) == 0) { + NXFreeHashTable(future_class_hash); + future_class_hash = nil; + } + } + } + + if (!unconnected_class_hash) { + unconnected_class_hash = + NXCreateHashTable(classHashPrototype, 128, nil); + } + + if ((oldCls = (Class)NXHashGet(class_hash, newCls)) || + (oldCls = (Class)NXHashGet(unconnected_class_hash, newCls))) + { + // Another class with this name exists. Complain and reject. + inform_duplicate(newCls->name, oldCls, newCls); + rejected = YES; + } + else { + NXHashInsert(unconnected_class_hash, newCls); + rejected = NO; + } + } + + if (!rejected) { + // Attach pended categories for this class, if any + resolve_categories_for_class(newCls); + } + } + } +} + + +/*********************************************************************** +* _objc_connect_classes_from_image. +* Connect the classes in the given image to their superclasses, +* or register them for later connection if any superclasses are missing. +**********************************************************************/ +static void _objc_connect_classes_from_image(header_info *hi) +{ + unsigned int index; + unsigned int midx; + Module mods; + bool replacement = hi->info()->isReplacement(); + + // Major loop - process all modules in the image + mods = hi->mod_ptr; + for (midx = 0; midx < hi->mod_count; midx += 1) + { + // Skip module containing no classes + if (mods[midx].symtab == nil) + continue; + + // Minor loop - process all the classes in given module + for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1) + { + Class cls = (Class)mods[midx].symtab->defs[index]; + if (! replacement) { + bool connected; + Class futureCls = getFutureClassForOriginalClass(cls); + if (futureCls) { + // objc_getFutureClass() requested a different class + // struct. Fix up the original struct's superclass + // field for [super ...] use, but otherwise perform + // fixups on the new class struct only. 
+ const char *super_name = (const char *) cls->superclass; + if (super_name) cls->superclass = objc_getClass(super_name); + cls = futureCls; + } + connected = connect_class(cls); + if (connected && callbackFunction) { + (*callbackFunction)(cls, 0); + } + } else { + // Replacement image - fix up superclass only (#3704817) + // And metaclass's superclass (#5351107) + const char *super_name = (const char *) cls->superclass; + if (super_name) { + cls->superclass = objc_getClass(super_name); + // metaclass's superclass is superclass's metaclass + cls->ISA()->superclass = cls->superclass->ISA(); + } else { + // Replacement for a root class + // cls->superclass already nil + // root metaclass's superclass is root class + cls->ISA()->superclass = cls; + } + } + } + } +} + + +/*********************************************************************** +* _objc_map_class_refs_for_image. Convert the class ref entries from +* a class name string pointer to a class pointer. If the class does +* not yet exist, the reference is added to a list of pending references +* to be fixed up at a later date. +**********************************************************************/ +static void fix_class_ref(Class *ref, const char *name, bool isMeta) +{ + Class cls; + + // Get pointer to class of this name + // NO unconnected, YES class loader + // (real class with weak-missing superclass is unconnected now) + cls = look_up_class(name, NO, YES); + if (cls) { + // Referenced class exists. Fix up the reference. + *ref = isMeta ? cls->ISA() : cls; + } else { + // Referenced class does not exist yet. Insert nil for now + // (weak-linking) and fix up the reference if the class arrives later. + pendClassReference (ref, name, isMeta); + *ref = nil; + } +} + +static void _objc_map_class_refs_for_image (header_info * hi) +{ + Class *cls_refs; + size_t count; + unsigned int index; + + // Locate class refs in image + cls_refs = _getObjcClassRefs (hi, &count); + if (cls_refs) { + // Process each class ref + for (index = 0; index < count; index += 1) { + // Ref is initially class name char* + const char *name = (const char *) cls_refs[index]; + if (!name) continue; + fix_class_ref(&cls_refs[index], name, NO /*never meta*/); + } + } +} + + +/*********************************************************************** +* _objc_remove_pending_class_refs_in_image +* Delete any pending class ref fixups for class refs in the given image, +* because the image is about to be unloaded. +**********************************************************************/ +static void removePendingReferences(Class *refs, size_t count) +{ + Class *end = refs + count; + + if (!refs) return; + if (!pendingClassRefsMap) return; + + // Search the pending class ref table for class refs in this range. + // The class refs may have already been stomped with nil, + // so there's no way to recover the original class name. 
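+    // Because the original class name is unrecoverable, the loop below walks
+    // every entry in pendingClassRefsMap and simply nils out any ref that
+    // points into the unloaded range; the pending entries themselves remain.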
+ + { + const char *key; + PendingClassRef *pending; + NXMapState state = NXInitMapState(pendingClassRefsMap); + while(NXNextMapState(pendingClassRefsMap, &state, + (const void **)&key, (const void **)&pending)) + { + for ( ; pending != nil; pending = pending->next) { + if (pending->ref >= refs && pending->ref < end) { + pending->ref = nil; + } + } + } + } +} + +static void _objc_remove_pending_class_refs_in_image(header_info *hi) +{ + Class *cls_refs; + size_t count; + + // Locate class refs in this image + cls_refs = _getObjcClassRefs(hi, &count); + removePendingReferences(cls_refs, count); +} + + +/*********************************************************************** +* map_selrefs. For each selector in the specified array, +* replace the name pointer with a uniqued selector. +* If copy is TRUE, all selector data is always copied. This is used +* for registering selectors from unloadable bundles, so the selector +* can still be used after the bundle's data segment is unmapped. +* Returns YES if dst was written to, NO if it was unchanged. +**********************************************************************/ +static inline void map_selrefs(SEL *sels, size_t count, bool copy) +{ + size_t index; + + if (!sels) return; + + sel_lock(); + + // Process each selector + for (index = 0; index < count; index += 1) + { + SEL sel; + + // Lookup pointer to uniqued string + sel = sel_registerNameNoLock((const char *) sels[index], copy); + + // Replace this selector with uniqued one (avoid + // modifying the VM page if this would be a NOP) + if (sels[index] != sel) { + sels[index] = sel; + } + } + + sel_unlock(); +} + + +/*********************************************************************** +* map_method_descs. For each method in the specified method list, +* replace the name pointer with a uniqued selector. +* If copy is TRUE, all selector data is always copied. This is used +* for registering selectors from unloadable bundles, so the selector +* can still be used after the bundle's data segment is unmapped. +**********************************************************************/ +static void map_method_descs (struct objc_method_description_list * methods, bool copy) +{ + int index; + + if (!methods) return; + + sel_lock(); + + // Process each method + for (index = 0; index < methods->count; index += 1) + { + struct objc_method_description * method; + SEL sel; + + // Get method entry to fix up + method = &methods->list[index]; + + // Lookup pointer to uniqued string + sel = sel_registerNameNoLock((const char *) method->name, copy); + + // Replace this selector with uniqued one (avoid + // modifying the VM page if this would be a NOP) + if (method->name != sel) + method->name = sel; + } + + sel_unlock(); +} + + +/*********************************************************************** +* ext_for_protocol +* Returns the protocol extension for the given protocol. +* Returns nil if the protocol has no extension. +**********************************************************************/ +static old_protocol_ext *ext_for_protocol(old_protocol *proto) +{ + if (!proto) return nil; + if (!protocol_ext_map) return nil; + else return (old_protocol_ext *)NXMapGet(protocol_ext_map, proto); +} + + +/*********************************************************************** +* lookup_method +* Search a protocol method list for a selector. 
+**********************************************************************/ +static struct objc_method_description * +lookup_method(struct objc_method_description_list *mlist, SEL aSel) +{ + if (mlist) { + int i; + for (i = 0; i < mlist->count; i++) { + if (mlist->list[i].name == aSel) { + return mlist->list+i; + } + } + } + return nil; +} + + +/*********************************************************************** +* lookup_protocol_method +* Search for a selector in a protocol +* (and optionally recursively all incorporated protocols) +**********************************************************************/ +struct objc_method_description * +lookup_protocol_method(old_protocol *proto, SEL aSel, + bool isRequiredMethod, bool isInstanceMethod, + bool recursive) +{ + struct objc_method_description *m = nil; + old_protocol_ext *ext; + + if (isRequiredMethod) { + if (isInstanceMethod) { + m = lookup_method(proto->instance_methods, aSel); + } else { + m = lookup_method(proto->class_methods, aSel); + } + } else if ((ext = ext_for_protocol(proto))) { + if (isInstanceMethod) { + m = lookup_method(ext->optional_instance_methods, aSel); + } else { + m = lookup_method(ext->optional_class_methods, aSel); + } + } + + if (!m && recursive && proto->protocol_list) { + int i; + for (i = 0; !m && i < proto->protocol_list->count; i++) { + m = lookup_protocol_method(proto->protocol_list->list[i], aSel, + isRequiredMethod,isInstanceMethod,true); + } + } + + return m; +} + + +/*********************************************************************** +* protocol_getName +* Returns the name of the given protocol. +**********************************************************************/ +const char *protocol_getName(Protocol *p) +{ + old_protocol *proto = oldprotocol(p); + if (!proto) return "nil"; + return proto->protocol_name; +} + + +/*********************************************************************** +* protocol_getMethodDescription +* Returns the description of a named method. +* Searches either required or optional methods. +* Searches either instance or class methods. +**********************************************************************/ +struct objc_method_description +protocol_getMethodDescription(Protocol *p, SEL aSel, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + struct objc_method_description empty = {nil, nil}; + old_protocol *proto = oldprotocol(p); + struct objc_method_description *desc; + if (!proto) return empty; + + desc = lookup_protocol_method(proto, aSel, + isRequiredMethod, isInstanceMethod, true); + if (desc) return *desc; + else return empty; +} + + +/*********************************************************************** +* protocol_copyMethodDescriptionList +* Returns an array of method descriptions from a protocol. +* Copies either required or optional methods. +* Copies either instance or class methods. 
+**********************************************************************/ +struct objc_method_description * +protocol_copyMethodDescriptionList(Protocol *p, + BOOL isRequiredMethod, + BOOL isInstanceMethod, + unsigned int *outCount) +{ + struct objc_method_description_list *mlist = nil; + old_protocol *proto = oldprotocol(p); + old_protocol_ext *ext; + unsigned int i, count; + struct objc_method_description *result; + + if (!proto) { + if (outCount) *outCount = 0; + return nil; + } + + if (isRequiredMethod) { + if (isInstanceMethod) { + mlist = proto->instance_methods; + } else { + mlist = proto->class_methods; + } + } else if ((ext = ext_for_protocol(proto))) { + if (isInstanceMethod) { + mlist = ext->optional_instance_methods; + } else { + mlist = ext->optional_class_methods; + } + } + + if (!mlist) { + if (outCount) *outCount = 0; + return nil; + } + + count = mlist->count; + result = (struct objc_method_description *) + calloc(count + 1, sizeof(struct objc_method_description)); + for (i = 0; i < count; i++) { + result[i] = mlist->list[i]; + } + + if (outCount) *outCount = count; + return result; +} + + +objc_property_t +protocol_getProperty(Protocol *p, const char *name, + BOOL isRequiredProperty, BOOL isInstanceProperty) +{ + old_protocol *proto = oldprotocol(p); + old_protocol_ext *ext; + old_protocol_list *proto_list; + + if (!proto || !name) return nil; + + if (!isRequiredProperty) { + // Only required properties are currently supported + return nil; + } + + if ((ext = ext_for_protocol(proto))) { + old_property_list *plist; + if (isInstanceProperty) plist = ext->instance_properties; + else if (ext->hasClassPropertiesField()) plist = ext->class_properties; + else plist = nil; + + if (plist) { + uint32_t i; + for (i = 0; i < plist->count; i++) { + old_property *prop = property_list_nth(plist, i); + if (0 == strcmp(name, prop->name)) { + return (objc_property_t)prop; + } + } + } + } + + if ((proto_list = proto->protocol_list)) { + int i; + for (i = 0; i < proto_list->count; i++) { + objc_property_t prop = + protocol_getProperty((Protocol *)proto_list->list[i], name, + isRequiredProperty, isInstanceProperty); + if (prop) return prop; + } + } + + return nil; +} + + +objc_property_t * +protocol_copyPropertyList2(Protocol *p, unsigned int *outCount, + BOOL isRequiredProperty, BOOL isInstanceProperty) +{ + old_property **result = nil; + old_protocol_ext *ext; + old_property_list *plist; + + old_protocol *proto = oldprotocol(p); + if (! (ext = ext_for_protocol(proto)) || !isRequiredProperty) { + // Only required properties are currently supported. + if (outCount) *outCount = 0; + return nil; + } + + if (isInstanceProperty) plist = ext->instance_properties; + else if (ext->hasClassPropertiesField()) plist = ext->class_properties; + else plist = nil; + + result = copyPropertyList(plist, outCount); + + return (objc_property_t *)result; +} + +objc_property_t *protocol_copyPropertyList(Protocol *p, unsigned int *outCount) +{ + return protocol_copyPropertyList2(p, outCount, YES, YES); +} + + +/*********************************************************************** +* protocol_copyProtocolList +* Copies this protocol's incorporated protocols. +* Does not copy those protocol's incorporated protocols in turn. 
+**********************************************************************/ +Protocol * __unsafe_unretained * +protocol_copyProtocolList(Protocol *p, unsigned int *outCount) +{ + unsigned int count = 0; + Protocol **result = nil; + old_protocol *proto = oldprotocol(p); + + if (!proto) { + if (outCount) *outCount = 0; + return nil; + } + + if (proto->protocol_list) { + count = (unsigned int)proto->protocol_list->count; + } + if (count > 0) { + unsigned int i; + result = (Protocol **)malloc((count+1) * sizeof(Protocol *)); + + for (i = 0; i < count; i++) { + result[i] = (Protocol *)proto->protocol_list->list[i]; + } + result[i] = nil; + } + + if (outCount) *outCount = count; + return result; +} + + +BOOL protocol_conformsToProtocol(Protocol *self_gen, Protocol *other_gen) +{ + old_protocol *self = oldprotocol(self_gen); + old_protocol *other = oldprotocol(other_gen); + + if (!self || !other) { + return NO; + } + + if (0 == strcmp(self->protocol_name, other->protocol_name)) { + return YES; + } + + if (self->protocol_list) { + int i; + for (i = 0; i < self->protocol_list->count; i++) { + old_protocol *proto = self->protocol_list->list[i]; + if (0 == strcmp(other->protocol_name, proto->protocol_name)) { + return YES; + } + if (protocol_conformsToProtocol((Protocol *)proto, other_gen)) { + return YES; + } + } + } + + return NO; +} + + +BOOL protocol_isEqual(Protocol *self, Protocol *other) +{ + if (self == other) return YES; + if (!self || !other) return NO; + + if (!protocol_conformsToProtocol(self, other)) return NO; + if (!protocol_conformsToProtocol(other, self)) return NO; + + return YES; +} + + +/*********************************************************************** +* _protocol_getMethodTypeEncoding +* Return the @encode string for the requested protocol method. +* Returns nil if the compiler did not emit any extended @encode data. +* Locking: runtimeLock must not be held by the caller +**********************************************************************/ +const char * +_protocol_getMethodTypeEncoding(Protocol *proto_gen, SEL sel, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + old_protocol *proto = oldprotocol(proto_gen); + if (!proto) return nil; + old_protocol_ext *ext = ext_for_protocol(proto); + if (!ext) return nil; + if (ext->size < offsetof(old_protocol_ext, extendedMethodTypes) + sizeof(ext->extendedMethodTypes)) return nil; + if (! ext->extendedMethodTypes) return nil; + + struct objc_method_description *m = + lookup_protocol_method(proto, sel, + isRequiredMethod, isInstanceMethod, false); + if (!m) { + // No method with that name. Search incorporated protocols. 
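+        // Each incorporated protocol carries its own extendedMethodTypes, so
+        // the recursive call below returns the encoding from whichever
+        // sub-protocol actually declares the method, or nil if none does.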
+ if (proto->protocol_list) { + for (int i = 0; i < proto->protocol_list->count; i++) { + const char *enc = + _protocol_getMethodTypeEncoding((Protocol *)proto->protocol_list->list[i], sel, isRequiredMethod, isInstanceMethod); + if (enc) return enc; + } + } + return nil; + } + + int i = 0; + if (isRequiredMethod && isInstanceMethod) { + i += ((uintptr_t)m - (uintptr_t)proto->instance_methods) / sizeof(proto->instance_methods->list[0]); + goto done; + } else if (proto->instance_methods) { + i += proto->instance_methods->count; + } + + if (isRequiredMethod && !isInstanceMethod) { + i += ((uintptr_t)m - (uintptr_t)proto->class_methods) / sizeof(proto->class_methods->list[0]); + goto done; + } else if (proto->class_methods) { + i += proto->class_methods->count; + } + + if (!isRequiredMethod && isInstanceMethod) { + i += ((uintptr_t)m - (uintptr_t)ext->optional_instance_methods) / sizeof(ext->optional_instance_methods->list[0]); + goto done; + } else if (ext->optional_instance_methods) { + i += ext->optional_instance_methods->count; + } + + if (!isRequiredMethod && !isInstanceMethod) { + i += ((uintptr_t)m - (uintptr_t)ext->optional_class_methods) / sizeof(ext->optional_class_methods->list[0]); + goto done; + } else if (ext->optional_class_methods) { + i += ext->optional_class_methods->count; + } + + done: + return ext->extendedMethodTypes[i]; +} + + +/*********************************************************************** +* objc_allocateProtocol +* Creates a new protocol. The protocol may not be used until +* objc_registerProtocol() is called. +* Returns nil if a protocol with the same name already exists. +* Locking: acquires classLock +**********************************************************************/ +Protocol * +objc_allocateProtocol(const char *name) +{ + Class cls = objc_getClass("__IncompleteProtocol"); + assert(cls); + + mutex_locker_t lock(classLock); + + if (NXMapGet(protocol_map, name)) return nil; + + old_protocol *result = (old_protocol *) + calloc(1, sizeof(old_protocol) + + sizeof(old_protocol_ext)); + old_protocol_ext *ext = (old_protocol_ext *)(result+1); + + result->isa = cls; + result->protocol_name = strdup(name); + ext->size = sizeof(old_protocol_ext); + + // fixme reserve name without installing + + NXMapInsert(protocol_ext_map, result, result+1); + + return (Protocol *)result; +} + + +/*********************************************************************** +* objc_registerProtocol +* Registers a newly-constructed protocol. The protocol is now +* ready for use and immutable. +* Locking: acquires classLock +**********************************************************************/ +void objc_registerProtocol(Protocol *proto_gen) +{ + old_protocol *proto = oldprotocol(proto_gen); + + Class oldcls = objc_getClass("__IncompleteProtocol"); + Class cls = objc_getClass("Protocol"); + + mutex_locker_t lock(classLock); + + if (proto->isa == cls) { + _objc_inform("objc_registerProtocol: protocol '%s' was already " + "registered!", proto->protocol_name); + return; + } + if (proto->isa != oldcls) { + _objc_inform("objc_registerProtocol: protocol '%s' was not allocated " + "with objc_allocateProtocol!", proto->protocol_name); + return; + } + + proto->isa = cls; + + NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto); +} + + +/*********************************************************************** +* protocol_addProtocol +* Adds an incorporated protocol to another protocol. +* No method enforcement is performed. +* `proto` must be under construction. 
`addition` must not. +* Locking: acquires classLock +**********************************************************************/ +void +protocol_addProtocol(Protocol *proto_gen, Protocol *addition_gen) +{ + old_protocol *proto = oldprotocol(proto_gen); + old_protocol *addition = oldprotocol(addition_gen); + + Class cls = objc_getClass("__IncompleteProtocol"); + + if (!proto_gen) return; + if (!addition_gen) return; + + mutex_locker_t lock(classLock); + + if (proto->isa != cls) { + _objc_inform("protocol_addProtocol: modified protocol '%s' is not " + "under construction!", proto->protocol_name); + return; + } + if (addition->isa == cls) { + _objc_inform("protocol_addProtocol: added protocol '%s' is still " + "under construction!", addition->protocol_name); + return; + } + + old_protocol_list *protolist = proto->protocol_list; + if (protolist) { + size_t size = sizeof(old_protocol_list) + + protolist->count * sizeof(protolist->list[0]); + protolist = (old_protocol_list *) + realloc(protolist, size); + } else { + protolist = (old_protocol_list *) + calloc(1, sizeof(old_protocol_list)); + } + + protolist->list[protolist->count++] = addition; + proto->protocol_list = protolist; +} + + +/*********************************************************************** +* protocol_addMethodDescription +* Adds a method to a protocol. The protocol must be under construction. +* Locking: acquires classLock +**********************************************************************/ +static void +_protocol_addMethod(struct objc_method_description_list **list, SEL name, const char *types) +{ + if (!*list) { + *list = (struct objc_method_description_list *) + calloc(sizeof(struct objc_method_description_list), 1); + } else { + size_t size = sizeof(struct objc_method_description_list) + + (*list)->count * sizeof(struct objc_method_description); + *list = (struct objc_method_description_list *) + realloc(*list, size); + } + + struct objc_method_description *desc = &(*list)->list[(*list)->count++]; + desc->name = name; + desc->types = strdup(types ?: ""); +} + +void +protocol_addMethodDescription(Protocol *proto_gen, SEL name, const char *types, + BOOL isRequiredMethod, BOOL isInstanceMethod) +{ + old_protocol *proto = oldprotocol(proto_gen); + + Class cls = objc_getClass("__IncompleteProtocol"); + + if (!proto_gen) return; + + mutex_locker_t lock(classLock); + + if (proto->isa != cls) { + _objc_inform("protocol_addMethodDescription: protocol '%s' is not " + "under construction!", proto->protocol_name); + return; + } + + if (isRequiredMethod && isInstanceMethod) { + _protocol_addMethod(&proto->instance_methods, name, types); + } else if (isRequiredMethod && !isInstanceMethod) { + _protocol_addMethod(&proto->class_methods, name, types); + } else if (!isRequiredMethod && isInstanceMethod) { + old_protocol_ext *ext = (old_protocol_ext *)(proto+1); + _protocol_addMethod(&ext->optional_instance_methods, name, types); + } else /* !isRequiredMethod && !isInstanceMethod) */ { + old_protocol_ext *ext = (old_protocol_ext *)(proto+1); + _protocol_addMethod(&ext->optional_class_methods, name, types); + } +} + + +/*********************************************************************** +* protocol_addProperty +* Adds a property to a protocol. The protocol must be under construction. 
+* Locking: acquires classLock +**********************************************************************/ +static void +_protocol_addProperty(old_property_list **plist, const char *name, + const objc_property_attribute_t *attrs, + unsigned int count) +{ + if (!*plist) { + *plist = (old_property_list *) + calloc(sizeof(old_property_list), 1); + (*plist)->entsize = sizeof(old_property); + } else { + *plist = (old_property_list *) + realloc(*plist, sizeof(old_property_list) + + (*plist)->count * (*plist)->entsize); + } + + old_property *prop = property_list_nth(*plist, (*plist)->count++); + prop->name = strdup(name); + prop->attributes = copyPropertyAttributeString(attrs, count); +} + +void +protocol_addProperty(Protocol *proto_gen, const char *name, + const objc_property_attribute_t *attrs, + unsigned int count, + BOOL isRequiredProperty, BOOL isInstanceProperty) +{ + old_protocol *proto = oldprotocol(proto_gen); + + Class cls = objc_getClass("__IncompleteProtocol"); + + if (!proto) return; + if (!name) return; + + mutex_locker_t lock(classLock); + + if (proto->isa != cls) { + _objc_inform("protocol_addProperty: protocol '%s' is not " + "under construction!", proto->protocol_name); + return; + } + + old_protocol_ext *ext = ext_for_protocol(proto); + + if (isRequiredProperty && isInstanceProperty) { + _protocol_addProperty(&ext->instance_properties, name, attrs, count); + } + else if (isRequiredProperty && !isInstanceProperty) { + _protocol_addProperty(&ext->class_properties, name, attrs, count); + } + // else if (!isRequiredProperty && isInstanceProperty) { + // _protocol_addProperty(&ext->optional_instance_properties, name, attrs, count); + //} + // else /* !isRequiredProperty && !isInstanceProperty) */ { + // _protocol_addProperty(&ext->optional_class_properties, name, attrs, count); + //} +} + + +/*********************************************************************** +* _objc_fixup_protocol_objects_for_image. For each protocol in the +* specified image, selectorize the method names and add to the protocol hash. +**********************************************************************/ + +static bool versionIsExt(uintptr_t version, const char *names, size_t size) +{ + // CodeWarrior used isa field for string "Protocol" + // from section __OBJC,__class_names. rdar://4951638 + // gcc (10.4 and earlier) used isa field for version number; + // the only version number used on Mac OS X was 2. 
+ // gcc (10.5 and later) uses isa field for ext pointer + + if (version < 4096 /* not PAGE_SIZE */) { + return NO; + } + + if (version >= (uintptr_t)names && version < (uintptr_t)(names + size)) { + return NO; + } + + return YES; +} + +static void fix_protocol(old_protocol *proto, Class protocolClass, + bool isBundle, const char *names, size_t names_size) +{ + uintptr_t version; + if (!proto) return; + + version = (uintptr_t)proto->isa; + + // Set the protocol's isa + proto->isa = protocolClass; + + // Fix up method lists + // fixme share across duplicates + map_method_descs (proto->instance_methods, isBundle); + map_method_descs (proto->class_methods, isBundle); + + // Fix up ext, if any + if (versionIsExt(version, names, names_size)) { + old_protocol_ext *ext = (old_protocol_ext *)version; + NXMapInsert(protocol_ext_map, proto, ext); + map_method_descs (ext->optional_instance_methods, isBundle); + map_method_descs (ext->optional_class_methods, isBundle); + } + + // Record the protocol it if we don't have one with this name yet + // fixme bundles - copy protocol + // fixme unloading + if (!NXMapGet(protocol_map, proto->protocol_name)) { + NXMapKeyCopyingInsert(protocol_map, proto->protocol_name, proto); + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s", + proto, proto->protocol_name); + } + } else { + // duplicate - do nothing + if (PrintProtocols) { + _objc_inform("PROTOCOLS: protocol at %p is %s (duplicate)", + proto, proto->protocol_name); + } + } +} + +static void _objc_fixup_protocol_objects_for_image (header_info * hi) +{ + Class protocolClass = objc_getClass("Protocol"); + size_t count, i; + old_protocol **protos; + int isBundle = headerIsBundle(hi); + const char *names; + size_t names_size; + + mutex_locker_t lock(classLock); + + // Allocate the protocol registry if necessary. + if (!protocol_map) { + protocol_map = + NXCreateMapTable(NXStrValueMapPrototype, 32); + } + if (!protocol_ext_map) { + protocol_ext_map = + NXCreateMapTable(NXPtrValueMapPrototype, 32); + } + + protos = _getObjcProtocols(hi, &count); + names = _getObjcClassNames(hi, &names_size); + for (i = 0; i < count; i++) { + fix_protocol(protos[i], protocolClass, isBundle, names, names_size); + } +} + + +/*********************************************************************** +* _objc_fixup_selector_refs. Register all of the selectors in each +* image, and fix them all up. +**********************************************************************/ +static void _objc_fixup_selector_refs (const header_info *hi) +{ + size_t count; + SEL *sels; + + bool preoptimized = hi->isPreoptimized(); + + if (PrintPreopt) { + if (preoptimized) { + _objc_inform("PREOPTIMIZATION: honoring preoptimized selectors in %s", + hi->fname()); + } + else if (hi->info()->optimizedByDyld()) { + _objc_inform("PREOPTIMIZATION: IGNORING preoptimized selectors in %s", + hi->fname()); + } + } + + if (preoptimized) return; + + sels = _getObjcSelectorRefs (hi, &count); + + map_selrefs(sels, count, headerIsBundle(hi)); +} + +static inline bool _is_threaded() { +#if TARGET_OS_WIN32 + return YES; +#else + return pthread_is_threaded_np() != 0; +#endif +} + +#if !TARGET_OS_WIN32 +/*********************************************************************** +* unmap_image +* Process the given image which is about to be unmapped by dyld. +* mh is mach_header instead of headerType because that's what +* dyld_priv.h says even for 64-bit. 
+**********************************************************************/ +void +unmap_image(const char *path __unused, const struct mach_header *mh) +{ + recursive_mutex_locker_t lock(loadMethodLock); + unmap_image_nolock(mh); +} + + +/*********************************************************************** +* map_images +* Process the given images which are being mapped in by dyld. +* Calls ABI-agnostic code after taking ABI-specific locks. +**********************************************************************/ +void +map_2_images(unsigned count, const char * const paths[], + const struct mach_header * const mhdrs[]) +{ + recursive_mutex_locker_t lock(loadMethodLock); + map_images_nolock(count, paths, mhdrs); +} + + +/*********************************************************************** +* load_images +* Process +load in the given images which are being mapped in by dyld. +* +* Locking: acquires classLock and loadMethodLock +**********************************************************************/ +extern void prepare_load_methods(const headerType *mhdr); + +void +load_images(const char *path __unused, const struct mach_header *mh) +{ + recursive_mutex_locker_t lock(loadMethodLock); + + // Discover +load methods + prepare_load_methods((const headerType *)mh); + + // Call +load methods (without classLock - re-entrant) + call_load_methods(); +} +#endif + + +/*********************************************************************** +* _read_images +* Perform metadata processing for hCount images starting with firstNewHeader +**********************************************************************/ +void _read_images(header_info **hList, uint32_t hCount, int totalClasses, int unoptimizedTotalClass) +{ + uint32_t i; + bool categoriesLoaded = NO; + + if (!class_hash) _objc_init_class_hash(); + + // Parts of this order are important for correctness or performance. + + // Read classes from all images. + for (i = 0; i < hCount; i++) { + _objc_read_classes_from_image(hList[i]); + } + + // Read categories from all images. + // But not if any other threads are running - they might + // call a category method before the fixups below are complete. + if (!_is_threaded()) { + bool needFlush = NO; + for (i = 0; i < hCount; i++) { + needFlush |= _objc_read_categories_from_image(hList[i]); + } + if (needFlush) flush_marked_caches(); + categoriesLoaded = YES; + } + + // Connect classes from all images. + for (i = 0; i < hCount; i++) { + _objc_connect_classes_from_image(hList[i]); + } + + // Fix up class refs, selector refs, and protocol objects from all images. + for (i = 0; i < hCount; i++) { + _objc_map_class_refs_for_image(hList[i]); + _objc_fixup_selector_refs(hList[i]); + _objc_fixup_protocol_objects_for_image(hList[i]); + } + + // Read categories from all images. + // But not if this is the only thread - it's more + // efficient to attach categories earlier if safe. + if (!categoriesLoaded) { + bool needFlush = NO; + for (i = 0; i < hCount; i++) { + needFlush |= _objc_read_categories_from_image(hList[i]); + } + if (needFlush) flush_marked_caches(); + } + + // Multi-threaded category load MUST BE LAST to avoid a race. +} + + +/*********************************************************************** +* prepare_load_methods +* Schedule +load for classes in this image, any un-+load-ed +* superclasses in other images, and any categories in this image. +**********************************************************************/ +// Recursively schedule +load for cls and any un-+load-ed superclasses. 
+// cls must already be connected. +static void schedule_class_load(Class cls) +{ + if (cls->info & CLS_LOADED) return; + if (cls->superclass) schedule_class_load(cls->superclass); + add_class_to_loadable_list(cls); + cls->info |= CLS_LOADED; +} + +void prepare_load_methods(const headerType *mhdr) +{ + Module mods; + unsigned int midx; + + header_info *hi; + for (hi = FirstHeader; hi; hi = hi->getNext()) { + if (mhdr == hi->mhdr()) break; + } + if (!hi) return; + + if (hi->info()->isReplacement()) { + // Ignore any classes in this image + return; + } + + // Major loop - process all modules in the image + mods = hi->mod_ptr; + for (midx = 0; midx < hi->mod_count; midx += 1) + { + unsigned int index; + + // Skip module containing no classes + if (mods[midx].symtab == nil) + continue; + + // Minor loop - process all the classes in given module + for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1) + { + // Locate the class description pointer + Class cls = (Class)mods[midx].symtab->defs[index]; + if (cls->info & CLS_CONNECTED) { + schedule_class_load(cls); + } + } + } + + + // Major loop - process all modules in the header + mods = hi->mod_ptr; + + // NOTE: The module and category lists are traversed backwards + // to preserve the pre-10.4 processing order. Changing the order + // would have a small chance of introducing binary compatibility bugs. + midx = (unsigned int)hi->mod_count; + while (midx-- > 0) { + unsigned int index; + unsigned int total; + Symtab symtab = mods[midx].symtab; + + // Nothing to do for a module without a symbol table + if (mods[midx].symtab == nil) + continue; + // Total entries in symbol table (class entries followed + // by category entries) + total = mods[midx].symtab->cls_def_cnt + + mods[midx].symtab->cat_def_cnt; + + // Minor loop - register all categories from given module + index = total; + while (index-- > mods[midx].symtab->cls_def_cnt) { + old_category *cat = (old_category *)symtab->defs[index]; + add_category_to_loadable_list((Category)cat); + } + } +} + + +#if TARGET_OS_WIN32 + +void unload_class(Class cls) +{ +} + +#else + +/*********************************************************************** +* _objc_remove_classes_in_image +* Remove all classes in the given image from the runtime, because +* the image is about to be unloaded. 
+* Things to clean up: +* class_hash +* unconnected_class_hash +* pending subclasses list (only if class is still unconnected) +* loadable class list +* class's method caches +* class refs in all other images +**********************************************************************/ +// Re-pend any class references in refs that point into [start..end) +static void rependClassReferences(Class *refs, size_t count, + uintptr_t start, uintptr_t end) +{ + size_t i; + + if (!refs) return; + + // Process each class ref + for (i = 0; i < count; i++) { + if ((uintptr_t)(refs[i]) >= start && (uintptr_t)(refs[i]) < end) { + pendClassReference(&refs[i], refs[i]->name, + refs[i]->info & CLS_META); + refs[i] = nil; + } + } +} + + +void try_free(const void *p) +{ + if (p && malloc_size(p)) free((void *)p); +} + +// Deallocate all memory in a method list +static void unload_mlist(old_method_list *mlist) +{ + int i; + for (i = 0; i < mlist->method_count; i++) { + try_free(mlist->method_list[i].method_types); + } + try_free(mlist); +} + +static void unload_property_list(old_property_list *proplist) +{ + uint32_t i; + + if (!proplist) return; + + for (i = 0; i < proplist->count; i++) { + old_property *prop = property_list_nth(proplist, i); + try_free(prop->name); + try_free(prop->attributes); + } + try_free(proplist); +} + + +// Deallocate all memory in a class. +void unload_class(Class cls) +{ + // Free method cache + // This dereferences the cache contents; do this before freeing methods + if (cls->cache && cls->cache != &_objc_empty_cache) { + _cache_free(cls->cache); + } + + // Free ivar lists + if (cls->ivars) { + int i; + for (i = 0; i < cls->ivars->ivar_count; i++) { + try_free(cls->ivars->ivar_list[i].ivar_name); + try_free(cls->ivars->ivar_list[i].ivar_type); + } + try_free(cls->ivars); + } + + // Free fixed-up method lists and method list array + if (cls->methodLists) { + // more than zero method lists + if (cls->info & CLS_NO_METHOD_ARRAY) { + // one method list + unload_mlist((old_method_list *)cls->methodLists); + } + else { + // more than one method list + old_method_list **mlistp; + for (mlistp = cls->methodLists; + *mlistp != nil && *mlistp != END_OF_METHODS_LIST; + mlistp++) + { + unload_mlist(*mlistp); + } + free(cls->methodLists); + } + } + + // Free protocol list + old_protocol_list *protos = cls->protocols; + while (protos) { + old_protocol_list *dead = protos; + protos = protos->next; + try_free(dead); + } + + if ((cls->info & CLS_EXT)) { + if (cls->ext) { + // Free property lists and property list array + if (cls->ext->propertyLists) { + // more than zero property lists + if (cls->info & CLS_NO_PROPERTY_ARRAY) { + // one property list + old_property_list *proplist = + (old_property_list *)cls->ext->propertyLists; + unload_property_list(proplist); + } else { + // more than one property list + old_property_list **plistp; + for (plistp = cls->ext->propertyLists; + *plistp != nil; + plistp++) + { + unload_property_list(*plistp); + } + try_free(cls->ext->propertyLists); + } + } + + // Free weak ivar layout + try_free(cls->ext->weak_ivar_layout); + + // Free ext + try_free(cls->ext); + } + + // Free non-weak ivar layout + try_free(cls->ivar_layout); + } + + // Free class name + try_free(cls->name); + + // Free cls + try_free(cls); +} + + +static void _objc_remove_classes_in_image(header_info *hi) +{ + unsigned int index; + unsigned int midx; + Module mods; + + mutex_locker_t lock(classLock); + + // Major loop - process all modules in the image + mods = hi->mod_ptr; + for (midx = 0; midx < 
hi->mod_count; midx += 1) + { + // Skip module containing no classes + if (mods[midx].symtab == nil) + continue; + + // Minor loop - process all the classes in given module + for (index = 0; index < mods[midx].symtab->cls_def_cnt; index += 1) + { + Class cls; + + // Locate the class description pointer + cls = (Class)mods[midx].symtab->defs[index]; + + // Remove from loadable class list, if present + remove_class_from_loadable_list(cls); + + // Remove from unconnected_class_hash and pending subclasses + if (unconnected_class_hash && NXHashMember(unconnected_class_hash, cls)) { + NXHashRemove(unconnected_class_hash, cls); + if (pendingSubclassesMap) { + // Find this class in its superclass's pending list + char *supercls_name = (char *)cls->superclass; + PendingSubclass *pending = (PendingSubclass *) + NXMapGet(pendingSubclassesMap, supercls_name); + for ( ; pending != nil; pending = pending->next) { + if (pending->subclass == cls) { + pending->subclass = Nil; + break; + } + } + } + } + + // Remove from class_hash + NXHashRemove(class_hash, cls); + + // Free heap memory pointed to by the class + unload_class(cls->ISA()); + unload_class(cls); + } + } + + + // Search all other images for class refs that point back to this range. + // Un-fix and re-pend any such class refs. + + // Get the location of the dying image's __OBJC segment + uintptr_t seg; + unsigned long seg_size; + seg = (uintptr_t)getsegmentdata(hi->mhdr(), "__OBJC", &seg_size); + + header_info *other_hi; + for (other_hi = FirstHeader; other_hi != nil; other_hi = other_hi->getNext()) { + Class *other_refs; + size_t count; + if (other_hi == hi) continue; // skip the image being unloaded + + // Fix class refs in the other image + other_refs = _getObjcClassRefs(other_hi, &count); + rependClassReferences(other_refs, count, seg, seg+seg_size); + } +} + + +/*********************************************************************** +* _objc_remove_categories_in_image +* Remove all categories in the given image from the runtime, because +* the image is about to be unloaded. +* Things to clean up: +* unresolved category list +* loadable category list +**********************************************************************/ +static void _objc_remove_categories_in_image(header_info *hi) +{ + Module mods; + unsigned int midx; + + // Major loop - process all modules in the header + mods = hi->mod_ptr; + + for (midx = 0; midx < hi->mod_count; midx++) { + unsigned int index; + unsigned int total; + Symtab symtab = mods[midx].symtab; + + // Nothing to do for a module without a symbol table + if (symtab == nil) continue; + + // Total entries in symbol table (class entries followed + // by category entries) + total = symtab->cls_def_cnt + symtab->cat_def_cnt; + + // Minor loop - check all categories from given module + for (index = symtab->cls_def_cnt; index < total; index++) { + old_category *cat = (old_category *)symtab->defs[index]; + + // Clean up loadable category list + remove_category_from_loadable_list((Category)cat); + + // Clean up category_hash + if (category_hash) { + _objc_unresolved_category *cat_entry = (_objc_unresolved_category *)NXMapGet(category_hash, cat->class_name); + for ( ; cat_entry != nil; cat_entry = cat_entry->next) { + if (cat_entry->cat == cat) { + cat_entry->cat = nil; + break; + } + } + } + } + } +} + + +/*********************************************************************** +* unload_paranoia +* Various paranoid debugging checks that look for poorly-behaving +* unloadable bundles. 
+* Called by _objc_unmap_image when OBJC_UNLOAD_DEBUG is set. +**********************************************************************/ +static void unload_paranoia(header_info *hi) +{ + // Get the location of the dying image's __OBJC segment + uintptr_t seg; + unsigned long seg_size; + seg = (uintptr_t)getsegmentdata(hi->mhdr(), "__OBJC", &seg_size); + + _objc_inform("UNLOAD DEBUG: unloading image '%s' [%p..%p]", + hi->fname(), (void *)seg, (void*)(seg+seg_size)); + + mutex_locker_t lock(classLock); + + // Make sure the image contains no categories on surviving classes. + { + Module mods; + unsigned int midx; + + // Major loop - process all modules in the header + mods = hi->mod_ptr; + + for (midx = 0; midx < hi->mod_count; midx++) { + unsigned int index; + unsigned int total; + Symtab symtab = mods[midx].symtab; + + // Nothing to do for a module without a symbol table + if (symtab == nil) continue; + + // Total entries in symbol table (class entries followed + // by category entries) + total = symtab->cls_def_cnt + symtab->cat_def_cnt; + + // Minor loop - check all categories from given module + for (index = symtab->cls_def_cnt; index < total; index++) { + old_category *cat = (old_category *)symtab->defs[index]; + struct objc_class query; + + query.name = cat->class_name; + if (NXHashMember(class_hash, &query)) { + _objc_inform("UNLOAD DEBUG: dying image contains category '%s(%s)' on surviving class '%s'!", cat->class_name, cat->category_name, cat->class_name); + } + } + } + } + + // Make sure no surviving class is in the dying image. + // Make sure no surviving class has a superclass in the dying image. + // fixme check method implementations too + { + Class cls; + NXHashState state; + + state = NXInitHashState(class_hash); + while (NXNextHashState(class_hash, &state, (void **)&cls)) { + if ((vm_address_t)cls >= seg && + (vm_address_t)cls < seg+seg_size) + { + _objc_inform("UNLOAD DEBUG: dying image contains surviving class '%s'!", cls->name); + } + + if ((vm_address_t)cls->superclass >= seg && + (vm_address_t)cls->superclass < seg+seg_size) + { + _objc_inform("UNLOAD DEBUG: dying image contains superclass '%s' of surviving class '%s'!", cls->superclass->name, cls->name); + } + } + } +} + + +/*********************************************************************** +* _unload_image +* Only handles MH_BUNDLE for now. +* Locking: loadMethodLock acquired by unmap_image +**********************************************************************/ +void _unload_image(header_info *hi) +{ + loadMethodLock.assertLocked(); + + // Cleanup: + // Remove image's classes from the class list and free auxiliary data. + // Remove image's unresolved or loadable categories and free auxiliary data + // Remove image's unresolved class refs. + _objc_remove_classes_in_image(hi); + _objc_remove_categories_in_image(hi); + _objc_remove_pending_class_refs_in_image(hi); + if (hi->proto_refs) try_free(hi->proto_refs); + + // Perform various debugging checks if requested. + if (DebugUnload) unload_paranoia(hi); +} + +#endif + + +/*********************************************************************** +* objc_addClass. Add the specified class to the table of known classes, +* after doing a little verification and fixup. +**********************************************************************/ +void objc_addClass (Class cls) +{ + OBJC_WARN_DEPRECATED; + + // Synchronize access to hash table + mutex_locker_t lock(classLock); + + // Make sure both the class and the metaclass have caches! 
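+    // (&_objc_empty_cache is a shared, permanently empty cache; pointing an
+    // uninitialized class at it avoids allocating a private cache, and
+    // unload_class() above knows not to free it.)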
+ // Clear all bits of the info fields except CLS_CLASS and CLS_META. + // Normally these bits are already clear but if someone tries to cons + // up their own class on the fly they might need to be cleared. + if (cls->cache == nil) { + cls->cache = (Cache) &_objc_empty_cache; + cls->info = CLS_CLASS; + } + + if (cls->ISA()->cache == nil) { + cls->ISA()->cache = (Cache) &_objc_empty_cache; + cls->ISA()->info = CLS_META; + } + + // methodLists should be: + // 1. nil (Tiger and later only) + // 2. A -1 terminated method list array + // In either case, CLS_NO_METHOD_ARRAY remains clear. + // If the user manipulates the method list directly, + // they must use the magic private format. + + // Add the class to the table + (void) NXHashInsert (class_hash, cls); + + // Superclass is no longer a leaf for cache flushing + if (cls->superclass && (cls->superclass->info & CLS_LEAF)) { + cls->superclass->clearInfo(CLS_LEAF); + cls->superclass->ISA()->clearInfo(CLS_LEAF); + } +} + +/*********************************************************************** +* _objcTweakMethodListPointerForClass. +* Change the class's method list pointer to a method list array. +* Does nothing if the method list pointer is already a method list array. +* If the class is currently in use, methodListLock must be held by the caller. +**********************************************************************/ +static void _objcTweakMethodListPointerForClass(Class cls) +{ + old_method_list * originalList; + const int initialEntries = 4; + size_t mallocSize; + old_method_list ** ptr; + + // Do nothing if methodLists is already an array. + if (cls->methodLists && !(cls->info & CLS_NO_METHOD_ARRAY)) return; + + // Remember existing list + originalList = (old_method_list *) cls->methodLists; + + // Allocate and zero a method list array + mallocSize = sizeof(old_method_list *) * initialEntries; + ptr = (old_method_list **) calloc(1, mallocSize); + + // Insert the existing list into the array + ptr[initialEntries - 1] = END_OF_METHODS_LIST; + ptr[0] = originalList; + + // Replace existing list with array + cls->methodLists = ptr; + cls->clearInfo(CLS_NO_METHOD_ARRAY); +} + + +/*********************************************************************** +* _objc_insertMethods. +* Adds methods to a class. +* Does not flush any method caches. +* Does not take any locks. +* If the class is already in use, use class_addMethods() instead. +**********************************************************************/ +void _objc_insertMethods(Class cls, old_method_list *mlist, old_category *cat) +{ + old_method_list ***list; + old_method_list **ptr; + ptrdiff_t endIndex; + size_t oldSize; + size_t newSize; + + if (!cls->methodLists) { + // cls has no methods - simply use this method list + cls->methodLists = (old_method_list **)mlist; + cls->setInfo(CLS_NO_METHOD_ARRAY); + return; + } + + // Log any existing methods being replaced + if (PrintReplacedMethods) { + int i; + for (i = 0; i < mlist->method_count; i++) { + extern IMP findIMPInClass(Class cls, SEL sel); + SEL sel = sel_registerName((char *)mlist->method_list[i].method_name); + IMP newImp = mlist->method_list[i].method_imp; + IMP oldImp; + + if ((oldImp = findIMPInClass(cls, sel))) { + logReplacedMethod(cls->name, sel, ISMETA(cls), + cat ? 
cat->category_name : nil, + oldImp, newImp); + } + } + } + + // Create method list array if necessary + _objcTweakMethodListPointerForClass(cls); + + list = &cls->methodLists; + + // Locate unused entry for insertion point + ptr = *list; + while ((*ptr != 0) && (*ptr != END_OF_METHODS_LIST)) + ptr += 1; + + // If array is full, add to it + if (*ptr == END_OF_METHODS_LIST) + { + // Calculate old and new dimensions + endIndex = ptr - *list; + oldSize = (endIndex + 1) * sizeof(void *); + newSize = oldSize + sizeof(old_method_list *); // only increase by 1 + + // Grow the method list array by one. + *list = (old_method_list **)realloc(*list, newSize); + + // Zero out addition part of new array + bzero (&((*list)[endIndex]), newSize - oldSize); + + // Place new end marker + (*list)[(newSize/sizeof(void *)) - 1] = END_OF_METHODS_LIST; + + // Insertion point corresponds to old array end + ptr = &((*list)[endIndex]); + } + + // Right shift existing entries by one + bcopy (*list, (*list) + 1, (uint8_t *)ptr - (uint8_t *)*list); + + // Insert at method list at beginning of array + **list = mlist; +} + +/*********************************************************************** +* _objc_removeMethods. +* Remove methods from a class. +* Does not take any locks. +* Does not flush any method caches. +* If the class is currently in use, use class_removeMethods() instead. +**********************************************************************/ +void _objc_removeMethods(Class cls, old_method_list *mlist) +{ + old_method_list ***list; + old_method_list **ptr; + + if (cls->methodLists == nil) { + // cls has no methods + return; + } + if (cls->methodLists == (old_method_list **)mlist) { + // mlist is the class's only method list - erase it + cls->methodLists = nil; + return; + } + if (cls->info & CLS_NO_METHOD_ARRAY) { + // cls has only one method list, and this isn't it - do nothing + return; + } + + // cls has a method list array - search it + + list = &cls->methodLists; + + // Locate list in the array + ptr = *list; + while (*ptr != mlist) { + // fix for radar # 2538790 + if ( *ptr == END_OF_METHODS_LIST ) return; + ptr += 1; + } + + // Remove this entry + *ptr = 0; + + // Left shift the following entries + while (*(++ptr) != END_OF_METHODS_LIST) + *(ptr-1) = *ptr; + *(ptr-1) = 0; +} + +/*********************************************************************** +* _objc_add_category. Install the specified category's methods and +* protocols into the class it augments. +* The class is assumed not to be in use yet: no locks are taken and +* no method caches are flushed. 
+**********************************************************************/ +static inline void _objc_add_category(Class cls, old_category *category, int version) +{ + if (PrintConnecting) { + _objc_inform("CONNECT: attaching category '%s (%s)'", cls->name, category->category_name); + } + + // Augment instance methods + if (category->instance_methods) + _objc_insertMethods (cls, category->instance_methods, category); + + // Augment class methods + if (category->class_methods) + _objc_insertMethods (cls->ISA(), category->class_methods, category); + + // Augment protocols + if ((version >= 5) && category->protocols) + { + if (cls->ISA()->version >= 5) + { + category->protocols->next = cls->protocols; + cls->protocols = category->protocols; + cls->ISA()->protocols = category->protocols; + } + else + { + _objc_inform ("unable to add protocols from category %s...\n", category->category_name); + _objc_inform ("class `%s' must be recompiled\n", category->class_name); + } + } + + // Augment instance properties + if (version >= 7 && category->instance_properties) { + if (cls->ISA()->version >= 6) { + _class_addProperties(cls, category->instance_properties); + } else { + _objc_inform ("unable to add instance properties from category %s...\n", category->category_name); + _objc_inform ("class `%s' must be recompiled\n", category->class_name); + } + } + + // Augment class properties + if (version >= 7 && category->hasClassPropertiesField() && + category->class_properties) + { + if (cls->ISA()->version >= 6) { + _class_addProperties(cls->ISA(), category->class_properties); + } else { + _objc_inform ("unable to add class properties from category %s...\n", category->category_name); + _objc_inform ("class `%s' must be recompiled\n", category->class_name); + } + } +} + +/*********************************************************************** +* _objc_add_category_flush_caches. Install the specified category's +* methods into the class it augments, and flush the class' method cache. +* Return YES if some method caches now need to be flushed. +**********************************************************************/ +static bool _objc_add_category_flush_caches(Class cls, old_category *category, int version) +{ + bool needFlush = NO; + + // Install the category's methods into its intended class + { + mutex_locker_t lock(methodListLock); + _objc_add_category (cls, category, version); + } + + // Queue for cache flushing so category's methods can get called + if (category->instance_methods) { + cls->setInfo(CLS_FLUSH_CACHE); + needFlush = YES; + } + if (category->class_methods) { + cls->ISA()->setInfo(CLS_FLUSH_CACHE); + needFlush = YES; + } + + return needFlush; +} + + +/*********************************************************************** +* reverse_cat +* Reverse the given linked list of pending categories. +* The pending category list is built backwards, and needs to be +* reversed before actually attaching the categories to a class. +* Returns the head of the new linked list. 
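+* For example, if categories A, B and C were registered in that order,
+* the pending list reads C -> B -> A; reverse_cat() returns A -> B -> C,
+* so resolve_categories_for_class() attaches them in discovery order.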
+**********************************************************************/ +static _objc_unresolved_category *reverse_cat(_objc_unresolved_category *cat) +{ + _objc_unresolved_category *prev; + _objc_unresolved_category *cur; + _objc_unresolved_category *ahead; + + if (!cat) return nil; + + prev = nil; + cur = cat; + ahead = cat->next; + + while (cur) { + ahead = cur->next; + cur->next = prev; + prev = cur; + cur = ahead; + } + + return prev; +} + + +/*********************************************************************** +* resolve_categories_for_class. +* Install all existing categories intended for the specified class. +* cls must be a true class and not a metaclass. +**********************************************************************/ +static void resolve_categories_for_class(Class cls) +{ + _objc_unresolved_category * pending; + _objc_unresolved_category * next; + + // Nothing to do if there are no categories at all + if (!category_hash) return; + + // Locate and remove first element in category list + // associated with this class + pending = (_objc_unresolved_category *) + NXMapKeyFreeingRemove (category_hash, cls->name); + + // Traverse the list of categories, if any, registered for this class + + // The pending list is built backwards. Reverse it and walk forwards. + pending = reverse_cat(pending); + + while (pending) { + if (pending->cat) { + // Install the category + // use the non-flush-cache version since we are only + // called from the class intialization code + _objc_add_category(cls, pending->cat, (int)pending->version); + } + + // Delink and reclaim this registration + next = pending->next; + free(pending); + pending = next; + } +} + + +/*********************************************************************** +* _objc_resolve_categories_for_class. +* Public version of resolve_categories_for_class. This was +* exported pre-10.4 for Omni et al. to workaround a problem +* with too-lazy category attachment. +* cls should be a class, but this function can also cope with metaclasses. +**********************************************************************/ +void _objc_resolve_categories_for_class(Class cls) +{ + // If cls is a metaclass, get the class. + // resolve_categories_for_class() requires a real class to work correctly. + if (ISMETA(cls)) { + if (strncmp(cls->name, "_%", 2) == 0) { + // Posee's meta's name is smashed and isn't in the class_hash, + // so objc_getClass doesn't work. + const char *baseName = strchr(cls->name, '%'); // get posee's real name + cls = objc_getClass(baseName); + } else { + cls = objc_getClass(cls->name); + } + } + + resolve_categories_for_class(cls); +} + + +/*********************************************************************** +* _objc_register_category. +* Process a category read from an image. +* If the category's class exists, attach the category immediately. +* Classes that need cache flushing are marked but not flushed. +* If the category's class does not exist yet, pend the category for +* later attachment. Pending categories are attached in the order +* they were discovered. +* Returns YES if some method caches now need to be flushed. +**********************************************************************/ +static bool _objc_register_category(old_category *cat, int version) +{ + _objc_unresolved_category * new_cat; + _objc_unresolved_category * old; + Class theClass; + + // If the category's class exists, attach the category. 
+ if ((theClass = objc_lookUpClass(cat->class_name))) { + return _objc_add_category_flush_caches(theClass, cat, version); + } + + // If the category's class exists but is unconnected, + // then attach the category to the class but don't bother + // flushing any method caches (because they must be empty). + // YES unconnected, NO class_handler + if ((theClass = look_up_class(cat->class_name, YES, NO))) { + _objc_add_category(theClass, cat, version); + return NO; + } + + + // Category's class does not exist yet. + // Save the category for later attachment. + + if (PrintConnecting) { + _objc_inform("CONNECT: pending category '%s (%s)'", cat->class_name, cat->category_name); + } + + // Create category lookup table if needed + if (!category_hash) + category_hash = NXCreateMapTable(NXStrValueMapPrototype, 128); + + // Locate an existing list of categories, if any, for the class. + old = (_objc_unresolved_category *) + NXMapGet (category_hash, cat->class_name); + + // Register the category to be fixed up later. + // The category list is built backwards, and is reversed again + // by resolve_categories_for_class(). + new_cat = (_objc_unresolved_category *) + malloc(sizeof(_objc_unresolved_category)); + new_cat->next = old; + new_cat->cat = cat; + new_cat->version = version; + (void) NXMapKeyCopyingInsert (category_hash, cat->class_name, new_cat); + + return NO; +} + + +const char ** +_objc_copyClassNamesForImage(header_info *hi, unsigned int *outCount) +{ + Module mods; + unsigned int m; + const char **list; + int count; + int allocated; + + list = nil; + count = 0; + allocated = 0; + + mods = hi->mod_ptr; + for (m = 0; m < hi->mod_count; m++) { + int d; + + if (!mods[m].symtab) continue; + + for (d = 0; d < mods[m].symtab->cls_def_cnt; d++) { + Class cls = (Class)mods[m].symtab->defs[d]; + // fixme what about future-ified classes? + if (cls->isConnected()) { + if (count == allocated) { + allocated = allocated*2 + 16; + list = (const char **) + realloc((void *)list, allocated * sizeof(char *)); + } + list[count++] = cls->name; + } + } + } + + if (count > 0) { + // nil-terminate non-empty list + if (count == allocated) { + allocated = allocated+1; + list = (const char **) + realloc((void *)list, allocated * sizeof(char *)); + } + list[count] = nil; + } + + if (outCount) *outCount = count; + return list; +} + +Class gdb_class_getClass(Class cls) +{ + const char *className = cls->name; + if(!className || !strlen(className)) return Nil; + Class rCls = look_up_class(className, NO, NO); + return rCls; + +} + +Class gdb_object_getClass(id obj) +{ + if (!obj) return nil; + return gdb_class_getClass(obj->getIsa()); +} + + +/*********************************************************************** +* Lock management +**********************************************************************/ +rwlock_t selLock; +mutex_t classLock; +mutex_t methodListLock; +mutex_t cacheUpdateLock; +recursive_mutex_t loadMethodLock; + +void lock_init(void) +{ +} + + +#endif diff --git a/runtime/objc-runtime.h b/runtime/objc-runtime.h new file mode 100644 index 0000000..4599f08 --- /dev/null +++ b/runtime/objc-runtime.h @@ -0,0 +1,2 @@ +#include +#include diff --git a/runtime/objc-runtime.mm b/runtime/objc-runtime.mm new file mode 100644 index 0000000..e9efe55 --- /dev/null +++ b/runtime/objc-runtime.mm @@ -0,0 +1,1033 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/*********************************************************************** +* objc-runtime.m +* Copyright 1988-1996, NeXT Software, Inc. +* Author: s. naroff +* +**********************************************************************/ + + + +/*********************************************************************** +* Imports. +**********************************************************************/ + +#include "objc-private.h" +#include "objc-loadmethod.h" +#include "message.h" + +OBJC_EXPORT Class getOriginalClassForPosingClass(Class); + + +/*********************************************************************** +* Exports. +**********************************************************************/ + +// Settings from environment variables +#define OPTION(var, env, help) bool var = false; +#include "objc-env.h" +#undef OPTION + +struct option_t { + bool* var; + const char *env; + const char *help; + size_t envlen; +}; + +const option_t Settings[] = { +#define OPTION(var, env, help) option_t{&var, #env, help, strlen(#env)}, +#include "objc-env.h" +#undef OPTION +}; + + +// objc's key for pthread_getspecific +static tls_key_t _objc_pthread_key; + +// Selectors +SEL SEL_load = NULL; +SEL SEL_initialize = NULL; +SEL SEL_resolveInstanceMethod = NULL; +SEL SEL_resolveClassMethod = NULL; +SEL SEL_cxx_construct = NULL; +SEL SEL_cxx_destruct = NULL; +SEL SEL_retain = NULL; +SEL SEL_release = NULL; +SEL SEL_autorelease = NULL; +SEL SEL_retainCount = NULL; +SEL SEL_alloc = NULL; +SEL SEL_allocWithZone = NULL; +SEL SEL_dealloc = NULL; +SEL SEL_copy = NULL; +SEL SEL_new = NULL; +SEL SEL_forwardInvocation = NULL; +SEL SEL_tryRetain = NULL; +SEL SEL_isDeallocating = NULL; +SEL SEL_retainWeakReference = NULL; +SEL SEL_allowsWeakReference = NULL; + + +header_info *FirstHeader = 0; // NULL means empty list +header_info *LastHeader = 0; // NULL means invalid; recompute it +int HeaderCount = 0; + + +/*********************************************************************** +* objc_getClass. Return the id of the named class. If the class does +* not exist, call _objc_classLoader and then objc_classHandler, either of +* which may create a new class. +* Warning: doesn't work if aClassName is the name of a posed-for class's isa! +**********************************************************************/ +Class objc_getClass(const char *aClassName) +{ + if (!aClassName) return Nil; + + // NO unconnected, YES class handler + return look_up_class(aClassName, NO, YES); +} + + +/*********************************************************************** +* objc_getRequiredClass. 
+* Same as objc_getClass, but kills the process if the class is not found. +* This is used by ZeroLink, where failing to find a class would be a +* compile-time link error without ZeroLink. +**********************************************************************/ +Class objc_getRequiredClass(const char *aClassName) +{ + Class cls = objc_getClass(aClassName); + if (!cls) _objc_fatal("link error: class '%s' not found.", aClassName); + return cls; +} + + +/*********************************************************************** +* objc_lookUpClass. Return the id of the named class. +* If the class does not exist, call _objc_classLoader, which may create +* a new class. +* +* Formerly objc_getClassWithoutWarning () +**********************************************************************/ +Class objc_lookUpClass(const char *aClassName) +{ + if (!aClassName) return Nil; + + // NO unconnected, NO class handler + return look_up_class(aClassName, NO, NO); +} + + +/*********************************************************************** +* objc_getMetaClass. Return the id of the meta class the named class. +* Warning: doesn't work if aClassName is the name of a posed-for class's isa! +**********************************************************************/ +Class objc_getMetaClass(const char *aClassName) +{ + Class cls; + + if (!aClassName) return Nil; + + cls = objc_getClass (aClassName); + if (!cls) + { + _objc_inform ("class `%s' not linked into application", aClassName); + return Nil; + } + + return cls->ISA(); +} + + +/*********************************************************************** +* appendHeader. Add a newly-constructed header_info to the list. +**********************************************************************/ +void appendHeader(header_info *hi) +{ + // Add the header to the header list. + // The header is appended to the list, to preserve the bottom-up order. + HeaderCount++; + hi->setNext(NULL); + if (!FirstHeader) { + // list is empty + FirstHeader = LastHeader = hi; + } else { + if (!LastHeader) { + // list is not empty, but LastHeader is invalid - recompute it + LastHeader = FirstHeader; + while (LastHeader->getNext()) LastHeader = LastHeader->getNext(); + } + // LastHeader is now valid + LastHeader->setNext(hi); + LastHeader = hi; + } +} + + +/*********************************************************************** +* removeHeader +* Remove the given header from the header list. +* FirstHeader is updated. +* LastHeader is set to NULL. Any code that uses LastHeader must +* detect this NULL and recompute LastHeader by traversing the list. +**********************************************************************/ +void removeHeader(header_info *hi) +{ + header_info *prev = NULL; + header_info *current = NULL; + + for (current = FirstHeader; current != NULL; current = current->getNext()) { + if (current == hi) { + header_info *deadHead = current; + + // Remove from the linked list. + if (prev) + prev->setNext(current->getNext()); + else + FirstHeader = current->getNext(); // no prev so removing head + + // Update LastHeader if necessary. + if (LastHeader == deadHead) { + LastHeader = NULL; // will be recomputed next time it's used + } + + HeaderCount--; + break; + } + prev = current; + } +} + + +/*********************************************************************** +* environ_init +* Read environment variables that affect the runtime. +* Also print environment variable help, if requested. 
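+* Illustrative use (the binary name is hypothetical):
+*   $ OBJC_HELP=YES OBJC_PRINT_OPTIONS=YES ./SomeApp
+* prints every known OBJC_* variable with its help text and reports which
+* ones are set. A variable is only honored when its value is exactly
+* "YES", and all OBJC_* variables are ignored for setuid/setgid binaries.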
+**********************************************************************/ +void environ_init(void) +{ + if (issetugid()) { + // All environment variables are silently ignored when setuid or setgid + // This includes OBJC_HELP and OBJC_PRINT_OPTIONS themselves. + return; + } + + bool PrintHelp = false; + bool PrintOptions = false; + bool maybeMallocDebugging = false; + + // Scan environ[] directly instead of calling getenv() a lot. + // This optimizes the case where none are set. + for (char **p = *_NSGetEnviron(); *p != nil; p++) { + if (0 == strncmp(*p, "Malloc", 6) || 0 == strncmp(*p, "DYLD", 4) || + 0 == strncmp(*p, "NSZombiesEnabled", 16)) + { + maybeMallocDebugging = true; + } + + if (0 != strncmp(*p, "OBJC_", 5)) continue; + + if (0 == strncmp(*p, "OBJC_HELP=", 10)) { + PrintHelp = true; + continue; + } + if (0 == strncmp(*p, "OBJC_PRINT_OPTIONS=", 19)) { + PrintOptions = true; + continue; + } + + const char *value = strchr(*p, '='); + if (!*value) continue; + value++; + + for (size_t i = 0; i < sizeof(Settings)/sizeof(Settings[0]); i++) { + const option_t *opt = &Settings[i]; + if ((size_t)(value - *p) == 1+opt->envlen && + 0 == strncmp(*p, opt->env, opt->envlen)) + { + *opt->var = (0 == strcmp(value, "YES")); + break; + } + } + } + + // Special case: enable some autorelease pool debugging + // when some malloc debugging is enabled + // and OBJC_DEBUG_POOL_ALLOCATION is not set to something other than NO. + if (maybeMallocDebugging) { + const char *insert = getenv("DYLD_INSERT_LIBRARIES"); + const char *zombie = getenv("NSZombiesEnabled"); + const char *pooldebug = getenv("OBJC_DEBUG_POOL_ALLOCATION"); + if ((getenv("MallocStackLogging") + || getenv("MallocStackLoggingNoCompact") + || (zombie && (*zombie == 'Y' || *zombie == 'y')) + || (insert && strstr(insert, "libgmalloc"))) + && + (!pooldebug || 0 == strcmp(pooldebug, "YES"))) + { + DebugPoolAllocation = true; + } + } + + // Print OBJC_HELP and OBJC_PRINT_OPTIONS output. + if (PrintHelp || PrintOptions) { + if (PrintHelp) { + _objc_inform("Objective-C runtime debugging. Set variable=YES to enable."); + _objc_inform("OBJC_HELP: describe available environment variables"); + if (PrintOptions) { + _objc_inform("OBJC_HELP is set"); + } + _objc_inform("OBJC_PRINT_OPTIONS: list which options are set"); + } + if (PrintOptions) { + _objc_inform("OBJC_PRINT_OPTIONS is set"); + } + + for (size_t i = 0; i < sizeof(Settings)/sizeof(Settings[0]); i++) { + const option_t *opt = &Settings[i]; + if (PrintHelp) _objc_inform("%s: %s", opt->env, opt->help); + if (PrintOptions && *opt->var) _objc_inform("%s is set", opt->env); + } + } +} + + +/*********************************************************************** +* logReplacedMethod +* OBJC_PRINT_REPLACED_METHODS implementation +**********************************************************************/ +void +logReplacedMethod(const char *className, SEL s, + bool isMeta, const char *catName, + IMP oldImp, IMP newImp) +{ + const char *oldImage = "??"; + const char *newImage = "??"; + + // Silently ignore +load replacement because category +load is special + if (s == SEL_load) return; + +#if TARGET_OS_WIN32 + // don't know dladdr()/dli_fname equivalent +#else + Dl_info dl; + + if (dladdr((void*)oldImp, &dl) && dl.dli_fname) oldImage = dl.dli_fname; + if (dladdr((void*)newImp, &dl) && dl.dli_fname) newImage = dl.dli_fname; +#endif + + _objc_inform("REPLACED: %c[%s %s] %s%s (IMP was %p (%s), now %p (%s))", + isMeta ? '+' : '-', className, sel_getName(s), + catName ? "by category " : "", catName ? 
catName : "", + oldImp, oldImage, newImp, newImage); +} + + + +/*********************************************************************** +* objc_setMultithreaded. +**********************************************************************/ +void objc_setMultithreaded (BOOL flag) +{ + OBJC_WARN_DEPRECATED; + + // Nothing here. Thread synchronization in the runtime is always active. +} + + +/*********************************************************************** +* _objc_fetch_pthread_data +* Fetch objc's pthread data for this thread. +* If the data doesn't exist yet and create is NO, return NULL. +* If the data doesn't exist yet and create is YES, allocate and return it. +**********************************************************************/ +_objc_pthread_data *_objc_fetch_pthread_data(bool create) +{ + _objc_pthread_data *data; + + data = (_objc_pthread_data *)tls_get(_objc_pthread_key); + if (!data && create) { + data = (_objc_pthread_data *) + calloc(1, sizeof(_objc_pthread_data)); + tls_set(_objc_pthread_key, data); + } + + return data; +} + + +/*********************************************************************** +* _objc_pthread_destroyspecific +* Destructor for objc's per-thread data. +* arg shouldn't be NULL, but we check anyway. +**********************************************************************/ +extern void _destroyInitializingClassList(struct _objc_initializing_classes *list); +void _objc_pthread_destroyspecific(void *arg) +{ + _objc_pthread_data *data = (_objc_pthread_data *)arg; + if (data != NULL) { + _destroyInitializingClassList(data->initializingClasses); + _destroySyncCache(data->syncCache); + _destroyAltHandlerList(data->handlerList); + for (int i = 0; i < (int)countof(data->printableNames); i++) { + if (data->printableNames[i]) { + free(data->printableNames[i]); + } + } + + // add further cleanup here... + + free(data); + } +} + + +void tls_init(void) +{ +#if SUPPORT_DIRECT_THREAD_KEYS + _objc_pthread_key = TLS_DIRECT_KEY; + pthread_key_init_np(TLS_DIRECT_KEY, &_objc_pthread_destroyspecific); +#else + _objc_pthread_key = tls_create(&_objc_pthread_destroyspecific); +#endif +} + + +/*********************************************************************** +* _objcInit +* Former library initializer. This function is now merely a placeholder +* for external callers. All runtime initialization has now been moved +* to map_images() and _objc_init. +**********************************************************************/ +void _objcInit(void) +{ + // do nothing +} + + +/*********************************************************************** +* objc_setForwardHandler +**********************************************************************/ + +#if !__OBJC2__ + +// Default forward handler (nil) goes to forward:: dispatch. +void *_objc_forward_handler = nil; +void *_objc_forward_stret_handler = nil; + +#else + +// Default forward handler halts the process. +__attribute__((noreturn)) void +objc_defaultForwardHandler(id self, SEL sel) +{ + _objc_fatal("%c[%s %s]: unrecognized selector sent to instance %p " + "(no message forward handler is installed)", + class_isMetaClass(object_getClass(self)) ? 
'+' : '-', + object_getClassName(self), sel_getName(sel), self); +} +void *_objc_forward_handler = (void*)objc_defaultForwardHandler; + +#if SUPPORT_STRET +struct stret { int i[100]; }; +__attribute__((noreturn)) struct stret +objc_defaultForwardStretHandler(id self, SEL sel) +{ + objc_defaultForwardHandler(self, sel); +} +void *_objc_forward_stret_handler = (void*)objc_defaultForwardStretHandler; +#endif + +#endif + +void objc_setForwardHandler(void *fwd, void *fwd_stret) +{ + _objc_forward_handler = fwd; +#if SUPPORT_STRET + _objc_forward_stret_handler = fwd_stret; +#endif +} + + +#if !__OBJC2__ +// GrP fixme +extern "C" Class _objc_getOrigClass(const char *name); +#endif +const char *class_getImageName(Class cls) +{ +#if TARGET_OS_WIN32 + TCHAR *szFileName; + DWORD charactersCopied; + Class origCls; + HMODULE classModule; + bool res; +#endif + if (!cls) return NULL; + +#if !__OBJC2__ + cls = _objc_getOrigClass(cls->demangledName()); +#endif +#if TARGET_OS_WIN32 + charactersCopied = 0; + szFileName = malloc(MAX_PATH * sizeof(TCHAR)); + + origCls = objc_getOrigClass(cls->demangledName()); + classModule = NULL; + res = GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS, (LPCTSTR)origCls, &classModule); + if (res && classModule) { + charactersCopied = GetModuleFileName(classModule, szFileName, MAX_PATH * sizeof(TCHAR)); + } + if (classModule) FreeLibrary(classModule); + if (charactersCopied) { + return (const char *)szFileName; + } else { + free(szFileName); + } + return NULL; +#else + return dyld_image_path_containing_address(cls); +#endif +} + + +const char **objc_copyImageNames(unsigned int *outCount) +{ + header_info *hi; + int count = 0; + int max = HeaderCount; +#if TARGET_OS_WIN32 + const TCHAR **names = (const TCHAR **)calloc(max+1, sizeof(TCHAR *)); +#else + const char **names = (const char **)calloc(max+1, sizeof(char *)); +#endif + + for (hi = FirstHeader; hi != NULL && count < max; hi = hi->getNext()) { +#if TARGET_OS_WIN32 + if (hi->moduleName) { + names[count++] = hi->moduleName; + } +#else + const char *fname = hi->fname(); + if (fname) { + names[count++] = fname; + } +#endif + } + names[count] = NULL; + + if (count == 0) { + // Return NULL instead of empty list if there are no images + free((void *)names); + names = NULL; + } + + if (outCount) *outCount = count; + return names; +} + + +/********************************************************************** +* +**********************************************************************/ +const char ** +objc_copyClassNamesForImage(const char *image, unsigned int *outCount) +{ + header_info *hi; + + if (!image) { + if (outCount) *outCount = 0; + return NULL; + } + + // Find the image. 
+ for (hi = FirstHeader; hi != NULL; hi = hi->getNext()) { +#if TARGET_OS_WIN32 + if (0 == wcscmp((TCHAR *)image, hi->moduleName)) break; +#else + if (0 == strcmp(image, hi->fname())) break; +#endif + } + + if (!hi) { + if (outCount) *outCount = 0; + return NULL; + } + + return _objc_copyClassNamesForImage(hi, outCount); +} + + +/********************************************************************** +* Fast Enumeration Support +**********************************************************************/ + +static void (*enumerationMutationHandler)(id); + +/********************************************************************** +* objc_enumerationMutation +* called by compiler when a mutation is detected during foreach iteration +**********************************************************************/ +void objc_enumerationMutation(id object) { + if (enumerationMutationHandler == nil) { + _objc_fatal("mutation detected during 'for(... in ...)' enumeration of object %p.", (void*)object); + } + (*enumerationMutationHandler)(object); +} + + +/********************************************************************** +* objc_setEnumerationMutationHandler +* an entry point to customize mutation error handing +**********************************************************************/ +void objc_setEnumerationMutationHandler(void (*handler)(id)) { + enumerationMutationHandler = handler; +} + + +/********************************************************************** +* Associative Reference Support +**********************************************************************/ + +id objc_getAssociatedObject(id object, const void *key) { + return _object_get_associative_reference(object, (void *)key); +} + + +void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) { + _object_set_associative_reference(object, (void *)key, value, policy); +} + + +void objc_removeAssociatedObjects(id object) +{ + if (object && object->hasAssociatedObjects()) { + _object_remove_assocations(object); + } +} + + + +#if SUPPORT_GC_COMPAT + +#include + +// GC preflight for an app executable. + +enum GCness { + WithGC = 1, + WithoutGC = 0, + Error = -1 +}; + +// Overloaded template wrappers around clang's overflow-checked arithmetic. 
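+// These wrappers pick the __builtin_*_overflow() variant that matches the
+// operand width. The builtins return true when the result wrapped, so a
+// typical (hypothetical) caller can bail out before using a bad value:
+//   uint64_t end;
+//   if (uadd_overflow(offset, size, &end)) return Error;   // overflowed
+// FileSlice below uses exactly this pattern for its bounds checks.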
+ +template bool uadd_overflow(T x, T y, T* sum); +template bool usub_overflow(T x, T y, T* diff); +template bool umul_overflow(T x, T y, T* prod); + +template bool sadd_overflow(T x, T y, T* sum); +template bool ssub_overflow(T x, T y, T* diff); +template bool smul_overflow(T x, T y, T* prod); + +template <> bool uadd_overflow(unsigned x, unsigned y, unsigned* sum) { return __builtin_uadd_overflow(x, y, sum); } +template <> bool uadd_overflow(unsigned long x, unsigned long y, unsigned long* sum) { return __builtin_uaddl_overflow(x, y, sum); } +template <> bool uadd_overflow(unsigned long long x, unsigned long long y, unsigned long long* sum) { return __builtin_uaddll_overflow(x, y, sum); } + +template <> bool usub_overflow(unsigned x, unsigned y, unsigned* diff) { return __builtin_usub_overflow(x, y, diff); } +template <> bool usub_overflow(unsigned long x, unsigned long y, unsigned long* diff) { return __builtin_usubl_overflow(x, y, diff); } +template <> bool usub_overflow(unsigned long long x, unsigned long long y, unsigned long long* diff) { return __builtin_usubll_overflow(x, y, diff); } + +template <> bool umul_overflow(unsigned x, unsigned y, unsigned* prod) { return __builtin_umul_overflow(x, y, prod); } +template <> bool umul_overflow(unsigned long x, unsigned long y, unsigned long* prod) { return __builtin_umull_overflow(x, y, prod); } +template <> bool umul_overflow(unsigned long long x, unsigned long long y, unsigned long long* prod) { return __builtin_umulll_overflow(x, y, prod); } + +template <> bool sadd_overflow(signed x, signed y, signed* sum) { return __builtin_sadd_overflow(x, y, sum); } +template <> bool sadd_overflow(signed long x, signed long y, signed long* sum) { return __builtin_saddl_overflow(x, y, sum); } +template <> bool sadd_overflow(signed long long x, signed long long y, signed long long* sum) { return __builtin_saddll_overflow(x, y, sum); } + +template <> bool ssub_overflow(signed x, signed y, signed* diff) { return __builtin_ssub_overflow(x, y, diff); } +template <> bool ssub_overflow(signed long x, signed long y, signed long* diff) { return __builtin_ssubl_overflow(x, y, diff); } +template <> bool ssub_overflow(signed long long x, signed long long y, signed long long* diff) { return __builtin_ssubll_overflow(x, y, diff); } + +template <> bool smul_overflow(signed x, signed y, signed* prod) { return __builtin_smul_overflow(x, y, prod); } +template <> bool smul_overflow(signed long x, signed long y, signed long* prod) { return __builtin_smull_overflow(x, y, prod); } +template <> bool smul_overflow(signed long long x, signed long long y, signed long long* prod) { return __builtin_smulll_overflow(x, y, prod); } + + +// Range-checking subview of a file. +class FileSlice { + int fd; + uint64_t sliceOffset; + uint64_t sliceSize; + +public: + FileSlice() : fd(-1), sliceOffset(0), sliceSize(0) { } + + FileSlice(int newfd, uint64_t newOffset, uint64_t newSize) + : fd(newfd) , sliceOffset(newOffset) , sliceSize(newSize) { } + + // Read bytes from this slice. + // Returns YES if all bytes were read successfully. 
+ bool pread(void *buf, uint64_t readSize, uint64_t readOffset = 0) { + uint64_t readEnd; + if (uadd_overflow(readOffset, readSize, &readEnd)) return NO; + if (readEnd > sliceSize) return NO; + + uint64_t preadOffset; + if (uadd_overflow(sliceOffset, readOffset, &preadOffset)) return NO; + + int64_t readed = ::pread(fd, buf, (size_t)readSize, preadOffset); + if (readed < 0 || (uint64_t)readed != readSize) return NO; + return YES; + } + + // Create a new slice that is a subset of this slice. + // Returnes YES if successful. + bool slice(uint64_t newOffset, uint64_t newSize, FileSlice& result) { + // fixme arithmetic overflow + uint64_t newEnd; + if (uadd_overflow(newOffset, newSize, &newEnd)) return NO; + if (newEnd > sliceSize) return NO; + + if (uadd_overflow(sliceOffset, newOffset, &result.sliceOffset)) { + return NO; + } + result.sliceSize = newSize; + result.fd = fd; + return YES; + } + + // Shorten this slice in place by removing a range from the start. + bool advance(uint64_t distance) { + if (distance > sliceSize) return NO; + if (uadd_overflow(sliceOffset, distance, &sliceOffset)) return NO; + if (usub_overflow(sliceSize, distance, &sliceSize)) return NO; + return YES; + } +}; + + +// Arch32 and Arch64 are used to specialize sliceRequiresGC() +// to interrogate old-ABI i386 and new-ABI x86_64 files. + +struct Arch32 { + using mh_t = struct mach_header; + using segment_command_t = struct segment_command; + using section_t = struct section; + + enum : cpu_type_t { cputype = CPU_TYPE_X86 }; + enum : int { segment_cmd = LC_SEGMENT }; + + static bool isObjCSegment(const char *segname) { + return segnameEquals(segname, "__OBJC"); + } + + static bool isImageInfoSection(const char *sectname) { + return sectnameEquals(sectname, "__image_info"); + } + + static bool countClasses(FileSlice file, section_t& sect, + int& classCount, int& classrefCount) + { + if (sectnameEquals(sect.sectname, "__cls_refs")) { + classrefCount += sect.size / 4; + } + else if (sectnameEquals(sect.sectname, "__module_info")) { + struct module_t { + uint32_t version; + uint32_t size; + uint32_t name; // not bound + uint32_t symtab; // not bound + }; + size_t mod_count = sect.size / sizeof(module_t); + if (mod_count == 0) { + // no classes defined + } else if (mod_count > 1) { + // AppleScriptObjC apps only have one module. + // Disqualify this app by setting classCount to non-zero. + // We don't actually need an accurate count. + classCount = 1; + } else if (mod_count == 1) { + FileSlice moduleSlice; + if (!file.slice(sect.offset, sect.size, moduleSlice)) return NO; + module_t module; + if (!moduleSlice.pread(&module, sizeof(module))) return NO; + if (module.symtab) { + // AppleScriptObjC apps only have a module with no symtab. + // Disqualify this app by setting classCount to non-zero. + // We don't actually need an accurate count. 
+ classCount = 1; + } + } + + } + return YES; + } + +}; + +struct Arch64 { + using mh_t = struct mach_header_64; + using segment_command_t = struct segment_command_64; + using section_t = struct section_64; + + enum : cpu_type_t { cputype = CPU_TYPE_X86_64 }; + enum : int { segment_cmd = LC_SEGMENT_64 }; + + static bool isObjCSegment(const char *segname) { + return + segnameEquals(segname, "__DATA") || + segnameEquals(segname, "__DATA_CONST") || + segnameEquals(segname, "__DATA_DIRTY"); + } + + static bool isImageInfoSection(const char *sectname) { + return sectnameEquals(sectname, "__objc_imageinfo"); + } + + static bool countClasses(FileSlice, section_t& sect, + int& classCount, int& classrefCount) + { + if (sectnameEquals(sect.sectname, "__objc_classlist")) { + classCount += sect.size / 8; + } + else if (sectnameEquals(sect.sectname, "__objc_classrefs")) { + classrefCount += sect.size / 8; + } + return YES; + } +}; + + +#define SANE_HEADER_SIZE (32*1024) + +template +static int sliceRequiresGC(typename Arch::mh_t mh, FileSlice file) +{ + // We assume there is only one arch per pointer size that can support GC. + // (i386 and x86_64) + if (mh.cputype != Arch::cputype) return 0; + + // We only check the main executable. + if (mh.filetype != MH_EXECUTE) return 0; + + // Look for ObjC segment. + // Look for AppleScriptObjC linkage. + FileSlice cmds; + if (!file.slice(sizeof(mh), mh.sizeofcmds, cmds)) return Error; + + // Exception: Some AppleScriptObjC apps built for GC can run without GC. + // 1. executable defines no classes + // 2. executable references NSBundle only + // 3. executable links to AppleScriptObjC.framework + // Note that shouldRejectGCApp() also knows about this. + bool wantsGC = NO; + bool linksToAppleScriptObjC = NO; + int classCount = 0; + int classrefCount = 0; + + // Disallow abusively-large executables that could hang this checker. + // dyld performs similar checks (MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE) + if (mh.sizeofcmds > SANE_HEADER_SIZE) return Error; + if (mh.ncmds > mh.sizeofcmds / sizeof(struct load_command)) return Error; + + for (uint32_t cmdindex = 0; cmdindex < mh.ncmds; cmdindex++) { + struct load_command lc; + if (!cmds.pread(&lc, sizeof(lc))) return Error; + + // Disallow abusively-small load commands that could hang this checker. + // dyld performs a similar check. + if (lc.cmdsize < sizeof(lc)) return Error; + + if (lc.cmd == LC_LOAD_DYLIB || lc.cmd == LC_LOAD_UPWARD_DYLIB || + lc.cmd == LC_LOAD_WEAK_DYLIB || lc.cmd == LC_REEXPORT_DYLIB) + { + // Look for AppleScriptObjC linkage. + FileSlice dylibSlice; + if (!cmds.slice(0, lc.cmdsize, dylibSlice)) return Error; + struct dylib_command dylib; + if (!dylibSlice.pread(&dylib, sizeof(dylib))) return Error; + + const char *asoFramework = + "/System/Library/Frameworks/AppleScriptObjC.framework" + "/Versions/A/AppleScriptObjC"; + size_t asoLen = strlen(asoFramework); + + FileSlice nameSlice; + if (dylibSlice.slice(dylib.dylib.name.offset, asoLen, nameSlice)) { + char name[asoLen]; + if (!nameSlice.pread(name, asoLen)) return Error; + if (0 == memcmp(name, asoFramework, asoLen)) { + linksToAppleScriptObjC = YES; + } + } + } + else if (lc.cmd == Arch::segment_cmd) { + typename Arch::segment_command_t seg; + if (!cmds.pread(&seg, sizeof(seg))) return Error; + + if (Arch::isObjCSegment(seg.segname)) { + // ObjC segment. + // Look for image info section. + // Look for class implementations and class references. 
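+                // The section headers sit immediately after the
+                // segment_command within this load command, hence the
+                // slice over seg.cmdsize followed by advance(sizeof(seg)).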
+ FileSlice sections; + if (!cmds.slice(0, seg.cmdsize, sections)) return Error; + if (!sections.advance(sizeof(seg))) return Error; + + for (uint32_t segindex = 0; segindex < seg.nsects; segindex++) { + typename Arch::section_t sect; + if (!sections.pread(§, sizeof(sect))) return Error; + if (!Arch::isObjCSegment(sect.segname)) return Error; + + if (!Arch::countClasses(file, sect, + classCount, classrefCount)) + { + return Error; + } + + if ((sect.flags & SECTION_TYPE) == S_REGULAR && + Arch::isImageInfoSection(sect.sectname)) + { + // ObjC image info section. + // Check its contents. + FileSlice section; + if (!file.slice(sect.offset, sect.size, section)) { + return Error; + } + // The subset of objc_image_info that was in use for GC. + struct { + uint32_t version; + uint32_t flags; + } ii; + if (!section.pread(&ii, sizeof(ii))) return Error; + if (ii.flags & (1<<1)) { + // App wants GC. + // Don't return yet because we need to + // check the AppleScriptObjC exception. + wantsGC = YES; + } + } + + if (!sections.advance(sizeof(sect))) return Error; + } + } + } + + if (!cmds.advance(lc.cmdsize)) return Error; + } + + if (!wantsGC) { + // No GC bit set. + return WithoutGC; + } + else if (linksToAppleScriptObjC && classCount == 0 && classrefCount == 1) { + // Has GC bit but falls under the AppleScriptObjC exception. + return WithoutGC; + } + else { + // Has GC bit and is not AppleScriptObjC. + return WithGC; + } +} + + +static int sliceRequiresGC(FileSlice file) +{ + // Read mach-o header. + struct mach_header_64 mh; + if (!file.pread(&mh, sizeof(mh))) return Error; + + // Check header magic. We assume only host-endian slices can support GC. + switch (mh.magic) { + case MH_MAGIC: + return sliceRequiresGC(*(struct mach_header *)&mh, file); + case MH_MAGIC_64: + return sliceRequiresGC(mh, file); + default: + return WithoutGC; + } +} + + +// Returns 1 if any slice requires GC. +// Returns 0 if no slice requires GC. +// Returns -1 on any I/O or file format error. +int objc_appRequiresGC(int fd) +{ + struct stat st; + if (fstat(fd, &st) < 0) return Error; + + FileSlice file(fd, 0, st.st_size); + + // Read fat header, if any. + struct fat_header fh; + + if (! file.pread(&fh, sizeof(fh))) return Error; + + int result; + + if (OSSwapBigToHostInt32(fh.magic) == FAT_MAGIC) { + // Fat header. + + size_t nfat_arch = OSSwapBigToHostInt32(fh.nfat_arch); + // Disallow abusively-large files that could hang this checker. + if (nfat_arch > SANE_HEADER_SIZE/sizeof(struct fat_arch)) return Error; + + size_t fat_size; + if (umul_overflow(nfat_arch, sizeof(struct fat_arch), &fat_size)) { + return Error; + } + + FileSlice archlist; + if (!file.slice(sizeof(fh), fat_size, archlist)) return Error; + + result = WithoutGC; + for (size_t i = 0; i < nfat_arch; i++) { + struct fat_arch fa; + if (!archlist.pread(&fa, sizeof(fa))) return Error; + if (!archlist.advance(sizeof(fa))) return Error; + + FileSlice thin; + if (!file.slice(OSSwapBigToHostInt32(fa.offset), + OSSwapBigToHostInt32(fa.size), thin)) + { + return Error; + } + switch (sliceRequiresGC(thin)) { + case WithoutGC: break; // no change + case WithGC: if (result != Error) result = WithGC; break; + case Error: result = Error; break; + } + } + } + else { + // Thin header or not a header. 
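+        // (A fat/universal file starts with a big-endian fat_header,
+        // FAT_MAGIC 0xcafebabe; a thin file starts directly with a
+        // mach_header, and any other magic is reported as WithoutGC
+        // by sliceRequiresGC's magic switch above.)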
+ result = sliceRequiresGC(file); + } + + return result; +} + +// SUPPORT_GC_COMPAT +#endif diff --git a/runtime/objc-sel-old.mm b/runtime/objc-sel-old.mm new file mode 100644 index 0000000..c956494 --- /dev/null +++ b/runtime/objc-sel-old.mm @@ -0,0 +1,215 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * Utilities for registering and looking up selectors. The sole + * purpose of the selector tables is a registry whereby there is + * exactly one address (selector) associated with a given string + * (method name). + */ + +#if !__OBJC2__ + +#include "objc-private.h" +#include "objc-sel-set.h" + +#if SUPPORT_PREOPT +#include +static const objc_selopt_t *builtins = NULL; +#endif + +__BEGIN_DECLS + +static size_t SelrefCount = 0; + +static const char *_objc_empty_selector = ""; +static struct __objc_sel_set *_objc_selectors = NULL; + + +static SEL _objc_search_builtins(const char *key) +{ +#if defined(DUMP_SELECTORS) + if (NULL != key) printf("\t\"%s\",\n", key); +#endif + + if (!key) return (SEL)0; + if ('\0' == *key) return (SEL)_objc_empty_selector; + +#if SUPPORT_PREOPT + if (builtins) return (SEL)builtins->get(key); +#endif + + return (SEL)0; +} + + +const char *sel_getName(SEL sel) { + return sel ? (const char *)sel : ""; +} + + +BOOL sel_isMapped(SEL name) +{ + SEL sel; + + if (!name) return NO; + + sel = _objc_search_builtins((const char *)name); + if (sel) return YES; + + rwlock_reader_t lock(selLock); + if (_objc_selectors) { + sel = __objc_sel_set_get(_objc_selectors, name); + } + return bool(sel); +} + +static SEL __sel_registerName(const char *name, int lock, int copy) +{ + SEL result = 0; + + if (lock) selLock.assertUnlocked(); + else selLock.assertWriting(); + + if (!name) return (SEL)0; + result = _objc_search_builtins(name); + if (result) return result; + + if (lock) selLock.read(); + if (_objc_selectors) { + result = __objc_sel_set_get(_objc_selectors, (SEL)name); + } + if (lock) selLock.unlockRead(); + if (result) return result; + + // No match. Insert. + + if (lock) selLock.write(); + + if (!_objc_selectors) { + _objc_selectors = __objc_sel_set_create(SelrefCount); + } + if (lock) { + // Rescan in case it was added while we dropped the lock + result = __objc_sel_set_get(_objc_selectors, (SEL)name); + } + if (!result) { + result = (SEL)(copy ? 
strdup(name) : name); + __objc_sel_set_add(_objc_selectors, result); +#if defined(DUMP_UNKNOWN_SELECTORS) + printf("\t\"%s\",\n", name); +#endif + } + + if (lock) selLock.unlockWrite(); + return result; +} + + +SEL sel_registerName(const char *name) { + return __sel_registerName(name, 1, 1); // YES lock, YES copy +} + +SEL sel_registerNameNoLock(const char *name, bool copy) { + return __sel_registerName(name, 0, copy); // NO lock, maybe copy +} + +void sel_lock(void) +{ + selLock.write(); +} + +void sel_unlock(void) +{ + selLock.unlockWrite(); +} + + +// 2001/1/24 +// the majority of uses of this function (which used to return NULL if not found) +// did not check for NULL, so, in fact, never return NULL +// +SEL sel_getUid(const char *name) { + return __sel_registerName(name, 2, 1); // YES lock, YES copy +} + + +BOOL sel_isEqual(SEL lhs, SEL rhs) +{ + return bool(lhs == rhs); +} + + +/*********************************************************************** +* sel_init +* Initialize selector tables and register selectors used internally. +**********************************************************************/ +void sel_init(size_t selrefCount) +{ + // save this value for later + SelrefCount = selrefCount; + +#if SUPPORT_PREOPT + builtins = preoptimizedSelectors(); +#endif + + // Register selectors used by libobjc + +#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO) +#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO) + + sel_lock(); + + s(load); + s(initialize); + t(resolveInstanceMethod:, resolveInstanceMethod); + t(resolveClassMethod:, resolveClassMethod); + t(.cxx_construct, cxx_construct); + t(.cxx_destruct, cxx_destruct); + s(retain); + s(release); + s(autorelease); + s(retainCount); + s(alloc); + t(allocWithZone:, allocWithZone); + s(dealloc); + s(copy); + s(new); + t(forwardInvocation:, forwardInvocation); + t(_tryRetain, tryRetain); + t(_isDeallocating, isDeallocating); + s(retainWeakReference); + s(allowsWeakReference); + + extern SEL FwdSel; + FwdSel = sel_registerNameNoLock("forward::", NO); + + sel_unlock(); + +#undef s +#undef t +} + +__END_DECLS + +#endif diff --git a/runtime/objc-sel-set.h b/runtime/objc-sel-set.h new file mode 100644 index 0000000..f4540db --- /dev/null +++ b/runtime/objc-sel-set.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2004 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * objc-sel-set.h + * A set of SELs used for SEL uniquing. 
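+ * Illustrative use, mirroring __sel_registerName() in objc-sel-old.mm
+ * (the selector string is hypothetical; the caller holds selLock):
+ *   struct __objc_sel_set *set = __objc_sel_set_create(selrefCount);
+ *   SEL sel = __objc_sel_set_get(set, (SEL)"doSomething:");
+ *   if (!sel) __objc_sel_set_add(set, (SEL)strdup("doSomething:"));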
+ */ + +#ifndef _OBJC_SEL_SET_H_ +#define _OBJC_SEL_SET_H_ + +#if !__OBJC2__ + +#include +#include "objc-os.h" + +__BEGIN_DECLS + +struct __objc_sel_set; + +extern struct __objc_sel_set *__objc_sel_set_create(size_t selrefCount); +extern SEL __objc_sel_set_get(struct __objc_sel_set *sset, SEL candidate); +extern void __objc_sel_set_add(struct __objc_sel_set *sset, SEL value); + +__END_DECLS + +#endif + +#endif diff --git a/runtime/objc-sel-set.mm b/runtime/objc-sel-set.mm new file mode 100644 index 0000000..79ca591 --- /dev/null +++ b/runtime/objc-sel-set.mm @@ -0,0 +1,176 @@ +/* + * Copyright (c) 1999-2004,2008 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/* + * objc-sel-set.h + * A cut-down copy of CFSet used for SEL uniquing. + */ + + +// NOTE: even on a 64-bit system, the implementation is still limited +// to 32-bit integers (like, the count), but SEL can be any size. 
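// Illustration (hypothetical caller, not in the original source): the three
// entry points declared in objc-sel-set.h are used by __sel_registerName()
// in objc-sel-old.mm roughly as sketched below -- look the string up first,
// and only add it when no equal name is already present (locking omitted):
//
//     static struct __objc_sel_set *set;
//
//     static SEL intern(const char *name)
//     {
//         if (!set) set = __objc_sel_set_create(0 /* selref count hint */);
//         SEL hit = __objc_sel_set_get(set, (SEL)name);  // compared by strcmp
//         if (hit) return hit;                           // already uniqued
//         SEL fresh = (SEL)strdup(name);                 // keep a private copy
//         __objc_sel_set_add(set, fresh);                // known to be absent
//         return fresh;
//     }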
+ +#include +#include "objc-private.h" +#include "objc-sel-set.h" + +#if !__OBJC2__ + + +#if !SUPPORT_MOD +// mod-free power of 2 version + +#define CONSTRAIN(val, range) ((val) & ((range)-1)) +#define SIZE 27 + +static const uint32_t __objc_sel_set_capacities[SIZE+1] = { + 3, 6, 12, 24, 48, 96, 192, 384, 768, 1536, 3072, 6144, 12288, 24576, + 49152, 98304, 196608, 393216, 786432, 1572864, 3145728, 6291456, + 12582912, 25165824, 50331648, 100663296, 201326592, UINT32_MAX +}; + +static const uint32_t __objc_sel_set_buckets[SIZE] = { // powers of 2 + 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, + 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, + 16777216, 33554432, 67108864, 134217728, 268435456 +}; + +#else +// prime version + +#define CONSTRAIN(val, range) ((val) % (range)) +#define SIZE 42 + +static const uint32_t __objc_sel_set_capacities[SIZE+1] = { + 4, 8, 17, 29, 47, 76, 123, 199, 322, 521, 843, 1364, 2207, 3571, + 5778, 9349, 15127, 24476, 39603, 64079, 103682, 167761, 271443, + 439204, 710647, 1149851, 1860498, 3010349, 4870847, 7881196, 12752043, + 20633239, 33385282, 54018521, 87403803, 141422324, 228826127, 370248451, + 599074578, 969323029, 1568397607, 2537720636U, UINT32_MAX +}; + +static const uint32_t __objc_sel_set_buckets[SIZE] = { // primes + 5, 11, 23, 41, 67, 113, 199, 317, 521, 839, 1361, 2207, 3571, 5779, + 9349, 15121, 24473, 39607, 64081, 103681, 167759, 271429, 439199, + 710641, 1149857, 1860503, 3010349, 4870843, 7881193, 12752029, 20633237, + 33385273, 54018521, 87403763, 141422317, 228826121, 370248451, 599074561, + 969323023, 1568397599, 2537720629U, 4106118251U +}; + +#endif + +struct __objc_sel_set { + uint32_t _count; /* number of slots used */ + uint32_t _capacity; /* maximum number of used slots */ + uint32_t _bucketsNum; /* number of slots */ + SEL *_buckets; /* can be NULL if not allocated yet */ +}; + +struct __objc_sel_set_finds { + SEL match; + uint32_t nomatch; +}; + +// candidate may not be 0; match is 0 if not present +static struct __objc_sel_set_finds __objc_sel_set_findBuckets(struct __objc_sel_set *sset, SEL candidate) { + struct __objc_sel_set_finds ret = {0, 0xffffffff}; + uint32_t probe = CONSTRAIN((uint32_t)_objc_strhash((const char *)candidate), sset->_bucketsNum); + for (;;) { + SEL currentSel = sset->_buckets[probe]; + if (!currentSel) { + ret.nomatch = probe; + return ret; + } else if (!ret.match && 0 == strcmp((const char *)currentSel, (const char *)candidate)) { + ret.match = currentSel; + } + probe++; + if (sset->_bucketsNum <= probe) { + probe -= sset->_bucketsNum; + } + } +} + +// create a set with given starting capacity, will resize as needed +struct __objc_sel_set *__objc_sel_set_create(size_t selrefs) { + uint32_t idx; + + struct __objc_sel_set *sset = (struct __objc_sel_set *) + malloc(sizeof(struct __objc_sel_set)); + if (!sset) _objc_fatal("objc_sel_set failure"); + sset->_count = 0; + + // heuristic to convert executable's selrefs count to table size +#if TARGET_OS_IPHONE + for (idx = 0; __objc_sel_set_capacities[idx] < selrefs; idx++); + if (idx > 0 && selrefs < 1536) idx--; +#else + if (selrefs < 1024) selrefs = 1024; + for (idx = 0; __objc_sel_set_capacities[idx] < selrefs; idx++); + idx++; +#endif + + if (SIZE <= idx) _objc_fatal("objc_sel_set failure"); + sset->_capacity = __objc_sel_set_capacities[idx]; + sset->_bucketsNum = __objc_sel_set_buckets[idx]; + sset->_buckets = (SEL *)calloc(sset->_bucketsNum, sizeof(SEL)); + if (!sset->_buckets) _objc_fatal("objc_sel_set 
failure"); + return sset; +} + +// returns 0 on failure; candidate may not be 0 +SEL __objc_sel_set_get(struct __objc_sel_set *sset, SEL candidate) { + return __objc_sel_set_findBuckets(sset, candidate).match; +} + +// value may not be 0; should not be called unless it is known the value is not in the set +void __objc_sel_set_add(struct __objc_sel_set *sset, SEL value) { + if (sset->_count == sset->_capacity) { + SEL *oldbuckets = sset->_buckets; + uint32_t oldnbuckets = sset->_bucketsNum; + uint32_t idx, capacity = sset->_count + 1; + for (idx = 0; __objc_sel_set_capacities[idx] < capacity; idx++); + if (SIZE <= idx) _objc_fatal("objc_sel_set failure"); + sset->_capacity = __objc_sel_set_capacities[idx]; + sset->_bucketsNum = __objc_sel_set_buckets[idx]; + sset->_buckets = (SEL *) + calloc(sset->_bucketsNum, sizeof(SEL)); + if (!sset->_buckets) _objc_fatal("objc_sel_set failure"); + for (idx = 0; idx < oldnbuckets; idx++) { + SEL currentSel = oldbuckets[idx]; + if (currentSel) { + uint32_t nomatch = __objc_sel_set_findBuckets(sset, currentSel).nomatch; + sset->_buckets[nomatch] = currentSel; + } + } + free(oldbuckets); + } + { + uint32_t nomatch = __objc_sel_set_findBuckets(sset, value).nomatch; + sset->_buckets[nomatch] = value; + sset->_count++; + } +} + + +// !__OBJC2__ +#endif diff --git a/runtime/objc-sel-table.s b/runtime/objc-sel-table.s new file mode 100644 index 0000000..359df9b --- /dev/null +++ b/runtime/objc-sel-table.s @@ -0,0 +1,69 @@ +#include +#include + +#if __LP64__ +# define PTR(x) .quad x +#else +# define PTR(x) .long x +#endif + +.section __TEXT,__objc_opt_ro +.align 3 +.private_extern __objc_opt_data +__objc_opt_data: +.long 15 /* table.version */ +.long 0 /* table.flags */ +.long 0 /* table.selopt_offset */ +.long 0 /* table.headeropt_ro_offset */ +.long 0 /* table.clsopt_offset */ +.long 0 /* table.protocolopt_offset */ +.long 0 /* table.headeropt_rw_offset */ +.space PAGE_MAX_SIZE-28 + +/* space for selopt, smax/capacity=524288, blen/mask=262143+1 */ +.space 262144 /* mask tab */ +.space 524288 /* checkbytes */ +.space 524288*4 /* offsets */ + +/* space for clsopt, smax/capacity=65536, blen/mask=16383+1 */ +.space 16384 /* mask tab */ +.space 65536 /* checkbytes */ +.space 65536*12 /* offsets to name and class and header_info */ +.space PAGE_MAX_SIZE /* some duplicate classes */ + +/* space for protocolopt, smax/capacity=8192, blen/mask=4095+1 */ +.space 4096 /* mask tab */ +.space 8192 /* checkbytes */ +.space 8192*4 /* offsets */ + +/* space for header_info (RO) structures */ +.space 16384 + +.section __DATA,__objc_opt_rw +.align 3 +.private_extern __objc_opt_rw_data +__objc_opt_rw_data: +/* space for header_info (RW) structures */ +.space 16384 + +/* space for 8192 protocols */ +#if __LP64__ +.space 8192 * 11 * 8 +#else +.space 8192 * 11 * 4 +#endif + + +/* section of pointers that the shared cache optimizer wants to know about */ +.section __DATA,__objc_opt_ptrs +.align 3 + +#if TARGET_OS_OSX && __i386__ +// old ABI +.globl .objc_class_name_Protocol +PTR(.objc_class_name_Protocol) +#else +// new ABI +.globl _OBJC_CLASS_$_Protocol +PTR(_OBJC_CLASS_$_Protocol) +#endif diff --git a/runtime/objc-sel.mm b/runtime/objc-sel.mm new file mode 100644 index 0000000..4c7fd29 --- /dev/null +++ b/runtime/objc-sel.mm @@ -0,0 +1,215 @@ +/* + * Copyright (c) 2012 Apple Inc. All Rights Reserved. 
+ * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#if __OBJC2__ + +#include "objc-private.h" +#include "objc-cache.h" + +#if SUPPORT_PREOPT +static const objc_selopt_t *builtins = NULL; +#endif + + +static size_t SelrefCount = 0; + +static NXMapTable *namedSelectors; + +static SEL search_builtins(const char *key); + + +/*********************************************************************** +* sel_init +* Initialize selector tables and register selectors used internally. +**********************************************************************/ +void sel_init(size_t selrefCount) +{ + // save this value for later + SelrefCount = selrefCount; + +#if SUPPORT_PREOPT + builtins = preoptimizedSelectors(); + + if (PrintPreopt && builtins) { + uint32_t occupied = builtins->occupied; + uint32_t capacity = builtins->capacity; + + _objc_inform("PREOPTIMIZATION: using selopt at %p", builtins); + _objc_inform("PREOPTIMIZATION: %u selectors", occupied); + _objc_inform("PREOPTIMIZATION: %u/%u (%u%%) hash table occupancy", + occupied, capacity, + (unsigned)(occupied/(double)capacity*100)); + } +#endif + + // Register selectors used by libobjc + +#define s(x) SEL_##x = sel_registerNameNoLock(#x, NO) +#define t(x,y) SEL_##y = sel_registerNameNoLock(#x, NO) + + sel_lock(); + + s(load); + s(initialize); + t(resolveInstanceMethod:, resolveInstanceMethod); + t(resolveClassMethod:, resolveClassMethod); + t(.cxx_construct, cxx_construct); + t(.cxx_destruct, cxx_destruct); + s(retain); + s(release); + s(autorelease); + s(retainCount); + s(alloc); + t(allocWithZone:, allocWithZone); + s(dealloc); + s(copy); + s(new); + t(forwardInvocation:, forwardInvocation); + t(_tryRetain, tryRetain); + t(_isDeallocating, isDeallocating); + s(retainWeakReference); + s(allowsWeakReference); + + sel_unlock(); + +#undef s +#undef t +} + + +static SEL sel_alloc(const char *name, bool copy) +{ + selLock.assertWriting(); + return (SEL)(copy ? 
strdupIfMutable(name) : name); +} + + +const char *sel_getName(SEL sel) +{ + if (!sel) return ""; + return (const char *)(const void*)sel; +} + + +BOOL sel_isMapped(SEL sel) +{ + if (!sel) return NO; + + const char *name = (const char *)(void *)sel; + + if (sel == search_builtins(name)) return YES; + + rwlock_reader_t lock(selLock); + if (namedSelectors) { + return (sel == (SEL)NXMapGet(namedSelectors, name)); + } + return false; +} + + +static SEL search_builtins(const char *name) +{ +#if SUPPORT_PREOPT + if (builtins) return (SEL)builtins->get(name); +#endif + return nil; +} + + +static SEL __sel_registerName(const char *name, int lock, int copy) +{ + SEL result = 0; + + if (lock) selLock.assertUnlocked(); + else selLock.assertWriting(); + + if (!name) return (SEL)0; + + result = search_builtins(name); + if (result) return result; + + if (lock) selLock.read(); + if (namedSelectors) { + result = (SEL)NXMapGet(namedSelectors, name); + } + if (lock) selLock.unlockRead(); + if (result) return result; + + // No match. Insert. + + if (lock) selLock.write(); + + if (!namedSelectors) { + namedSelectors = NXCreateMapTable(NXStrValueMapPrototype, + (unsigned)SelrefCount); + } + if (lock) { + // Rescan in case it was added while we dropped the lock + result = (SEL)NXMapGet(namedSelectors, name); + } + if (!result) { + result = sel_alloc(name, copy); + // fixme choose a better container (hash not map for starters) + NXMapInsert(namedSelectors, sel_getName(result), result); + } + + if (lock) selLock.unlockWrite(); + return result; +} + + +SEL sel_registerName(const char *name) { + return __sel_registerName(name, 1, 1); // YES lock, YES copy +} + +SEL sel_registerNameNoLock(const char *name, bool copy) { + return __sel_registerName(name, 0, copy); // NO lock, maybe copy +} + +void sel_lock(void) +{ + selLock.write(); +} + +void sel_unlock(void) +{ + selLock.unlockWrite(); +} + + +// 2001/1/24 +// the majority of uses of this function (which used to return NULL if not found) +// did not check for NULL, so, in fact, never return NULL +// +SEL sel_getUid(const char *name) { + return __sel_registerName(name, 2, 1); // YES lock, YES copy +} + + +BOOL sel_isEqual(SEL lhs, SEL rhs) +{ + return bool(lhs == rhs); +} + + +#endif diff --git a/runtime/objc-sync.h b/runtime/objc-sync.h new file mode 100644 index 0000000..93a96fc --- /dev/null +++ b/runtime/objc-sync.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2002, 2006 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef __OBJC_SNYC_H_ +#define __OBJC_SNYC_H_ + +#include + + +/** + * Begin synchronizing on 'obj'. 
+ * Allocates recursive pthread_mutex associated with 'obj' if needed. + * + * @param obj The object to begin synchronizing on. + * + * @return OBJC_SYNC_SUCCESS once lock is acquired. + */ +OBJC_EXPORT int objc_sync_enter(id obj) + OBJC_AVAILABLE(10.3, 2.0, 9.0, 1.0); + +/** + * End synchronizing on 'obj'. + * + * @param obj The objet to end synchronizing on. + * + * @return OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR + */ +OBJC_EXPORT int objc_sync_exit(id obj) + OBJC_AVAILABLE(10.3, 2.0, 9.0, 1.0); + +// The wait/notify functions have never worked correctly and no longer exist. +OBJC_EXPORT int objc_sync_wait(id obj, long long milliSecondsMaxWait) + UNAVAILABLE_ATTRIBUTE; +OBJC_EXPORT int objc_sync_notify(id obj) + UNAVAILABLE_ATTRIBUTE; +OBJC_EXPORT int objc_sync_notifyAll(id obj) + UNAVAILABLE_ATTRIBUTE; + +enum { + OBJC_SYNC_SUCCESS = 0, + OBJC_SYNC_NOT_OWNING_THREAD_ERROR = -1, + OBJC_SYNC_TIMED_OUT = -2, + OBJC_SYNC_NOT_INITIALIZED = -3 +}; + + +#endif // __OBJC_SNYC_H_ diff --git a/runtime/objc-sync.mm b/runtime/objc-sync.mm new file mode 100644 index 0000000..cb69981 --- /dev/null +++ b/runtime/objc-sync.mm @@ -0,0 +1,327 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "objc-private.h" +#include "objc-sync.h" + +// +// Allocate a lock only when needed. Since few locks are needed at any point +// in time, keep them on a single list. +// + + +typedef struct SyncData { + struct SyncData* nextData; + DisguisedPtr object; + int32_t threadCount; // number of THREADS using this block + recursive_mutex_t mutex; +} SyncData; + +typedef struct { + SyncData *data; + unsigned int lockCount; // number of times THIS THREAD locked this block +} SyncCacheItem; + +typedef struct SyncCache { + unsigned int allocated; + unsigned int used; + SyncCacheItem list[0]; +} SyncCache; + +/* + Fast cache: two fixed pthread keys store a single SyncCacheItem. + This avoids malloc of the SyncCache for threads that only synchronize + a single object at a time. + SYNC_DATA_DIRECT_KEY == SyncCacheItem.data + SYNC_COUNT_DIRECT_KEY == SyncCacheItem.lockCount + */ + +struct SyncList { + SyncData *data; + spinlock_t lock; + + SyncList() : data(nil) { } +}; + +// Use multiple parallel lists to decrease contention among unrelated objects. 
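// Illustration (hand-written sketch, not compiler output): an
// @synchronized (obj) { ... } statement is lowered to a balanced
// objc_sync_enter()/objc_sync_exit() pair around the body, with the exit
// also run on the exceptional path -- roughly:
static void sketch_synchronized(id obj)
{
    objc_sync_enter(obj);        // blocks until obj's recursive mutex is held
    @try {
        // ... protected code ...
    } @finally {
        objc_sync_exit(obj);     // OBJC_SYNC_NOT_OWNING_THREAD_ERROR if unbalanced
    }
}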
+#define LOCK_FOR_OBJ(obj) sDataLists[obj].lock +#define LIST_FOR_OBJ(obj) sDataLists[obj].data +static StripedMap sDataLists; + + +enum usage { ACQUIRE, RELEASE, CHECK }; + +static SyncCache *fetch_cache(bool create) +{ + _objc_pthread_data *data; + + data = _objc_fetch_pthread_data(create); + if (!data) return NULL; + + if (!data->syncCache) { + if (!create) { + return NULL; + } else { + int count = 4; + data->syncCache = (SyncCache *) + calloc(1, sizeof(SyncCache) + count*sizeof(SyncCacheItem)); + data->syncCache->allocated = count; + } + } + + // Make sure there's at least one open slot in the list. + if (data->syncCache->allocated == data->syncCache->used) { + data->syncCache->allocated *= 2; + data->syncCache = (SyncCache *) + realloc(data->syncCache, sizeof(SyncCache) + + data->syncCache->allocated * sizeof(SyncCacheItem)); + } + + return data->syncCache; +} + + +void _destroySyncCache(struct SyncCache *cache) +{ + if (cache) free(cache); +} + + +static SyncData* id2data(id object, enum usage why) +{ + spinlock_t *lockp = &LOCK_FOR_OBJ(object); + SyncData **listp = &LIST_FOR_OBJ(object); + SyncData* result = NULL; + +#if SUPPORT_DIRECT_THREAD_KEYS + // Check per-thread single-entry fast cache for matching object + bool fastCacheOccupied = NO; + SyncData *data = (SyncData *)tls_get_direct(SYNC_DATA_DIRECT_KEY); + if (data) { + fastCacheOccupied = YES; + + if (data->object == object) { + // Found a match in fast cache. + uintptr_t lockCount; + + result = data; + lockCount = (uintptr_t)tls_get_direct(SYNC_COUNT_DIRECT_KEY); + if (result->threadCount <= 0 || lockCount <= 0) { + _objc_fatal("id2data fastcache is buggy"); + } + + switch(why) { + case ACQUIRE: { + lockCount++; + tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount); + break; + } + case RELEASE: + lockCount--; + tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)lockCount); + if (lockCount == 0) { + // remove from fast cache + tls_set_direct(SYNC_DATA_DIRECT_KEY, NULL); + // atomic because may collide with concurrent ACQUIRE + OSAtomicDecrement32Barrier(&result->threadCount); + } + break; + case CHECK: + // do nothing + break; + } + + return result; + } + } +#endif + + // Check per-thread cache of already-owned locks for matching object + SyncCache *cache = fetch_cache(NO); + if (cache) { + unsigned int i; + for (i = 0; i < cache->used; i++) { + SyncCacheItem *item = &cache->list[i]; + if (item->data->object != object) continue; + + // Found a match. + result = item->data; + if (result->threadCount <= 0 || item->lockCount <= 0) { + _objc_fatal("id2data cache is buggy"); + } + + switch(why) { + case ACQUIRE: + item->lockCount++; + break; + case RELEASE: + item->lockCount--; + if (item->lockCount == 0) { + // remove from per-thread cache + cache->list[i] = cache->list[--cache->used]; + // atomic because may collide with concurrent ACQUIRE + OSAtomicDecrement32Barrier(&result->threadCount); + } + break; + case CHECK: + // do nothing + break; + } + + return result; + } + } + + // Thread cache didn't find anything. + // Walk in-use list looking for matching object + // Spinlock prevents multiple threads from creating multiple + // locks for the same new object. + // We could keep the nodes in some hash table if we find that there are + // more than 20 or so distinct locks active, but we don't do that now. 
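    // Illustration (not in the original source): the two per-thread caches
    // above are what make re-entering @synchronized on the same object from
    // the same thread cheap.  A nested pair such as
    //
    //     objc_sync_enter(obj);   // first time: global list below, threadCount++
    //     objc_sync_enter(obj);   // again: per-thread cache hit, lockCount -> 2
    //     objc_sync_exit(obj);    // lockCount -> 1
    //     objc_sync_exit(obj);    // lockCount -> 0, SyncData kept for reuse
    //
    // never reaches this slow path for the inner enter/exit.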
+ + lockp->lock(); + + { + SyncData* p; + SyncData* firstUnused = NULL; + for (p = *listp; p != NULL; p = p->nextData) { + if ( p->object == object ) { + result = p; + // atomic because may collide with concurrent RELEASE + OSAtomicIncrement32Barrier(&result->threadCount); + goto done; + } + if ( (firstUnused == NULL) && (p->threadCount == 0) ) + firstUnused = p; + } + + // no SyncData currently associated with object + if ( (why == RELEASE) || (why == CHECK) ) + goto done; + + // an unused one was found, use it + if ( firstUnused != NULL ) { + result = firstUnused; + result->object = (objc_object *)object; + result->threadCount = 1; + goto done; + } + } + + // malloc a new SyncData and add to list. + // XXX calling malloc with a global lock held is bad practice, + // might be worth releasing the lock, mallocing, and searching again. + // But since we never free these guys we won't be stuck in malloc very often. + result = (SyncData*)calloc(sizeof(SyncData), 1); + result->object = (objc_object *)object; + result->threadCount = 1; + new (&result->mutex) recursive_mutex_t(); + result->nextData = *listp; + *listp = result; + + done: + lockp->unlock(); + if (result) { + // Only new ACQUIRE should get here. + // All RELEASE and CHECK and recursive ACQUIRE are + // handled by the per-thread caches above. + if (why == RELEASE) { + // Probably some thread is incorrectly exiting + // while the object is held by another thread. + return nil; + } + if (why != ACQUIRE) _objc_fatal("id2data is buggy"); + if (result->object != object) _objc_fatal("id2data is buggy"); + +#if SUPPORT_DIRECT_THREAD_KEYS + if (!fastCacheOccupied) { + // Save in fast thread cache + tls_set_direct(SYNC_DATA_DIRECT_KEY, result); + tls_set_direct(SYNC_COUNT_DIRECT_KEY, (void*)1); + } else +#endif + { + // Save in thread cache + if (!cache) cache = fetch_cache(YES); + cache->list[cache->used].data = result; + cache->list[cache->used].lockCount = 1; + cache->used++; + } + } + + return result; +} + + +BREAKPOINT_FUNCTION( + void objc_sync_nil(void) +); + + +// Begin synchronizing on 'obj'. +// Allocates recursive mutex associated with 'obj' if needed. +// Returns OBJC_SYNC_SUCCESS once lock is acquired. +int objc_sync_enter(id obj) +{ + int result = OBJC_SYNC_SUCCESS; + + if (obj) { + SyncData* data = id2data(obj, ACQUIRE); + assert(data); + data->mutex.lock(); + } else { + // @synchronized(nil) does nothing + if (DebugNilSync) { + _objc_inform("NIL SYNC DEBUG: @synchronized(nil); set a breakpoint on objc_sync_nil to debug"); + } + objc_sync_nil(); + } + + return result; +} + + +// End synchronizing on 'obj'. +// Returns OBJC_SYNC_SUCCESS or OBJC_SYNC_NOT_OWNING_THREAD_ERROR +int objc_sync_exit(id obj) +{ + int result = OBJC_SYNC_SUCCESS; + + if (obj) { + SyncData* data = id2data(obj, RELEASE); + if (!data) { + result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR; + } else { + bool okay = data->mutex.tryUnlock(); + if (!okay) { + result = OBJC_SYNC_NOT_OWNING_THREAD_ERROR; + } + } + } else { + // @synchronized(nil) does nothing + } + + + return result; +} + diff --git a/runtime/objc-typeencoding.mm b/runtime/objc-typeencoding.mm new file mode 100644 index 0000000..14edccb --- /dev/null +++ b/runtime/objc-typeencoding.mm @@ -0,0 +1,365 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). 
You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +/*********************************************************************** +* objc-typeencoding.m +* Parsing of old-style type strings. +**********************************************************************/ + +#include "objc-private.h" + +/*********************************************************************** +* SubtypeUntil. +* +* Delegation. +**********************************************************************/ +static int SubtypeUntil (const char * type, + char end) +{ + int level = 0; + const char * head = type; + + // + while (*type) + { + if (!*type || (!level && (*type == end))) + return (int)(type - head); + + switch (*type) + { + case ']': case '}': case ')': level--; break; + case '[': case '{': case '(': level += 1; break; + } + + type += 1; + } + + _objc_fatal ("Object: SubtypeUntil: end of type encountered prematurely\n"); + return 0; +} + + +/*********************************************************************** +* SkipFirstType. +**********************************************************************/ +static const char * SkipFirstType (const char * type) +{ + while (1) + { + switch (*type++) + { + case 'O': /* bycopy */ + case 'n': /* in */ + case 'o': /* out */ + case 'N': /* inout */ + case 'r': /* const */ + case 'V': /* oneway */ + case '^': /* pointers */ + break; + + case '@': /* objects */ + if (type[0] == '?') type++; /* Blocks */ + return type; + + /* arrays */ + case '[': + while ((*type >= '0') && (*type <= '9')) + type += 1; + return type + SubtypeUntil (type, ']') + 1; + + /* structures */ + case '{': + return type + SubtypeUntil (type, '}') + 1; + + /* unions */ + case '(': + return type + SubtypeUntil (type, ')') + 1; + + /* basic types */ + default: + return type; + } + } +} + + +/*********************************************************************** +* encoding_getNumberOfArguments. +**********************************************************************/ +unsigned int +encoding_getNumberOfArguments(const char *typedesc) +{ + unsigned nargs; + + // First, skip the return type + typedesc = SkipFirstType (typedesc); + + // Next, skip stack size + while ((*typedesc >= '0') && (*typedesc <= '9')) + typedesc += 1; + + // Now, we have the arguments - count how many + nargs = 0; + while (*typedesc) + { + // Traverse argument type + typedesc = SkipFirstType (typedesc); + + // Skip GNU runtime's register parameter hint + if (*typedesc == '+') typedesc++; + + // Traverse (possibly negative) argument offset + if (*typedesc == '-') + typedesc += 1; + while ((*typedesc >= '0') && (*typedesc <= '9')) + typedesc += 1; + + // Made it past an argument + nargs += 1; + } + + return nargs; +} + +/*********************************************************************** +* encoding_getSizeOfArguments. 
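*
* Worked example (illustrative, not from the original header): for a method
* -(void)setObject:(id)obj forKey:(id)key on a 64-bit target the type string
* is "v32@0:8@16@24".  This function skips the 'v' return type and reads the
* leading decimal, yielding 32 -- the size of the argument area in bytes.
* encoding_getNumberOfArguments() on the same string counts the four
* remaining elements: self '@' at offset 0, _cmd ':' at 8, obj '@' at 16
* and key '@' at 24.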
+**********************************************************************/ +unsigned +encoding_getSizeOfArguments(const char *typedesc) +{ + unsigned stack_size; + + // Get our starting points + stack_size = 0; + + // Skip the return type + typedesc = SkipFirstType (typedesc); + + // Convert ASCII number string to integer + while ((*typedesc >= '0') && (*typedesc <= '9')) + stack_size = (stack_size * 10) + (*typedesc++ - '0'); + + return stack_size; +} + + +/*********************************************************************** +* encoding_getArgumentInfo. +**********************************************************************/ +unsigned int +encoding_getArgumentInfo(const char *typedesc, unsigned int arg, + const char **type, int *offset) +{ + unsigned nargs = 0; + int self_offset = 0; + bool offset_is_negative = NO; + + // First, skip the return type + typedesc = SkipFirstType (typedesc); + + // Next, skip stack size + while ((*typedesc >= '0') && (*typedesc <= '9')) + typedesc += 1; + + // Now, we have the arguments - position typedesc to the appropriate argument + while (*typedesc && nargs != arg) + { + + // Skip argument type + typedesc = SkipFirstType (typedesc); + + if (nargs == 0) + { + // Skip GNU runtime's register parameter hint + if (*typedesc == '+') typedesc++; + + // Skip negative sign in offset + if (*typedesc == '-') + { + offset_is_negative = YES; + typedesc += 1; + } + else + offset_is_negative = NO; + + while ((*typedesc >= '0') && (*typedesc <= '9')) + self_offset = self_offset * 10 + (*typedesc++ - '0'); + if (offset_is_negative) + self_offset = -(self_offset); + + } + + else + { + // Skip GNU runtime's register parameter hint + if (*typedesc == '+') typedesc++; + + // Skip (possibly negative) argument offset + if (*typedesc == '-') + typedesc += 1; + while ((*typedesc >= '0') && (*typedesc <= '9')) + typedesc += 1; + } + + nargs += 1; + } + + if (*typedesc) + { + int arg_offset = 0; + + *type = typedesc; + typedesc = SkipFirstType (typedesc); + + if (arg == 0) + { + *offset = 0; + } + + else + { + // Skip GNU register parameter hint + if (*typedesc == '+') typedesc++; + + // Pick up (possibly negative) argument offset + if (*typedesc == '-') + { + offset_is_negative = YES; + typedesc += 1; + } + else + offset_is_negative = NO; + + while ((*typedesc >= '0') && (*typedesc <= '9')) + arg_offset = arg_offset * 10 + (*typedesc++ - '0'); + if (offset_is_negative) + arg_offset = - arg_offset; + + *offset = arg_offset - self_offset; + } + + } + + else + { + *type = 0; + *offset = 0; + } + + return nargs; +} + + +void +encoding_getReturnType(const char *t, char *dst, size_t dst_len) +{ + size_t len; + const char *end; + + if (!dst) return; + if (!t) { + strncpy(dst, "", dst_len); + return; + } + + end = SkipFirstType(t); + len = end - t; + strncpy(dst, t, MIN(len, dst_len)); + if (len < dst_len) memset(dst+len, 0, dst_len - len); +} + +/*********************************************************************** +* encoding_copyReturnType. Returns the method's return type string +* on the heap. 
+**********************************************************************/ +char * +encoding_copyReturnType(const char *t) +{ + size_t len; + const char *end; + char *result; + + if (!t) return NULL; + + end = SkipFirstType(t); + len = end - t; + result = (char *)malloc(len + 1); + strncpy(result, t, len); + result[len] = '\0'; + return result; +} + + +void +encoding_getArgumentType(const char *t, unsigned int index, + char *dst, size_t dst_len) +{ + size_t len; + const char *end; + int offset; + + if (!dst) return; + if (!t) { + strncpy(dst, "", dst_len); + return; + } + + encoding_getArgumentInfo(t, index, &t, &offset); + + if (!t) { + strncpy(dst, "", dst_len); + return; + } + + end = SkipFirstType(t); + len = end - t; + strncpy(dst, t, MIN(len, dst_len)); + if (len < dst_len) memset(dst+len, 0, dst_len - len); +} + + +/*********************************************************************** +* encoding_copyArgumentType. Returns a single argument's type string +* on the heap. Argument 0 is `self`; argument 1 is `_cmd`. +**********************************************************************/ +char * +encoding_copyArgumentType(const char *t, unsigned int index) +{ + size_t len; + const char *end; + char *result; + int offset; + + if (!t) return NULL; + + encoding_getArgumentInfo(t, index, &t, &offset); + + if (!t) return NULL; + + end = SkipFirstType(t); + len = end - t; + result = (char *)malloc(len + 1); + strncpy(result, t, len); + result[len] = '\0'; + return result; +} diff --git a/runtime/objc-weak.h b/runtime/objc-weak.h new file mode 100644 index 0000000..8c50050 --- /dev/null +++ b/runtime/objc-weak.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_WEAK_H_ +#define _OBJC_WEAK_H_ + +#include +#include "objc-config.h" + +__BEGIN_DECLS + +/* +The weak table is a hash table governed by a single spin lock. +An allocated blob of memory, most often an object, but under GC any such +allocation, may have its address stored in a __weak marked storage location +through use of compiler generated write-barriers or hand coded uses of the +register weak primitive. Associated with the registration can be a callback +block for the case when one of the allocated chunks of memory is reclaimed. +The table is hashed on the address of the allocated memory. When __weak +marked memory changes its reference, we count on the fact that we can still +see its previous reference. + +So, in the hash table, indexed by the weakly referenced item, is a list of +all locations where this address is currently being stored. 
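For example (an added illustration, not part of the original note): after

    __weak id weak1 = obj;
    __weak id weak2 = obj;

the entry keyed by obj records the two referrer slots &weak1 and &weak2
(kept inline while there are no more than WEAK_INLINE_COUNT of them).
weak_unregister_no_lock() drops a slot when a __weak variable is itself
destroyed or reassigned, and weak_clear_no_lock() walks whatever slots
remain when obj deallocates and stores nil through each of them.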
+ +For ARC, we also keep track of whether an arbitrary object is being +deallocated by briefly placing it in the table just prior to invoking +dealloc, and removing it via objc_clear_deallocating just prior to memory +reclamation. + +*/ + +// The address of a __weak variable. +// These pointers are stored disguised so memory analysis tools +// don't see lots of interior pointers from the weak table into objects. +typedef DisguisedPtr weak_referrer_t; + +#if __LP64__ +#define PTR_MINUS_2 62 +#else +#define PTR_MINUS_2 30 +#endif + +/** + * The internal structure stored in the weak references table. + * It maintains and stores + * a hash set of weak references pointing to an object. + * If out_of_line_ness != REFERRERS_OUT_OF_LINE then the set + * is instead a small inline array. + */ +#define WEAK_INLINE_COUNT 4 + +// out_of_line_ness field overlaps with the low two bits of inline_referrers[1]. +// inline_referrers[1] is a DisguisedPtr of a pointer-aligned address. +// The low two bits of a pointer-aligned DisguisedPtr will always be 0b00 +// (disguised nil or 0x80..00) or 0b11 (any other address). +// Therefore out_of_line_ness == 0b10 is used to mark the out-of-line state. +#define REFERRERS_OUT_OF_LINE 2 + +struct weak_entry_t { + DisguisedPtr referent; + union { + struct { + weak_referrer_t *referrers; + uintptr_t out_of_line_ness : 2; + uintptr_t num_refs : PTR_MINUS_2; + uintptr_t mask; + uintptr_t max_hash_displacement; + }; + struct { + // out_of_line_ness field is low bits of inline_referrers[1] + weak_referrer_t inline_referrers[WEAK_INLINE_COUNT]; + }; + }; + + bool out_of_line() { + return (out_of_line_ness == REFERRERS_OUT_OF_LINE); + } + + weak_entry_t& operator=(const weak_entry_t& other) { + memcpy(this, &other, sizeof(other)); + return *this; + } + + weak_entry_t(objc_object *newReferent, objc_object **newReferrer) + : referent(newReferent) + { + inline_referrers[0] = newReferrer; + for (int i = 1; i < WEAK_INLINE_COUNT; i++) { + inline_referrers[i] = nil; + } + } +}; + +/** + * The global weak references table. Stores object ids as keys, + * and weak_entry_t structs as their values. + */ +struct weak_table_t { + weak_entry_t *weak_entries; + size_t num_entries; + uintptr_t mask; + uintptr_t max_hash_displacement; +}; + +/// Adds an (object, weak pointer) pair to the weak table. +id weak_register_no_lock(weak_table_t *weak_table, id referent, + id *referrer, bool crashIfDeallocating); + +/// Removes an (object, weak pointer) pair from the weak table. +void weak_unregister_no_lock(weak_table_t *weak_table, id referent, id *referrer); + +#if DEBUG +/// Returns true if an object is weakly referenced somewhere. +bool weak_is_registered_no_lock(weak_table_t *weak_table, id referent); +#endif + +/// Called on object destruction. Sets all remaining weak pointers to nil. +void weak_clear_no_lock(weak_table_t *weak_table, id referent); + +__END_DECLS + +#endif /* _OBJC_WEAK_H_ */ diff --git a/runtime/objc-weak.mm b/runtime/objc-weak.mm new file mode 100644 index 0000000..3dd6d0a --- /dev/null +++ b/runtime/objc-weak.mm @@ -0,0 +1,505 @@ +/* + * Copyright (c) 2010-2011 Apple Inc. All rights reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. 
Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ + +#include "objc-private.h" + +#include "objc-weak.h" + +#include +#include +#include +#include + +#define TABLE_SIZE(entry) (entry->mask ? entry->mask + 1 : 0) + +static void append_referrer(weak_entry_t *entry, objc_object **new_referrer); + +BREAKPOINT_FUNCTION( + void objc_weak_error(void) +); + +static void bad_weak_table(weak_entry_t *entries) +{ + _objc_fatal("bad weak table at %p. This may be a runtime bug or a " + "memory error somewhere else.", entries); +} + +/** + * Unique hash function for object pointers only. + * + * @param key The object pointer + * + * @return Size unrestricted hash of pointer. + */ +static inline uintptr_t hash_pointer(objc_object *key) { + return ptr_hash((uintptr_t)key); +} + +/** + * Unique hash function for weak object pointers only. + * + * @param key The weak object pointer. + * + * @return Size unrestricted hash of pointer. + */ +static inline uintptr_t w_hash_pointer(objc_object **key) { + return ptr_hash((uintptr_t)key); +} + +/** + * Grow the entry's hash table of referrers. Rehashes each + * of the referrers. + * + * @param entry Weak pointer hash set for a particular object. + */ +__attribute__((noinline, used)) +static void grow_refs_and_insert(weak_entry_t *entry, + objc_object **new_referrer) +{ + assert(entry->out_of_line()); + + size_t old_size = TABLE_SIZE(entry); + size_t new_size = old_size ? old_size * 2 : 8; + + size_t num_refs = entry->num_refs; + weak_referrer_t *old_refs = entry->referrers; + entry->mask = new_size - 1; + + entry->referrers = (weak_referrer_t *) + calloc(TABLE_SIZE(entry), sizeof(weak_referrer_t)); + entry->num_refs = 0; + entry->max_hash_displacement = 0; + + for (size_t i = 0; i < old_size && num_refs > 0; i++) { + if (old_refs[i] != nil) { + append_referrer(entry, old_refs[i]); + num_refs--; + } + } + // Insert + append_referrer(entry, new_referrer); + if (old_refs) free(old_refs); +} + +/** + * Add the given referrer to set of weak pointers in this entry. + * Does not perform duplicate checking (b/c weak pointers are never + * added to a set twice). + * + * @param entry The entry holding the set of weak pointers. + * @param new_referrer The new weak pointer to be added. + */ +static void append_referrer(weak_entry_t *entry, objc_object **new_referrer) +{ + if (! entry->out_of_line()) { + // Try to insert inline. + for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) { + if (entry->inline_referrers[i] == nil) { + entry->inline_referrers[i] = new_referrer; + return; + } + } + + // Couldn't insert inline. Allocate out of line. + weak_referrer_t *new_referrers = (weak_referrer_t *) + calloc(WEAK_INLINE_COUNT, sizeof(weak_referrer_t)); + // This constructed table is invalid, but grow_refs_and_insert + // will fix it and rehash it. 
+ for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) { + new_referrers[i] = entry->inline_referrers[i]; + } + entry->referrers = new_referrers; + entry->num_refs = WEAK_INLINE_COUNT; + entry->out_of_line_ness = REFERRERS_OUT_OF_LINE; + entry->mask = WEAK_INLINE_COUNT-1; + entry->max_hash_displacement = 0; + } + + assert(entry->out_of_line()); + + if (entry->num_refs >= TABLE_SIZE(entry) * 3/4) { + return grow_refs_and_insert(entry, new_referrer); + } + size_t begin = w_hash_pointer(new_referrer) & (entry->mask); + size_t index = begin; + size_t hash_displacement = 0; + while (entry->referrers[index] != nil) { + hash_displacement++; + index = (index+1) & entry->mask; + if (index == begin) bad_weak_table(entry); + } + if (hash_displacement > entry->max_hash_displacement) { + entry->max_hash_displacement = hash_displacement; + } + weak_referrer_t &ref = entry->referrers[index]; + ref = new_referrer; + entry->num_refs++; +} + +/** + * Remove old_referrer from set of referrers, if it's present. + * Does not remove duplicates, because duplicates should not exist. + * + * @todo this is slow if old_referrer is not present. Is this ever the case? + * + * @param entry The entry holding the referrers. + * @param old_referrer The referrer to remove. + */ +static void remove_referrer(weak_entry_t *entry, objc_object **old_referrer) +{ + if (! entry->out_of_line()) { + for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) { + if (entry->inline_referrers[i] == old_referrer) { + entry->inline_referrers[i] = nil; + return; + } + } + _objc_inform("Attempted to unregister unknown __weak variable " + "at %p. This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", + old_referrer); + objc_weak_error(); + return; + } + + size_t begin = w_hash_pointer(old_referrer) & (entry->mask); + size_t index = begin; + size_t hash_displacement = 0; + while (entry->referrers[index] != old_referrer) { + index = (index+1) & entry->mask; + if (index == begin) bad_weak_table(entry); + hash_displacement++; + if (hash_displacement > entry->max_hash_displacement) { + _objc_inform("Attempted to unregister unknown __weak variable " + "at %p. This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", + old_referrer); + objc_weak_error(); + return; + } + } + entry->referrers[index] = nil; + entry->num_refs--; +} + +/** + * Add new_entry to the object's table of weak references. + * Does not check whether the referent is already in the table. 
+ */ +static void weak_entry_insert(weak_table_t *weak_table, weak_entry_t *new_entry) +{ + weak_entry_t *weak_entries = weak_table->weak_entries; + assert(weak_entries != nil); + + size_t begin = hash_pointer(new_entry->referent) & (weak_table->mask); + size_t index = begin; + size_t hash_displacement = 0; + while (weak_entries[index].referent != nil) { + index = (index+1) & weak_table->mask; + if (index == begin) bad_weak_table(weak_entries); + hash_displacement++; + } + + weak_entries[index] = *new_entry; + weak_table->num_entries++; + + if (hash_displacement > weak_table->max_hash_displacement) { + weak_table->max_hash_displacement = hash_displacement; + } +} + + +static void weak_resize(weak_table_t *weak_table, size_t new_size) +{ + size_t old_size = TABLE_SIZE(weak_table); + + weak_entry_t *old_entries = weak_table->weak_entries; + weak_entry_t *new_entries = (weak_entry_t *) + calloc(new_size, sizeof(weak_entry_t)); + + weak_table->mask = new_size - 1; + weak_table->weak_entries = new_entries; + weak_table->max_hash_displacement = 0; + weak_table->num_entries = 0; // restored by weak_entry_insert below + + if (old_entries) { + weak_entry_t *entry; + weak_entry_t *end = old_entries + old_size; + for (entry = old_entries; entry < end; entry++) { + if (entry->referent) { + weak_entry_insert(weak_table, entry); + } + } + free(old_entries); + } +} + +// Grow the given zone's table of weak references if it is full. +static void weak_grow_maybe(weak_table_t *weak_table) +{ + size_t old_size = TABLE_SIZE(weak_table); + + // Grow if at least 3/4 full. + if (weak_table->num_entries >= old_size * 3 / 4) { + weak_resize(weak_table, old_size ? old_size*2 : 64); + } +} + +// Shrink the table if it is mostly empty. +static void weak_compact_maybe(weak_table_t *weak_table) +{ + size_t old_size = TABLE_SIZE(weak_table); + + // Shrink if larger than 1024 buckets and at most 1/16 full. + if (old_size >= 1024 && old_size / 16 >= weak_table->num_entries) { + weak_resize(weak_table, old_size / 8); + // leaves new table no more than 1/2 full + } +} + + +/** + * Remove entry from the zone's table of weak references. + */ +static void weak_entry_remove(weak_table_t *weak_table, weak_entry_t *entry) +{ + // remove entry + if (entry->out_of_line()) free(entry->referrers); + bzero(entry, sizeof(*entry)); + + weak_table->num_entries--; + + weak_compact_maybe(weak_table); +} + + +/** + * Return the weak reference table entry for the given referent. + * If there is no entry for referent, return NULL. + * Performs a lookup. + * + * @param weak_table + * @param referent The object. Must not be nil. + * + * @return The table of weak referrers to this object. + */ +static weak_entry_t * +weak_entry_for_referent(weak_table_t *weak_table, objc_object *referent) +{ + assert(referent); + + weak_entry_t *weak_entries = weak_table->weak_entries; + + if (!weak_entries) return nil; + + size_t begin = hash_pointer(referent) & weak_table->mask; + size_t index = begin; + size_t hash_displacement = 0; + while (weak_table->weak_entries[index].referent != referent) { + index = (index+1) & weak_table->mask; + if (index == begin) bad_weak_table(weak_table->weak_entries); + hash_displacement++; + if (hash_displacement > weak_table->max_hash_displacement) { + return nil; + } + } + + return &weak_table->weak_entries[index]; +} + +/** + * Unregister an already-registered weak reference. + * This is used when referrer's storage is about to go away, but referent + * isn't dead yet. 
(Otherwise, zeroing referrer later would be a + * bad memory access.) + * Does nothing if referent/referrer is not a currently active weak reference. + * Does not zero referrer. + * + * FIXME currently requires old referent value to be passed in (lame) + * FIXME unregistration should be automatic if referrer is collected + * + * @param weak_table The global weak table. + * @param referent The object. + * @param referrer The weak reference. + */ +void +weak_unregister_no_lock(weak_table_t *weak_table, id referent_id, + id *referrer_id) +{ + objc_object *referent = (objc_object *)referent_id; + objc_object **referrer = (objc_object **)referrer_id; + + weak_entry_t *entry; + + if (!referent) return; + + if ((entry = weak_entry_for_referent(weak_table, referent))) { + remove_referrer(entry, referrer); + bool empty = true; + if (entry->out_of_line() && entry->num_refs != 0) { + empty = false; + } + else { + for (size_t i = 0; i < WEAK_INLINE_COUNT; i++) { + if (entry->inline_referrers[i]) { + empty = false; + break; + } + } + } + + if (empty) { + weak_entry_remove(weak_table, entry); + } + } + + // Do not set *referrer = nil. objc_storeWeak() requires that the + // value not change. +} + +/** + * Registers a new (object, weak pointer) pair. Creates a new weak + * object entry if it does not exist. + * + * @param weak_table The global weak table. + * @param referent The object pointed to by the weak reference. + * @param referrer The weak pointer address. + */ +id +weak_register_no_lock(weak_table_t *weak_table, id referent_id, + id *referrer_id, bool crashIfDeallocating) +{ + objc_object *referent = (objc_object *)referent_id; + objc_object **referrer = (objc_object **)referrer_id; + + if (!referent || referent->isTaggedPointer()) return referent_id; + + // ensure that the referenced object is viable + bool deallocating; + if (!referent->ISA()->hasCustomRR()) { + deallocating = referent->rootIsDeallocating(); + } + else { + BOOL (*allowsWeakReference)(objc_object *, SEL) = + (BOOL(*)(objc_object *, SEL)) + object_getMethodImplementation((id)referent, + SEL_allowsWeakReference); + if ((IMP)allowsWeakReference == _objc_msgForward) { + return nil; + } + deallocating = + ! (*allowsWeakReference)(referent, SEL_allowsWeakReference); + } + + if (deallocating) { + if (crashIfDeallocating) { + _objc_fatal("Cannot form weak reference to instance (%p) of " + "class %s. It is possible that this object was " + "over-released, or is in the process of deallocation.", + (void*)referent, object_getClassName((id)referent)); + } else { + return nil; + } + } + + // now remember it and where it is being stored + weak_entry_t *entry; + if ((entry = weak_entry_for_referent(weak_table, referent))) { + append_referrer(entry, referrer); + } + else { + weak_entry_t new_entry(referent, referrer); + weak_grow_maybe(weak_table); + weak_entry_insert(weak_table, &new_entry); + } + + // Do not set *referrer. objc_storeWeak() requires that the + // value not change. + + return referent_id; +} + + +#if DEBUG +bool +weak_is_registered_no_lock(weak_table_t *weak_table, id referent_id) +{ + return weak_entry_for_referent(weak_table, (objc_object *)referent_id); +} +#endif + + +/** + * Called by dealloc; nils out all weak pointers that point to the + * provided object so that they can no longer be used. + * + * @param weak_table + * @param referent The object being deallocated. 
+ */ +void +weak_clear_no_lock(weak_table_t *weak_table, id referent_id) +{ + objc_object *referent = (objc_object *)referent_id; + + weak_entry_t *entry = weak_entry_for_referent(weak_table, referent); + if (entry == nil) { + /// XXX shouldn't happen, but does with mismatched CF/objc + //printf("XXX no entry for clear deallocating %p\n", referent); + return; + } + + // zero out references + weak_referrer_t *referrers; + size_t count; + + if (entry->out_of_line()) { + referrers = entry->referrers; + count = TABLE_SIZE(entry); + } + else { + referrers = entry->inline_referrers; + count = WEAK_INLINE_COUNT; + } + + for (size_t i = 0; i < count; ++i) { + objc_object **referrer = referrers[i]; + if (referrer) { + if (*referrer == referent) { + *referrer = nil; + } + else if (*referrer) { + _objc_inform("__weak variable at %p holds %p instead of %p. " + "This is probably incorrect use of " + "objc_storeWeak() and objc_loadWeak(). " + "Break on objc_weak_error to debug.\n", + referrer, (void*)*referrer, (void*)referent); + objc_weak_error(); + } + } + } + + weak_entry_remove(weak_table, entry); +} + diff --git a/runtime/objc.h b/runtime/objc.h new file mode 100644 index 0000000..7417ebc --- /dev/null +++ b/runtime/objc.h @@ -0,0 +1,234 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. + * + * @APPLE_LICENSE_HEADER_END@ + */ +/* + * objc.h + * Copyright 1988-1996, NeXT Software, Inc. + */ + +#ifndef _OBJC_OBJC_H_ +#define _OBJC_OBJC_H_ + +#include // for __DARWIN_NULL +#include +#include +#include + +#if !OBJC_TYPES_DEFINED +/// An opaque type that represents an Objective-C class. +typedef struct objc_class *Class; + +/// Represents an instance of a class. +struct objc_object { + Class isa OBJC_ISA_AVAILABILITY; +}; + +/// A pointer to an instance of a class. +typedef struct objc_object *id; +#endif + +/// An opaque type that represents a method selector. +typedef struct objc_selector *SEL; + +/// A pointer to the function of a method implementation. +#if !OBJC_OLD_DISPATCH_PROTOTYPES +typedef void (*IMP)(void /* id, SEL, ... */ ); +#else +typedef id (*IMP)(id, SEL, ...); +#endif + +#define OBJC_BOOL_DEFINED + +/// Type to represent a boolean value. +#if (TARGET_OS_IPHONE && __LP64__) || TARGET_OS_WATCH +#define OBJC_BOOL_IS_BOOL 1 +typedef bool BOOL; +#else +#define OBJC_BOOL_IS_CHAR 1 +typedef signed char BOOL; +// BOOL is explicitly signed so @encode(BOOL) == "c" rather than "C" +// even if -funsigned-char is used. 
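// For example (illustrative): with this definition @encode(BOOL) is "c",
// and a value such as (BOOL)0x0100 truncates to 0, i.e. NO; under the
// LP64 iOS/watchOS branch above, where BOOL is a real C++ bool,
// @encode(BOOL) is "B" and the same conversion yields YES.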
+#endif + +#if __has_feature(objc_bool) +#define YES __objc_yes +#define NO __objc_no +#else +#define YES ((BOOL)1) +#define NO ((BOOL)0) +#endif + +#ifndef Nil +# if __has_feature(cxx_nullptr) +# define Nil nullptr +# else +# define Nil __DARWIN_NULL +# endif +#endif + +#ifndef nil +# if __has_feature(cxx_nullptr) +# define nil nullptr +# else +# define nil __DARWIN_NULL +# endif +#endif + +#ifndef __strong +# if !__has_feature(objc_arc) +# define __strong /* empty */ +# endif +#endif + +#ifndef __unsafe_unretained +# if !__has_feature(objc_arc) +# define __unsafe_unretained /* empty */ +# endif +#endif + +#ifndef __autoreleasing +# if !__has_feature(objc_arc) +# define __autoreleasing /* empty */ +# endif +#endif + + +/** + * Returns the name of the method specified by a given selector. + * + * @param sel A pointer of type \c SEL. Pass the selector whose name you wish to determine. + * + * @return A C string indicating the name of the selector. + */ +OBJC_EXPORT const char *sel_getName(SEL sel) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Registers a method with the Objective-C runtime system, maps the method + * name to a selector, and returns the selector value. + * + * @param str A pointer to a C string. Pass the name of the method you wish to register. + * + * @return A pointer of type SEL specifying the selector for the named method. + * + * @note You must register a method name with the Objective-C runtime system to obtain the + * method’s selector before you can add the method to a class definition. If the method name + * has already been registered, this function simply returns the selector. + */ +OBJC_EXPORT SEL sel_registerName(const char *str) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the class name of a given object. + * + * @param obj An Objective-C object. + * + * @return The name of the class of which \e obj is an instance. + */ +OBJC_EXPORT const char *object_getClassName(id obj) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns a pointer to any extra bytes allocated with an instance given object. + * + * @param obj An Objective-C object. + * + * @return A pointer to any extra bytes allocated with \e obj. If \e obj was + * not allocated with any extra bytes, then dereferencing the returned pointer is undefined. + * + * @note This function returns a pointer to any extra bytes allocated with the instance + * (as specified by \c class_createInstance with extraBytes>0). This memory follows the + * object's ordinary ivars, but may not be adjacent to the last ivar. + * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following + * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never + * guaranteed, even if the area following the object's last ivar is more aligned than that. + * @note In a garbage-collected environment, the memory is scanned conservatively. + */ +OBJC_EXPORT void *object_getIndexedIvars(id obj) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Identifies a selector as being valid or invalid. + * + * @param sel The selector you want to identify. + * + * @return YES if selector is valid and has a function implementation, NO otherwise. + * + * @warning On some platforms, an invalid reference (to invalid memory addresses) can cause + * a crash. + */ +OBJC_EXPORT BOOL sel_isMapped(SEL sel) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Registers a method name with the Objective-C runtime system. + * + * @param str A pointer to a C string. 
Pass the name of the method you wish to register. + * + * @return A pointer of type SEL specifying the selector for the named method. + * + * @note The implementation of this method is identical to the implementation of \c sel_registerName. + * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name + * and returned \c NULL if the selector was not found. This was changed for safety, because it was + * observed that many of the callers of this function did not check the return value for \c NULL. + */ +OBJC_EXPORT SEL sel_getUid(const char *str) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +typedef const void* objc_objectptr_t; + + +// Obsolete ARC conversions. + +OBJC_EXPORT id objc_retainedObject(objc_objectptr_t obj) + OBJC_UNAVAILABLE("use CFBridgingRelease() or a (__bridge_transfer id) cast instead"); +OBJC_EXPORT id objc_unretainedObject(objc_objectptr_t obj) + OBJC_UNAVAILABLE("use a (__bridge id) cast instead"); +OBJC_EXPORT objc_objectptr_t objc_unretainedPointer(id obj) + OBJC_UNAVAILABLE("use a __bridge cast instead"); + + +#if !__OBJC2__ + +// The following declarations are provided here for source compatibility. + +#if defined(__LP64__) + typedef long arith_t; + typedef unsigned long uarith_t; +# define ARITH_SHIFT 32 +#else + typedef int arith_t; + typedef unsigned uarith_t; +# define ARITH_SHIFT 16 +#endif + +typedef char *STR; + +#define ISSELECTOR(sel) sel_isMapped(sel) +#define SELNAME(sel) sel_getName(sel) +#define SELUID(str) sel_getUid(str) +#define NAMEOF(obj) object_getClassName(obj) +#define IV(obj) object_getIndexedIvars(obj) + +#endif + +#endif /* _OBJC_OBJC_H_ */ diff --git a/runtime/objcrt.c b/runtime/objcrt.c new file mode 100644 index 0000000..7570e2d --- /dev/null +++ b/runtime/objcrt.c @@ -0,0 +1,98 @@ +#define WIN32_LEAN_AND_MEAN +#include +#include +#include +#include "objcrt.h" + +// Boundary symbols for metadata sections + +#pragma section(".objc_module_info$A",long,read,write) +#pragma data_seg(".objc_module_info$A") +static uintptr_t __objc_modStart = 0; +#pragma section(".objc_module_info$C",long,read,write) +#pragma data_seg(".objc_module_info$C") +static uintptr_t __objc_modEnd = 0; + +#pragma section(".objc_protocol$A",long,read,write) +#pragma data_seg(".objc_protocol$A") +static uintptr_t __objc_protoStart = 0; +#pragma section(".objc_protocol$C",long,read,write) +#pragma data_seg(".objc_protocol$C") +static uintptr_t __objc_protoEnd = 0; + +#pragma section(".objc_image_info$A",long,read,write) +#pragma data_seg(".objc_image_info$A") +static uintptr_t __objc_iiStart = 0; +#pragma section(".objc_image_info$C",long,read,write) +#pragma data_seg(".objc_image_info$C") +static uintptr_t __objc_iiEnd = 0; + +#pragma section(".objc_message_refs$A",long,read,write) +#pragma data_seg(".objc_message_refs$A") +static uintptr_t __objc_selrefsStart = 0; +#pragma section(".objc_message_refs$C",long,read,write) +#pragma data_seg(".objc_message_refs$C") +static uintptr_t __objc_selrefsEnd = 0; + +#pragma section(".objc_class_refs$A",long,read,write) +#pragma data_seg(".objc_class_refs$A") +static uintptr_t __objc_clsrefsStart = 0; +#pragma section(".objc_class_refs$C",long,read,write) +#pragma data_seg(".objc_class_refs$C") +static uintptr_t __objc_clsrefsEnd = 0; + +#pragma data_seg() + +// Merge all metadata into .data +// fixme order these by usage? 
+#pragma comment(linker, "/MERGE:.objc_module_info=.data") +#pragma comment(linker, "/MERGE:.objc_protocol=.data") +#pragma comment(linker, "/MERGE:.objc_image_info=.data") +#pragma comment(linker, "/MERGE:.objc_message_refs=.data") +#pragma comment(linker, "/MERGE:.objc_class_refs=.data") + + +// Image initializers + +static void *__hinfo = NULL; // cookie from runtime +extern IMAGE_DOS_HEADER __ImageBase; // this image's header + +static int __objc_init(void) +{ + objc_sections sections = { + 5, + &__objc_modStart, &__objc_modEnd, + &__objc_protoStart, &__objc_protoEnd, + &__objc_iiStart, &__objc_iiEnd, + &__objc_selrefsStart, &__objc_selrefsEnd, + &__objc_clsrefsStart, &__objc_clsrefsEnd, + }; + __hinfo = _objc_init_image((HMODULE)&__ImageBase, §ions); + return 0; +} + +static void __objc_unload(void) +{ + _objc_unload_image((HMODULE)&__ImageBase, __hinfo); +} + +static int __objc_load(void) +{ + _objc_load_image((HMODULE)&__ImageBase, __hinfo); + return 0; +} + +// run _objc_init_image ASAP +#pragma section(".CRT$XIAA",long,read,write) +#pragma data_seg(".CRT$XIAA") +static void *__objc_init_fn = &__objc_init; + +// run _objc_load_image (+load methods) after all other initializers; +// otherwise constant NSStrings are not initialized yet +#pragma section(".CRT$XCUO",long,read,write) +#pragma data_seg(".CRT$XCUO") +static void *__objc_load_fn = &__objc_load; + +// _objc_unload_image is called by atexit(), not by an image terminator + +#pragma data_seg() diff --git a/runtime/objcrt.h b/runtime/objcrt.h new file mode 100644 index 0000000..4e6fefd --- /dev/null +++ b/runtime/objcrt.h @@ -0,0 +1,25 @@ +#ifndef _OBJC_RT_H_ +#define _OBJC_RT_H_ + +#include + + +typedef struct { + int count; // number of pointer pairs that follow + void *modStart; + void *modEnd; + void *protoStart; + void *protoEnd; + void *iiStart; + void *iiEnd; + void *selrefsStart; + void *selrefsEnd; + void *clsrefsStart; + void *clsrefsEnd; +} objc_sections; + +OBJC_EXPORT void *_objc_init_image(HMODULE image, const objc_sections *sects); +OBJC_EXPORT void _objc_load_image(HMODULE image, void *hinfo); +OBJC_EXPORT void _objc_unload_image(HMODULE image, void *hinfo); + +#endif diff --git a/runtime/runtime.h b/runtime/runtime.h new file mode 100644 index 0000000..38e74f1 --- /dev/null +++ b/runtime/runtime.h @@ -0,0 +1,1847 @@ +/* + * Copyright (c) 1999-2007 Apple Inc. All Rights Reserved. + * + * @APPLE_LICENSE_HEADER_START@ + * + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this + * file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, + * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. 
+ * + * @APPLE_LICENSE_HEADER_END@ + */ + +#ifndef _OBJC_RUNTIME_H +#define _OBJC_RUNTIME_H + +#include +#include +#include +#include +#include +#include + +#if TARGET_OS_MAC +#include +#endif + + +/* Types */ + +#if !OBJC_TYPES_DEFINED + +/// An opaque type that represents a method in a class definition. +typedef struct objc_method *Method; + +/// An opaque type that represents an instance variable. +typedef struct objc_ivar *Ivar; + +/// An opaque type that represents a category. +typedef struct objc_category *Category; + +/// An opaque type that represents an Objective-C declared property. +typedef struct objc_property *objc_property_t; + +struct objc_class { + Class isa OBJC_ISA_AVAILABILITY; + +#if !__OBJC2__ + Class super_class OBJC2_UNAVAILABLE; + const char *name OBJC2_UNAVAILABLE; + long version OBJC2_UNAVAILABLE; + long info OBJC2_UNAVAILABLE; + long instance_size OBJC2_UNAVAILABLE; + struct objc_ivar_list *ivars OBJC2_UNAVAILABLE; + struct objc_method_list **methodLists OBJC2_UNAVAILABLE; + struct objc_cache *cache OBJC2_UNAVAILABLE; + struct objc_protocol_list *protocols OBJC2_UNAVAILABLE; +#endif + +} OBJC2_UNAVAILABLE; +/* Use `Class` instead of `struct objc_class *` */ + +#endif + +#ifdef __OBJC__ +@class Protocol; +#else +typedef struct objc_object Protocol; +#endif + +/// Defines a method +struct objc_method_description { + SEL name; /**< The name of the method */ + char *types; /**< The types of the method arguments */ +}; + +/// Defines a property attribute +typedef struct { + const char *name; /**< The name of the attribute */ + const char *value; /**< The value of the attribute (usually empty) */ +} objc_property_attribute_t; + + +/* Functions */ + +/* Working with Instances */ + +/** + * Returns a copy of a given object. + * + * @param obj An Objective-C object. + * @param size The size of the object \e obj. + * + * @return A copy of \e obj. + */ +OBJC_EXPORT id object_copy(id obj, size_t size) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; + +/** + * Frees the memory occupied by a given object. + * + * @param obj An Objective-C object. + * + * @return nil + */ +OBJC_EXPORT id object_dispose(id obj) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; + +/** + * Returns the class of an object. + * + * @param obj The object you want to inspect. + * + * @return The class object of which \e object is an instance, + * or \c Nil if \e object is \c nil. + */ +OBJC_EXPORT Class object_getClass(id obj) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Sets the class of an object. + * + * @param obj The object to modify. + * @param cls A class object. + * + * @return The previous value of \e object's class, or \c Nil if \e object is \c nil. + */ +OBJC_EXPORT Class object_setClass(id obj, Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + + +/** + * Returns whether an object is a class object. + * + * @param obj An Objective-C object. + * + * @return true if the object is a class or metaclass, false otherwise. + */ +OBJC_EXPORT BOOL object_isClass(id obj) + OBJC_AVAILABLE(10.10, 8.0, 9.0, 1.0); + + +/** + * Returns the class name of a given object. + * + * @param obj An Objective-C object. + * + * @return The name of the class of which \e obj is an instance. + */ +OBJC_EXPORT const char *object_getClassName(id obj) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns a pointer to any extra bytes allocated with an instance given object. + * + * @param obj An Objective-C object. + * + * @return A pointer to any extra bytes allocated with \e obj. 
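[Illustrative sketch, not from the objc4 sources; swizzle_isa and the compatible-layout requirement are assumptions of the example.] object_getClass and object_setClass, documented below, are the primitives behind isa swizzling:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

// Swap an instance's class and restore it. 'replacement' must have an ivar
// layout compatible with the object's original class.
static void swizzle_isa(id obj, Class replacement) {
    Class original = object_getClass(obj);   // the current runtime class
    object_setClass(obj, replacement);       // messages now dispatch via 'replacement'
    NSLog(@"%s -> %s", class_getName(original), object_getClassName(obj));
    object_setClass(obj, original);           // restore
}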
If \e obj was + * not allocated with any extra bytes, then dereferencing the returned pointer is undefined. + * + * @note This function returns a pointer to any extra bytes allocated with the instance + * (as specified by \c class_createInstance with extraBytes>0). This memory follows the + * object's ordinary ivars, but may not be adjacent to the last ivar. + * @note The returned pointer is guaranteed to be pointer-size aligned, even if the area following + * the object's last ivar is less aligned than that. Alignment greater than pointer-size is never + * guaranteed, even if the area following the object's last ivar is more aligned than that. + * @note In a garbage-collected environment, the memory is scanned conservatively. + */ +OBJC_EXPORT void *object_getIndexedIvars(id obj) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; + +/** + * Reads the value of an instance variable in an object. + * + * @param obj The object containing the instance variable whose value you want to read. + * @param ivar The Ivar describing the instance variable whose value you want to read. + * + * @return The value of the instance variable specified by \e ivar, or \c nil if \e object is \c nil. + * + * @note \c object_getIvar is faster than \c object_getInstanceVariable if the Ivar + * for the instance variable is already known. + */ +OBJC_EXPORT id object_getIvar(id obj, Ivar ivar) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Sets the value of an instance variable in an object. + * + * @param obj The object containing the instance variable whose value you want to set. + * @param ivar The Ivar describing the instance variable whose value you want to set. + * @param value The new value for the instance variable. + * + * @note Instance variables with known memory management (such as ARC strong and weak) + * use that memory management. Instance variables with unknown memory management + * are assigned as if they were unsafe_unretained. + * @note \c object_setIvar is faster than \c object_setInstanceVariable if the Ivar + * for the instance variable is already known. + */ +OBJC_EXPORT void object_setIvar(id obj, Ivar ivar, id value) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Sets the value of an instance variable in an object. + * + * @param obj The object containing the instance variable whose value you want to set. + * @param ivar The Ivar describing the instance variable whose value you want to set. + * @param value The new value for the instance variable. + * + * @note Instance variables with known memory management (such as ARC strong and weak) + * use that memory management. Instance variables with unknown memory management + * are assigned as if they were strong. + * @note \c object_setIvar is faster than \c object_setInstanceVariable if the Ivar + * for the instance variable is already known. + */ +OBJC_EXPORT void object_setIvarWithStrongDefault(id obj, Ivar ivar, id value) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +/** + * Changes the value of an instance variable of a class instance. + * + * @param obj A pointer to an instance of a class. Pass the object containing + * the instance variable whose value you wish to modify. + * @param name A C string. Pass the name of the instance variable whose value you wish to modify. + * @param value The new value for the instance variable. + * + * @return A pointer to the \c Ivar data structure that defines the type and + * name of the instance variable specified by \e name. 
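[Illustrative sketch, not from the objc4 sources; "_name" and the Person-style object are hypothetical.] The note above is the reason to cache the Ivar: once it is known, object_getIvar/object_setIvar avoid the by-name lookup entirely.

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

static id read_name(id person) {
    Ivar nameIvar = class_getInstanceVariable(object_getClass(person), "_name");
    if (nameIvar == NULL) return nil;          // no such ivar on this class
    return object_getIvar(person, nameIvar);   // fast path once the Ivar is known
}

static void write_name(id person, id newName) {
    Ivar nameIvar = class_getInstanceVariable(object_getClass(person), "_name");
    if (nameIvar) object_setIvar(person, nameIvar, newName);
}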
+ * + * @note Instance variables with known memory management (such as ARC strong and weak) + * use that memory management. Instance variables with unknown memory management + * are assigned as if they were unsafe_unretained. + */ +OBJC_EXPORT Ivar object_setInstanceVariable(id obj, const char *name, void *value) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; + +/** + * Changes the value of an instance variable of a class instance. + * + * @param obj A pointer to an instance of a class. Pass the object containing + * the instance variable whose value you wish to modify. + * @param name A C string. Pass the name of the instance variable whose value you wish to modify. + * @param value The new value for the instance variable. + * + * @return A pointer to the \c Ivar data structure that defines the type and + * name of the instance variable specified by \e name. + * + * @note Instance variables with known memory management (such as ARC strong and weak) + * use that memory management. Instance variables with unknown memory management + * are assigned as if they were strong. + */ +OBJC_EXPORT Ivar object_setInstanceVariableWithStrongDefault(id obj, const char *name, void *value) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0) + OBJC_ARC_UNAVAILABLE; + +/** + * Obtains the value of an instance variable of a class instance. + * + * @param obj A pointer to an instance of a class. Pass the object containing + * the instance variable whose value you wish to obtain. + * @param name A C string. Pass the name of the instance variable whose value you wish to obtain. + * @param outValue On return, contains a pointer to the value of the instance variable. + * + * @return A pointer to the \c Ivar data structure that defines the type and name of + * the instance variable specified by \e name. + */ +OBJC_EXPORT Ivar object_getInstanceVariable(id obj, const char *name, void **outValue) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0) + OBJC_ARC_UNAVAILABLE; + + +/* Obtaining Class Definitions */ + +/** + * Returns the class definition of a specified class. + * + * @param name The name of the class to look up. + * + * @return The Class object for the named class, or \c nil + * if the class is not registered with the Objective-C runtime. + * + * @note \c objc_getClass is different from \c objc_lookUpClass in that if the class + * is not registered, \c objc_getClass calls the class handler callback and then checks + * a second time to see whether the class is registered. \c objc_lookUpClass does + * not call the class handler callback. + * + * @warning Earlier implementations of this function (prior to OS X v10.0) + * terminate the program if the class does not exist. + */ +OBJC_EXPORT Class objc_getClass(const char *name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the metaclass definition of a specified class. + * + * @param name The name of the class to look up. + * + * @return The \c Class object for the metaclass of the named class, or \c nil if the class + * is not registered with the Objective-C runtime. + * + * @note If the definition for the named class is not registered, this function calls the class handler + * callback and then checks a second time to see if the class is registered. However, every class + * definition must have a valid metaclass definition, and so the metaclass definition is always returned, + * whether it’s valid or not. 
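[Illustrative sketch, not from the objc4 sources.] The difference between the class-lookup functions documented here is only whether the class handler callback may run:

#import <objc/runtime.h>
#import <stdio.h>

static void lookup_demo(void) {
    Class c1 = objc_getClass("NSObject");      // may invoke the class handler callback
    Class c2 = objc_lookUpClass("NSObject");   // plain lookup, never calls the handler
    Class meta = objc_getMetaClass("NSObject");
    printf("%s %s meta? %d\n", class_getName(c1), class_getName(c2), class_isMetaClass(meta));
    // objc_getRequiredClass() would abort the process instead of returning Nil on failure.
}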
+ */ +OBJC_EXPORT Class objc_getMetaClass(const char *name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the class definition of a specified class. + * + * @param name The name of the class to look up. + * + * @return The Class object for the named class, or \c nil if the class + * is not registered with the Objective-C runtime. + * + * @note \c objc_getClass is different from this function in that if the class is not + * registered, \c objc_getClass calls the class handler callback and then checks a second + * time to see whether the class is registered. This function does not call the class handler callback. + */ +OBJC_EXPORT Class objc_lookUpClass(const char *name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the class definition of a specified class. + * + * @param name The name of the class to look up. + * + * @return The Class object for the named class. + * + * @note This function is the same as \c objc_getClass, but kills the process if the class is not found. + * @note This function is used by ZeroLink, where failing to find a class would be a compile-time link error without ZeroLink. + */ +OBJC_EXPORT Class objc_getRequiredClass(const char *name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Obtains the list of registered class definitions. + * + * @param buffer An array of \c Class values. On output, each \c Class value points to + * one class definition, up to either \e bufferCount or the total number of registered classes, + * whichever is less. You can pass \c NULL to obtain the total number of registered class + * definitions without actually retrieving any class definitions. + * @param bufferCount An integer value. Pass the number of pointers for which you have allocated space + * in \e buffer. On return, this function fills in only this number of elements. If this number is less + * than the number of registered classes, this function returns an arbitrary subset of the registered classes. + * + * @return An integer value indicating the total number of registered classes. + * + * @note The Objective-C runtime library automatically registers all the classes defined in your source code. + * You can create class definitions at runtime and register them with the \c objc_addClass function. + * + * @warning You cannot assume that class objects you get from this function are classes that inherit from \c NSObject, + * so you cannot safely call any methods on such classes without detecting that the method is implemented first. + */ +OBJC_EXPORT int objc_getClassList(Class *buffer, int bufferCount) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Creates and returns a list of pointers to all registered class definitions. + * + * @param outCount An integer pointer used to store the number of classes returned by + * this function in the list. It can be \c nil. + * + * @return A nil terminated array of classes. It must be freed with \c free(). + * + * @see objc_getClassList + */ +OBJC_EXPORT Class *objc_copyClassList(unsigned int *outCount) + OBJC_AVAILABLE(10.7, 3.1, 9.0, 1.0); + + +/* Working with Classes */ + +/** + * Returns the name of a class. + * + * @param cls A class object. + * + * @return The name of the class, or the empty string if \e cls is \c Nil. + */ +OBJC_EXPORT const char *class_getName(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a Boolean value that indicates whether a class object is a metaclass. + * + * @param cls A class object. 
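[Illustrative sketch, not from the objc4 sources.] A minimal use of objc_copyClassList as documented above; the buffer comes back malloc'd and must be released with free(), and only runtime C calls are made on the classes since they need not descend from NSObject.

#import <objc/runtime.h>
#import <stdio.h>
#import <stdlib.h>

static void list_classes(void) {
    unsigned int count = 0;
    Class *classes = objc_copyClassList(&count);
    for (unsigned int i = 0; i < count; i++) {
        printf("%u: %s\n", i, class_getName(classes[i]));
    }
    free(classes);
}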
+ * + * @return \c YES if \e cls is a metaclass, \c NO if \e cls is a non-meta class, + * \c NO if \e cls is \c Nil. + */ +OBJC_EXPORT BOOL class_isMetaClass(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the superclass of a class. + * + * @param cls A class object. + * + * @return The superclass of the class, or \c Nil if + * \e cls is a root class, or \c Nil if \e cls is \c Nil. + * + * @note You should usually use \c NSObject's \c superclass method instead of this function. + */ +OBJC_EXPORT Class class_getSuperclass(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Sets the superclass of a given class. + * + * @param cls The class whose superclass you want to set. + * @param newSuper The new superclass for cls. + * + * @return The old superclass for cls. + * + * @warning You should not use this function. + */ +OBJC_EXPORT Class class_setSuperclass(Class cls, Class newSuper) + __OSX_DEPRECATED(10.5, 10.5, "not recommended") + __IOS_DEPRECATED(2.0, 2.0, "not recommended") + __TVOS_DEPRECATED(9.0, 9.0, "not recommended") + __WATCHOS_DEPRECATED(1.0, 1.0, "not recommended"); + +/** + * Returns the version number of a class definition. + * + * @param cls A pointer to a \c Class data structure. Pass + * the class definition for which you wish to obtain the version. + * + * @return An integer indicating the version number of the class definition. + * + * @see class_setVersion + */ +OBJC_EXPORT int class_getVersion(Class cls) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Sets the version number of a class definition. + * + * @param cls A pointer to an Class data structure. + * Pass the class definition for which you wish to set the version. + * @param version An integer. Pass the new version number of the class definition. + * + * @note You can use the version number of the class definition to provide versioning of the + * interface that your class represents to other classes. This is especially useful for object + * serialization (that is, archiving of the object in a flattened form), where it is important to + * recognize changes to the layout of the instance variables in different class-definition versions. + * @note Classes derived from the Foundation framework \c NSObject class can set the class-definition + * version number using the \c setVersion: class method, which is implemented using the \c class_setVersion function. + */ +OBJC_EXPORT void class_setVersion(Class cls, int version) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the size of instances of a class. + * + * @param cls A class object. + * + * @return The size in bytes of instances of the class \e cls, or \c 0 if \e cls is \c Nil. + */ +OBJC_EXPORT size_t class_getInstanceSize(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the \c Ivar for a specified instance variable of a given class. + * + * @param cls The class whose instance variable you wish to obtain. + * @param name The name of the instance variable definition to obtain. + * + * @return A pointer to an \c Ivar data structure containing information about + * the instance variable specified by \e name. + */ +OBJC_EXPORT Ivar class_getInstanceVariable(Class cls, const char *name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the Ivar for a specified class variable of a given class. + * + * @param cls The class definition whose class variable you wish to obtain. + * @param name The name of the class variable definition to obtain. 
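[Illustrative sketch, not from the objc4 sources; dump_hierarchy is a hypothetical helper.] class_getSuperclass and class_getInstanceSize, documented above, are enough to walk an inheritance chain:

#import <objc/runtime.h>
#import <stdio.h>

static void dump_hierarchy(Class cls) {
    for (Class c = cls; c != Nil; c = class_getSuperclass(c)) {
        printf("%s (%zu bytes per instance)\n", class_getName(c), class_getInstanceSize(c));
    }
}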
+ * + * @return A pointer to an \c Ivar data structure containing information about the class variable specified by \e name. + */ +OBJC_EXPORT Ivar class_getClassVariable(Class cls, const char *name) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Describes the instance variables declared by a class. + * + * @param cls The class to inspect. + * @param outCount On return, contains the length of the returned array. + * If outCount is NULL, the length is not returned. + * + * @return An array of pointers of type Ivar describing the instance variables declared by the class. + * Any instance variables declared by superclasses are not included. The array contains *outCount + * pointers followed by a NULL terminator. You must free the array with free(). + * + * If the class declares no instance variables, or cls is Nil, NULL is returned and *outCount is 0. + */ +OBJC_EXPORT Ivar *class_copyIvarList(Class cls, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a specified instance method for a given class. + * + * @param cls The class you want to inspect. + * @param name The selector of the method you want to retrieve. + * + * @return The method that corresponds to the implementation of the selector specified by + * \e name for the class specified by \e cls, or \c NULL if the specified class or its + * superclasses do not contain an instance method with the specified selector. + * + * @note This function searches superclasses for implementations, whereas \c class_copyMethodList does not. + */ +OBJC_EXPORT Method class_getInstanceMethod(Class cls, SEL name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns a pointer to the data structure describing a given class method for a given class. + * + * @param cls A pointer to a class definition. Pass the class that contains the method you want to retrieve. + * @param name A pointer of type \c SEL. Pass the selector of the method you want to retrieve. + * + * @return A pointer to the \c Method data structure that corresponds to the implementation of the + * selector specified by aSelector for the class specified by aClass, or NULL if the specified + * class or its superclasses do not contain an instance method with the specified selector. + * + * @note Note that this function searches superclasses for implementations, + * whereas \c class_copyMethodList does not. + */ +OBJC_EXPORT Method class_getClassMethod(Class cls, SEL name) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns the function pointer that would be called if a + * particular message were sent to an instance of a class. + * + * @param cls The class you want to inspect. + * @param name A selector. + * + * @return The function pointer that would be called if \c [object name] were called + * with an instance of the class, or \c NULL if \e cls is \c Nil. + * + * @note \c class_getMethodImplementation may be faster than \c method_getImplementation(class_getInstanceMethod(cls, name)). + * @note The function pointer returned may be a function internal to the runtime instead of + * an actual method implementation. For example, if instances of the class do not respond to + * the selector, the function pointer returned will be part of the runtime's message forwarding machinery. + */ +OBJC_EXPORT IMP class_getMethodImplementation(Class cls, SEL name) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the function pointer that would be called if a particular + * message were sent to an instance of a class. 
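[Illustrative sketch, not from the objc4 sources; imp_demo is hypothetical.] Resolving an IMP once with class_getMethodImplementation and calling it through a correctly typed function pointer is the fast path the note above describes:

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

static void imp_demo(id obj) {
    SEL sel = sel_registerName("description");
    IMP imp = class_getMethodImplementation(object_getClass(obj), sel);
    // Cast to the method's real signature before calling.
    NSString *(*callDescription)(id, SEL) = (NSString *(*)(id, SEL))imp;
    NSLog(@"%@", callDescription(obj, sel));
}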
+ * + * @param cls The class you want to inspect. + * @param name A selector. + * + * @return The function pointer that would be called if \c [object name] were called + * with an instance of the class, or \c NULL if \e cls is \c Nil. + */ +OBJC_EXPORT IMP class_getMethodImplementation_stret(Class cls, SEL name) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0) + OBJC_ARM64_UNAVAILABLE; + +/** + * Returns a Boolean value that indicates whether instances of a class respond to a particular selector. + * + * @param cls The class you want to inspect. + * @param sel A selector. + * + * @return \c YES if instances of the class respond to the selector, otherwise \c NO. + * + * @note You should usually use \c NSObject's \c respondsToSelector: or \c instancesRespondToSelector: + * methods instead of this function. + */ +OBJC_EXPORT BOOL class_respondsToSelector(Class cls, SEL sel) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Describes the instance methods implemented by a class. + * + * @param cls The class you want to inspect. + * @param outCount On return, contains the length of the returned array. + * If outCount is NULL, the length is not returned. + * + * @return An array of pointers of type Method describing the instance methods + * implemented by the class—any instance methods implemented by superclasses are not included. + * The array contains *outCount pointers followed by a NULL terminator. You must free the array with free(). + * + * If cls implements no instance methods, or cls is Nil, returns NULL and *outCount is 0. + * + * @note To get the class methods of a class, use \c class_copyMethodList(object_getClass(cls), &count). + * @note To get the implementations of methods that may be implemented by superclasses, + * use \c class_getInstanceMethod or \c class_getClassMethod. + */ +OBJC_EXPORT Method *class_copyMethodList(Class cls, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a Boolean value that indicates whether a class conforms to a given protocol. + * + * @param cls The class you want to inspect. + * @param protocol A protocol. + * + * @return YES if cls conforms to protocol, otherwise NO. + * + * @note You should usually use NSObject's conformsToProtocol: method instead of this function. + */ +OBJC_EXPORT BOOL class_conformsToProtocol(Class cls, Protocol *protocol) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Describes the protocols adopted by a class. + * + * @param cls The class you want to inspect. + * @param outCount On return, contains the length of the returned array. + * If outCount is NULL, the length is not returned. + * + * @return An array of pointers of type Protocol* describing the protocols adopted + * by the class. Any protocols adopted by superclasses or other protocols are not included. + * The array contains *outCount pointers followed by a NULL terminator. You must free the array with free(). + * + * If cls adopts no protocols, or cls is Nil, returns NULL and *outCount is 0. + */ +OBJC_EXPORT Protocol * __unsafe_unretained *class_copyProtocolList(Class cls, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a property with a given name of a given class. + * + * @param cls The class you want to inspect. + * @param name The name of the property you want to inspect. + * + * @return A pointer of type \c objc_property_t describing the property, or + * \c NULL if the class does not declare a property with that name, + * or \c NULL if \e cls is \c Nil. 
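[Illustrative sketch, not from the objc4 sources.] Enumerating a class's own instance methods with class_copyMethodList; per the note above, superclass methods are not included, and the metaclass is passed to get class methods.

#import <objc/runtime.h>
#import <stdio.h>
#import <stdlib.h>

static void dump_methods(Class cls) {
    unsigned int count = 0;
    Method *methods = class_copyMethodList(cls, &count);
    for (unsigned int i = 0; i < count; i++) {
        printf("-[%s %s]\n", class_getName(cls), sel_getName(method_getName(methods[i])));
    }
    free(methods);
    // Class methods: class_copyMethodList(object_getClass(cls), &count)
}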
+ */ +OBJC_EXPORT objc_property_t class_getProperty(Class cls, const char *name) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Describes the properties declared by a class. + * + * @param cls The class you want to inspect. + * @param outCount On return, contains the length of the returned array. + * If \e outCount is \c NULL, the length is not returned. + * + * @return An array of pointers of type \c objc_property_t describing the properties + * declared by the class. Any properties declared by superclasses are not included. + * The array contains \c *outCount pointers followed by a \c NULL terminator. You must free the array with \c free(). + * + * If \e cls declares no properties, or \e cls is \c Nil, returns \c NULL and \c *outCount is \c 0. + */ +OBJC_EXPORT objc_property_t *class_copyPropertyList(Class cls, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a description of the \c Ivar layout for a given class. + * + * @param cls The class to inspect. + * + * @return A description of the \c Ivar layout for \e cls. + */ +OBJC_EXPORT const uint8_t *class_getIvarLayout(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a description of the layout of weak Ivars for a given class. + * + * @param cls The class to inspect. + * + * @return A description of the layout of the weak \c Ivars for \e cls. + */ +OBJC_EXPORT const uint8_t *class_getWeakIvarLayout(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Adds a new method to a class with a given name and implementation. + * + * @param cls The class to which to add a method. + * @param name A selector that specifies the name of the method being added. + * @param imp A function which is the implementation of the new method. The function must take at least two arguments—self and _cmd. + * @param types An array of characters that describe the types of the arguments to the method. + * + * @return YES if the method was added successfully, otherwise NO + * (for example, the class already contains a method implementation with that name). + * + * @note class_addMethod will add an override of a superclass's implementation, + * but will not replace an existing implementation in this class. + * To change an existing implementation, use method_setImplementation. + */ +OBJC_EXPORT BOOL class_addMethod(Class cls, SEL name, IMP imp, + const char *types) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Replaces the implementation of a method for a given class. + * + * @param cls The class you want to modify. + * @param name A selector that identifies the method whose implementation you want to replace. + * @param imp The new implementation for the method identified by name for the class identified by cls. + * @param types An array of characters that describe the types of the arguments to the method. + * Since the function must take at least two arguments—self and _cmd, the second and third characters + * must be “@:” (the first character is the return type). + * + * @return The previous implementation of the method identified by \e name for the class identified by \e cls. + * + * @note This function behaves in two different ways: + * - If the method identified by \e name does not yet exist, it is added as if \c class_addMethod were called. + * The type encoding specified by \e types is used as given. + * - If the method identified by \e name does exist, its \c IMP is replaced as if \c method_setImplementation were called. + * The type encoding specified by \e types is ignored. 
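[Illustrative sketch, not from the objc4 sources; "greet" and myclass_greet are hypothetical.] The add-then-replace pattern follows directly from the class_addMethod/class_replaceMethod notes above; "v@:" encodes a void return plus the implicit self and _cmd arguments.

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

static void myclass_greet(id self, SEL _cmd) {
    NSLog(@"hello from %@", self);
}

static void add_greet(Class cls) {
    if (!class_addMethod(cls, sel_registerName("greet"), (IMP)myclass_greet, "v@:")) {
        // The class already implemented -greet; swap in the new IMP instead.
        class_replaceMethod(cls, sel_registerName("greet"), (IMP)myclass_greet, "v@:");
    }
}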
+ */ +OBJC_EXPORT IMP class_replaceMethod(Class cls, SEL name, IMP imp, + const char *types) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Adds a new instance variable to a class. + * + * @return YES if the instance variable was added successfully, otherwise NO + * (for example, the class already contains an instance variable with that name). + * + * @note This function may only be called after objc_allocateClassPair and before objc_registerClassPair. + * Adding an instance variable to an existing class is not supported. + * @note The class must not be a metaclass. Adding an instance variable to a metaclass is not supported. + * @note The instance variable's minimum alignment in bytes is 1< Type Encodings. + */ +OBJC_EXPORT const char *ivar_getTypeEncoding(Ivar v) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the offset of an instance variable. + * + * @param v The instance variable you want to enquire about. + * + * @return The offset of \e v. + * + * @note For instance variables of type \c id or other object types, call \c object_getIvar + * and \c object_setIvar instead of using this offset to access the instance variable data directly. + */ +OBJC_EXPORT ptrdiff_t ivar_getOffset(Ivar v) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + + +/* Working with Properties */ + +/** + * Returns the name of a property. + * + * @param property The property you want to inquire about. + * + * @return A C string containing the property's name. + */ +OBJC_EXPORT const char *property_getName(objc_property_t property) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the attribute string of a property. + * + * @param property A property. + * + * @return A C string containing the property's attributes. + * + * @note The format of the attribute string is described in Declared Properties in Objective-C Runtime Programming Guide. + */ +OBJC_EXPORT const char *property_getAttributes(objc_property_t property) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns an array of property attributes for a property. + * + * @param property The property whose attributes you want copied. + * @param outCount The number of attributes returned in the array. + * + * @return An array of property attributes; must be free'd() by the caller. + */ +OBJC_EXPORT objc_property_attribute_t *property_copyAttributeList(objc_property_t property, unsigned int *outCount) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Returns the value of a property attribute given the attribute name. + * + * @param property The property whose attribute value you are interested in. + * @param attributeName C string representing the attribute name. + * + * @return The value string of the attribute \e attributeName if it exists in + * \e property, \c nil otherwise. + */ +OBJC_EXPORT char *property_copyAttributeValue(objc_property_t property, const char *attributeName) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + + +/* Working with Protocols */ + +/** + * Returns a specified protocol. + * + * @param name The name of a protocol. + * + * @return The protocol named \e name, or \c NULL if no protocol named \e name could be found. + * + * @note This function acquires the runtime lock. + */ +OBJC_EXPORT Protocol *objc_getProtocol(const char *name) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns an array of all the protocols known to the runtime. + * + * @param outCount Upon return, contains the number of protocols in the returned array. + * + * @return A C array of all the protocols known to the runtime. 
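[Illustrative sketch, not from the objc4 sources; "title" is a hypothetical property name.] Inspecting a declared property with the functions documented above; the "T" attribute carries the property's type encoding.

#import <objc/runtime.h>
#import <stdio.h>
#import <stdlib.h>

static void inspect_property(Class cls) {
    objc_property_t prop = class_getProperty(cls, "title");
    if (prop == NULL) return;
    printf("%s: %s\n", property_getName(prop), property_getAttributes(prop));
    char *type = property_copyAttributeValue(prop, "T");   // e.g. @"NSString"
    if (type) { printf("type encoding: %s\n", type); free(type); }
}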
The array contains \c *outCount + * pointers followed by a \c NULL terminator. You must free the list with \c free(). + * + * @note This function acquires the runtime lock. + */ +OBJC_EXPORT Protocol * __unsafe_unretained *objc_copyProtocolList(unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a Boolean value that indicates whether one protocol conforms to another protocol. + * + * @param proto A protocol. + * @param other A protocol. + * + * @return \c YES if \e proto conforms to \e other, otherwise \c NO. + * + * @note One protocol can incorporate other protocols using the same syntax + * that classes use to adopt a protocol: + * \code + * @protocol ProtocolName < protocol list > + * \endcode + * All the protocols listed between angle brackets are considered part of the ProtocolName protocol. + */ +OBJC_EXPORT BOOL protocol_conformsToProtocol(Protocol *proto, Protocol *other) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a Boolean value that indicates whether two protocols are equal. + * + * @param proto A protocol. + * @param other A protocol. + * + * @return \c YES if \e proto is the same as \e other, otherwise \c NO. + */ +OBJC_EXPORT BOOL protocol_isEqual(Protocol *proto, Protocol *other) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the name of a protocol. + * + * @param p A protocol. + * + * @return The name of the protocol \e p as a C string. + */ +OBJC_EXPORT const char *protocol_getName(Protocol *p) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns a method description structure for a specified method of a given protocol. + * + * @param p A protocol. + * @param aSel A selector. + * @param isRequiredMethod A Boolean value that indicates whether aSel is a required method. + * @param isInstanceMethod A Boolean value that indicates whether aSel is an instance method. + * + * @return An \c objc_method_description structure that describes the method specified by \e aSel, + * \e isRequiredMethod, and \e isInstanceMethod for the protocol \e p. + * If the protocol does not contain the specified method, returns an \c objc_method_description structure + * with the value \c {NULL, \c NULL}. + * + * @note This function recursively searches any protocols that this protocol conforms to. + */ +OBJC_EXPORT struct objc_method_description protocol_getMethodDescription(Protocol *p, SEL aSel, BOOL isRequiredMethod, BOOL isInstanceMethod) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns an array of method descriptions of methods meeting a given specification for a given protocol. + * + * @param p A protocol. + * @param isRequiredMethod A Boolean value that indicates whether returned methods should + * be required methods (pass YES to specify required methods). + * @param isInstanceMethod A Boolean value that indicates whether returned methods should + * be instance methods (pass YES to specify instance methods). + * @param outCount Upon return, contains the number of method description structures in the returned array. + * + * @return A C array of \c objc_method_description structures containing the names and types of \e p's methods + * specified by \e isRequiredMethod and \e isInstanceMethod. The array contains \c *outCount pointers followed + * by a \c NULL terminator. You must free the list with \c free(). + * If the protocol declares no methods that meet the specification, \c NULL is returned and \c *outCount is 0. + * + * @note Methods in other protocols adopted by this protocol are not included. 
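[Illustrative sketch, not from the objc4 sources; dump_protocol is hypothetical.] Listing a protocol's required instance methods with protocol_copyMethodDescriptionList as specified above:

#import <objc/runtime.h>
#import <stdio.h>
#import <stdlib.h>

static void dump_protocol(Protocol *proto) {
    unsigned int count = 0;
    struct objc_method_description *descs =
        protocol_copyMethodDescriptionList(proto, YES /*required*/, YES /*instance*/, &count);
    for (unsigned int i = 0; i < count; i++) {
        printf("%s types=%s\n", sel_getName(descs[i].name), descs[i].types);
    }
    free(descs);
}
// e.g. dump_protocol(objc_getProtocol("NSCoding"));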
+ */ +OBJC_EXPORT struct objc_method_description *protocol_copyMethodDescriptionList(Protocol *p, BOOL isRequiredMethod, BOOL isInstanceMethod, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the specified property of a given protocol. + * + * @param proto A protocol. + * @param name The name of a property. + * @param isRequiredProperty \c YES searches for a required property, \c NO searches for an optional property. + * @param isInstanceProperty \c YES searches for an instance property, \c NO searches for a class property. + * + * @return The property specified by \e name, \e isRequiredProperty, and \e isInstanceProperty for \e proto, + * or \c NULL if none of \e proto's properties meets the specification. + */ +OBJC_EXPORT objc_property_t protocol_getProperty(Protocol *proto, const char *name, BOOL isRequiredProperty, BOOL isInstanceProperty) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns an array of the required instance properties declared by a protocol. + * + * @note Identical to + * \code + * protocol_copyPropertyList2(proto, outCount, YES, YES); + * \endcode + */ +OBJC_EXPORT objc_property_t *protocol_copyPropertyList(Protocol *proto, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns an array of properties declared by a protocol. + * + * @param proto A protocol. + * @param outCount Upon return, contains the number of elements in the returned array. + * @param isRequiredProperty \c YES returns required properties, \c NO returns optional properties. + * @param isInstanceProperty \c YES returns instance properties, \c NO returns class properties. + * + * @return A C array of pointers of type \c objc_property_t describing the properties declared by \e proto. + * Any properties declared by other protocols adopted by this protocol are not included. The array contains + * \c *outCount pointers followed by a \c NULL terminator. You must free the array with \c free(). + * If the protocol declares no matching properties, \c NULL is returned and \c *outCount is \c 0. + */ +OBJC_EXPORT objc_property_t *protocol_copyPropertyList2(Protocol *proto, unsigned int *outCount, BOOL isRequiredProperty, BOOL isInstanceProperty) + OBJC_AVAILABLE(10.12, 10.0, 10.0, 3.0); + +/** + * Returns an array of the protocols adopted by a protocol. + * + * @param proto A protocol. + * @param outCount Upon return, contains the number of elements in the returned array. + * + * @return A C array of protocols adopted by \e proto. The array contains \e *outCount pointers + * followed by a \c NULL terminator. You must free the array with \c free(). + * If the protocol declares no properties, \c NULL is returned and \c *outCount is \c 0. + */ +OBJC_EXPORT Protocol * __unsafe_unretained *protocol_copyProtocolList(Protocol *proto, unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Creates a new protocol instance that cannot be used until registered with + * \c objc_registerProtocol() + * + * @param name The name of the protocol to create. + * + * @return The Protocol instance on success, \c nil if a protocol + * with the same name already exists. + * @note There is no dispose method for this. + */ +OBJC_EXPORT Protocol *objc_allocateProtocol(const char *name) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Registers a newly constructed protocol with the runtime. The protocol + * will be ready for use and is immutable after this. + * + * @param proto The protocol you want to register. 
+ */ +OBJC_EXPORT void objc_registerProtocol(Protocol *proto) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Adds a method to a protocol. The protocol must be under construction. + * + * @param proto The protocol to add a method to. + * @param name The name of the method to add. + * @param types A C string that represents the method signature. + * @param isRequiredMethod YES if the method is not an optional method. + * @param isInstanceMethod YES if the method is an instance method. + */ +OBJC_EXPORT void protocol_addMethodDescription(Protocol *proto, SEL name, const char *types, BOOL isRequiredMethod, BOOL isInstanceMethod) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Adds an incorporated protocol to another protocol. The protocol being + * added to must still be under construction, while the additional protocol + * must be already constructed. + * + * @param proto The protocol you want to add to, it must be under construction. + * @param addition The protocol you want to incorporate into \e proto, it must be registered. + */ +OBJC_EXPORT void protocol_addProtocol(Protocol *proto, Protocol *addition) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Adds a property to a protocol. The protocol must be under construction. + * + * @param proto The protocol to add a property to. + * @param name The name of the property. + * @param attributes An array of property attributes. + * @param attributeCount The number of attributes in \e attributes. + * @param isRequiredProperty YES if the property (accessor methods) is not optional. + * @param isInstanceProperty YES if the property (accessor methods) are instance methods. + * This is the only case allowed fo a property, as a result, setting this to NO will + * not add the property to the protocol at all. + */ +OBJC_EXPORT void protocol_addProperty(Protocol *proto, const char *name, const objc_property_attribute_t *attributes, unsigned int attributeCount, BOOL isRequiredProperty, BOOL isInstanceProperty) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + + +/* Working with Libraries */ + +/** + * Returns the names of all the loaded Objective-C frameworks and dynamic + * libraries. + * + * @param outCount The number of names returned. + * + * @return An array of C strings of names. Must be free()'d by caller. + */ +OBJC_EXPORT const char **objc_copyImageNames(unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the dynamic library name a class originated from. + * + * @param cls The class you are inquiring about. + * + * @return The name of the library containing this class. + */ +OBJC_EXPORT const char *class_getImageName(Class cls) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Returns the names of all the classes within a library. + * + * @param image The library or framework you are inquiring about. + * @param outCount The number of class names returned. + * + * @return An array of C strings representing the class names. + */ +OBJC_EXPORT const char **objc_copyClassNamesForImage(const char *image, + unsigned int *outCount) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + + +/* Working with Selectors */ + +/** + * Returns the name of the method specified by a given selector. + * + * @param sel A pointer of type \c SEL. Pass the selector whose name you wish to determine. + * + * @return A C string indicating the name of the selector. + */ +OBJC_EXPORT const char *sel_getName(SEL sel) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Registers a method name with the Objective-C runtime system. 
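[Illustrative sketch, not from the objc4 sources; "MyDrawable" and "draw" are hypothetical.] The construction sequence the docs above describe: allocate, add members while still under construction, then register, after which the protocol is immutable.

#import <objc/runtime.h>

static Protocol *make_drawable_protocol(void) {
    Protocol *proto = objc_allocateProtocol("MyDrawable");
    if (proto == nil) {
        return objc_getProtocol("MyDrawable");   // a protocol with that name already exists
    }
    protocol_addMethodDescription(proto, sel_registerName("draw"), "v@:",
                                  YES /*required*/, YES /*instance*/);
    objc_registerProtocol(proto);                // immutable from here on
    return proto;
}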
+ * + * @param str A pointer to a C string. Pass the name of the method you wish to register. + * + * @return A pointer of type SEL specifying the selector for the named method. + * + * @note The implementation of this method is identical to the implementation of \c sel_registerName. + * @note Prior to OS X version 10.0, this method tried to find the selector mapped to the given name + * and returned \c NULL if the selector was not found. This was changed for safety, because it was + * observed that many of the callers of this function did not check the return value for \c NULL. + */ +OBJC_EXPORT SEL sel_getUid(const char *str) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Registers a method with the Objective-C runtime system, maps the method + * name to a selector, and returns the selector value. + * + * @param str A pointer to a C string. Pass the name of the method you wish to register. + * + * @return A pointer of type SEL specifying the selector for the named method. + * + * @note You must register a method name with the Objective-C runtime system to obtain the + * method’s selector before you can add the method to a class definition. If the method name + * has already been registered, this function simply returns the selector. + */ +OBJC_EXPORT SEL sel_registerName(const char *str) + OBJC_AVAILABLE(10.0, 2.0, 9.0, 1.0); + +/** + * Returns a Boolean value that indicates whether two selectors are equal. + * + * @param lhs The selector to compare with rhs. + * @param rhs The selector to compare with lhs. + * + * @return \c YES if \e lhs and \e rhs are equal, otherwise \c NO. + * + * @note sel_isEqual is equivalent to ==. + */ +OBJC_EXPORT BOOL sel_isEqual(SEL lhs, SEL rhs) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + + +/* Objective-C Language Features */ + +/** + * This function is inserted by the compiler when a mutation + * is detected during a foreach iteration. It gets called + * when a mutation occurs, and the enumerationMutationHandler + * is enacted if it is set up. A fatal error occurs if a handler is not set up. + * + * @param obj The object being mutated. + * + */ +OBJC_EXPORT void objc_enumerationMutation(id obj) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Sets the current mutation handler. + * + * @param handler Function pointer to the new mutation handler. + */ +OBJC_EXPORT void objc_setEnumerationMutationHandler(void (*handler)(id)) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Set the function to be called by objc_msgForward. + * + * @param fwd Function to be jumped to by objc_msgForward. + * @param fwd_stret Function to be jumped to by objc_msgForward_stret. + * + * @see message.h::_objc_msgForward + */ +OBJC_EXPORT void objc_setForwardHandler(void *fwd, void *fwd_stret) + OBJC_AVAILABLE(10.5, 2.0, 9.0, 1.0); + +/** + * Creates a pointer to a function that will call the block + * when the method is called. + * + * @param block The block that implements this method. Its signature should + * be: method_return_type ^(id self, method_args...). + * The selector is not available as a parameter to this block. + * The block is copied with \c Block_copy(). + * + * @return The IMP that calls this block. Must be disposed of with + * \c imp_removeBlock. + */ +OBJC_EXPORT IMP imp_implementationWithBlock(id block) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Return the block associated with an IMP that was created using + * \c imp_implementationWithBlock. + * + * @param anImp The IMP that calls this block. + * + * @return The block called by \e anImp. 
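[Illustrative sketch, not from the objc4 sources; "ping" is a hypothetical selector.] Backing a new method with a block via imp_implementationWithBlock; as noted above, the block receives self but not the selector.

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

static void add_block_method(Class cls) {
    IMP imp = imp_implementationWithBlock(^(id self) {
        NSLog(@"block method called on %@", self);
    });
    class_addMethod(cls, sel_registerName("ping"), imp, "v@:");
    // When the method is no longer needed, imp_removeBlock(imp) releases the
    // copied block and its trampoline.
}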
+ */ +OBJC_EXPORT id imp_getBlock(IMP anImp) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * Disassociates a block from an IMP that was created using + * \c imp_implementationWithBlock and releases the copy of the + * block that was created. + * + * @param anImp An IMP that was created using \c imp_implementationWithBlock. + * + * @return YES if the block was released successfully, NO otherwise. + * (For example, the block might not have been used to create an IMP previously). + */ +OBJC_EXPORT BOOL imp_removeBlock(IMP anImp) + OBJC_AVAILABLE(10.7, 4.3, 9.0, 1.0); + +/** + * This loads the object referenced by a weak pointer and returns it, after + * retaining and autoreleasing the object to ensure that it stays alive + * long enough for the caller to use it. This function would be used + * anywhere a __weak variable is used in an expression. + * + * @param location The weak pointer address + * + * @return The object pointed to by \e location, or \c nil if \e location is \c nil. + */ +OBJC_EXPORT id objc_loadWeak(id *location) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + +/** + * This function stores a new value into a __weak variable. It would + * be used anywhere a __weak variable is the target of an assignment. + * + * @param location The address of the weak pointer itself + * @param obj The new object this weak ptr should now point to + * + * @return The value stored into \e location, i.e. \e obj + */ +OBJC_EXPORT id objc_storeWeak(id *location, id obj) + OBJC_AVAILABLE(10.7, 5.0, 9.0, 1.0); + + +/* Associative References */ + +/** + * Policies related to associative references. + * These are options to objc_setAssociatedObject() + */ +typedef OBJC_ENUM(uintptr_t, objc_AssociationPolicy) { + OBJC_ASSOCIATION_ASSIGN = 0, /**< Specifies a weak reference to the associated object. */ + OBJC_ASSOCIATION_RETAIN_NONATOMIC = 1, /**< Specifies a strong reference to the associated object. + * The association is not made atomically. */ + OBJC_ASSOCIATION_COPY_NONATOMIC = 3, /**< Specifies that the associated object is copied. + * The association is not made atomically. */ + OBJC_ASSOCIATION_RETAIN = 01401, /**< Specifies a strong reference to the associated object. + * The association is made atomically. */ + OBJC_ASSOCIATION_COPY = 01403 /**< Specifies that the associated object is copied. + * The association is made atomically. */ +}; + +/** + * Sets an associated value for a given object using a given key and association policy. + * + * @param object The source object for the association. + * @param key The key for the association. + * @param value The value to associate with the key key for object. Pass nil to clear an existing association. + * @param policy The policy for the association. For possible values, see “Associative Object Behaviors.” + * + * @see objc_setAssociatedObject + * @see objc_removeAssociatedObjects + */ +OBJC_EXPORT void objc_setAssociatedObject(id object, const void *key, id value, objc_AssociationPolicy policy) + OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0); + +/** + * Returns the value associated with a given object for a given key. + * + * @param object The source object for the association. + * @param key The key for the association. + * + * @return The value associated with the key \e key for \e object. + * + * @see objc_setAssociatedObject + */ +OBJC_EXPORT id objc_getAssociatedObject(id object, const void *key) + OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0); + +/** + * Removes all associations for a given object. + * + * @param object An object that maintains associated objects. 
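[Illustrative sketch, not from the objc4 sources; kLabelKey and the weak_demo function are hypothetical, and the weak portion assumes ARC.] Associated objects are keyed by the address of the key, not its contents; and __weak variables are lowered by the compiler onto objc_storeWeak/objc_loadWeak, with weak_clear_no_lock (earlier in this patch) being what nils the registered locations when the referent deallocates.

#import <Foundation/Foundation.h>
#import <objc/runtime.h>

static char kLabelKey;   // its address identifies the association

static void associate_label(id obj, NSString *label) {
    objc_setAssociatedObject(obj, &kLabelKey, label, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
}

static NSString *label_for(id obj) {
    return objc_getAssociatedObject(obj, &kLabelKey);
    // Passing nil as the value clears just this association;
    // objc_removeAssociatedObjects(obj) drops every association on obj.
}

static void weak_demo(void) {
    NSObject * __weak weakRef = nil;
    @autoreleasepool {
        NSObject *obj = [NSObject new];
        weakRef = obj;                           // compiler emits objc_storeWeak(&weakRef, obj)
        NSLog(@"alive: %@", weakRef);            // objc_loadWeak-style read, keeps obj alive briefly
    }                                            // obj deallocates; its weak entries are cleared
    NSLog(@"after dealloc: %@", weakRef);        // nil
}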
+ * + * @note The main purpose of this function is to make it easy to return an object + * to a "pristine state”. You should not use this function for general removal of + * associations from objects, since it also removes associations that other clients + * may have added to the object. Typically you should use \c objc_setAssociatedObject + * with a nil value to clear an association. + * + * @see objc_setAssociatedObject + * @see objc_getAssociatedObject + */ +OBJC_EXPORT void objc_removeAssociatedObjects(id object) + OBJC_AVAILABLE(10.6, 3.1, 9.0, 1.0); + + +#define _C_ID '@' +#define _C_CLASS '#' +#define _C_SEL ':' +#define _C_CHR 'c' +#define _C_UCHR 'C' +#define _C_SHT 's' +#define _C_USHT 'S' +#define _C_INT 'i' +#define _C_UINT 'I' +#define _C_LNG 'l' +#define _C_ULNG 'L' +#define _C_LNG_LNG 'q' +#define _C_ULNG_LNG 'Q' +#define _C_FLT 'f' +#define _C_DBL 'd' +#define _C_BFLD 'b' +#define _C_BOOL 'B' +#define _C_VOID 'v' +#define _C_UNDEF '?' +#define _C_PTR '^' +#define _C_CHARPTR '*' +#define _C_ATOM '%' +#define _C_ARY_B '[' +#define _C_ARY_E ']' +#define _C_UNION_B '(' +#define _C_UNION_E ')' +#define _C_STRUCT_B '{' +#define _C_STRUCT_E '}' +#define _C_VECTOR '!' +#define _C_CONST 'r' + + +/* Obsolete types */ + +#if !__OBJC2__ + +#define CLS_GETINFO(cls,infomask) ((cls)->info & (infomask)) +#define CLS_SETINFO(cls,infomask) ((cls)->info |= (infomask)) + +// class is not a metaclass +#define CLS_CLASS 0x1 +// class is a metaclass +#define CLS_META 0x2 +// class's +initialize method has completed +#define CLS_INITIALIZED 0x4 +// class is posing +#define CLS_POSING 0x8 +// unused +#define CLS_MAPPED 0x10 +// class and subclasses need cache flush during image loading +#define CLS_FLUSH_CACHE 0x20 +// method cache should grow when full +#define CLS_GROW_CACHE 0x40 +// unused +#define CLS_NEED_BIND 0x80 +// methodLists is array of method lists +#define CLS_METHOD_ARRAY 0x100 +// the JavaBridge constructs classes with these markers +#define CLS_JAVA_HYBRID 0x200 +#define CLS_JAVA_CLASS 0x400 +// thread-safe +initialize +#define CLS_INITIALIZING 0x800 +// bundle unloading +#define CLS_FROM_BUNDLE 0x1000 +// C++ ivar support +#define CLS_HAS_CXX_STRUCTORS 0x2000 +// Lazy method list arrays +#define CLS_NO_METHOD_ARRAY 0x4000 +// +load implementation +#define CLS_HAS_LOAD_METHOD 0x8000 +// objc_allocateClassPair API +#define CLS_CONSTRUCTING 0x10000 +// class compiled with bigger class structure +#define CLS_EXT 0x20000 + + +struct objc_method_description_list { + int count; + struct objc_method_description list[1]; +}; + + +struct objc_protocol_list { + struct objc_protocol_list *next; + long count; + __unsafe_unretained Protocol *list[1]; +}; + + +struct objc_category { + char *category_name OBJC2_UNAVAILABLE; + char *class_name OBJC2_UNAVAILABLE; + struct objc_method_list *instance_methods OBJC2_UNAVAILABLE; + struct objc_method_list *class_methods OBJC2_UNAVAILABLE; + struct objc_protocol_list *protocols OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + + +struct objc_ivar { + char *ivar_name OBJC2_UNAVAILABLE; + char *ivar_type OBJC2_UNAVAILABLE; + int ivar_offset OBJC2_UNAVAILABLE; +#ifdef __LP64__ + int space OBJC2_UNAVAILABLE; +#endif +} OBJC2_UNAVAILABLE; + +struct objc_ivar_list { + int ivar_count OBJC2_UNAVAILABLE; +#ifdef __LP64__ + int space OBJC2_UNAVAILABLE; +#endif + /* variable length structure */ + struct objc_ivar ivar_list[1] OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + + +struct objc_method { + SEL method_name OBJC2_UNAVAILABLE; + char *method_types OBJC2_UNAVAILABLE; + IMP 
method_imp OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + +struct objc_method_list { + struct objc_method_list *obsolete OBJC2_UNAVAILABLE; + + int method_count OBJC2_UNAVAILABLE; +#ifdef __LP64__ + int space OBJC2_UNAVAILABLE; +#endif + /* variable length structure */ + struct objc_method method_list[1] OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + + +typedef struct objc_symtab *Symtab OBJC2_UNAVAILABLE; + +struct objc_symtab { + unsigned long sel_ref_cnt OBJC2_UNAVAILABLE; + SEL *refs OBJC2_UNAVAILABLE; + unsigned short cls_def_cnt OBJC2_UNAVAILABLE; + unsigned short cat_def_cnt OBJC2_UNAVAILABLE; + void *defs[1] /* variable size */ OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + + +typedef struct objc_cache *Cache OBJC2_UNAVAILABLE; + +#define CACHE_BUCKET_NAME(B) ((B)->method_name) +#define CACHE_BUCKET_IMP(B) ((B)->method_imp) +#define CACHE_BUCKET_VALID(B) (B) +#ifndef __LP64__ +#define CACHE_HASH(sel, mask) (((uintptr_t)(sel)>>2) & (mask)) +#else +#define CACHE_HASH(sel, mask) (((unsigned int)((uintptr_t)(sel)>>3)) & (mask)) +#endif +struct objc_cache { + unsigned int mask /* total = mask + 1 */ OBJC2_UNAVAILABLE; + unsigned int occupied OBJC2_UNAVAILABLE; + Method buckets[1] OBJC2_UNAVAILABLE; +}; + + +typedef struct objc_module *Module OBJC2_UNAVAILABLE; + +struct objc_module { + unsigned long version OBJC2_UNAVAILABLE; + unsigned long size OBJC2_UNAVAILABLE; + const char *name OBJC2_UNAVAILABLE; + Symtab symtab OBJC2_UNAVAILABLE; +} OBJC2_UNAVAILABLE; + +#else + +struct objc_method_list; + +#endif + + +/* Obsolete functions */ + +OBJC_EXPORT IMP class_lookupMethod(Class cls, SEL sel) + __OSX_DEPRECATED(10.0, 10.5, "use class_getMethodImplementation instead") + __IOS_DEPRECATED(2.0, 2.0, "use class_getMethodImplementation instead") + __TVOS_DEPRECATED(9.0, 9.0, "use class_getMethodImplementation instead") + __WATCHOS_DEPRECATED(1.0, 1.0, "use class_getMethodImplementation instead"); +OBJC_EXPORT BOOL class_respondsToMethod(Class cls, SEL sel) + __OSX_DEPRECATED(10.0, 10.5, "use class_respondsToSelector instead") + __IOS_DEPRECATED(2.0, 2.0, "use class_respondsToSelector instead") + __TVOS_DEPRECATED(9.0, 9.0, "use class_respondsToSelector instead") + __WATCHOS_DEPRECATED(1.0, 1.0, "use class_respondsToSelector instead"); +OBJC_EXPORT void _objc_flush_caches(Class cls) + __OSX_DEPRECATED(10.0, 10.5, "not recommended") + __IOS_DEPRECATED(2.0, 2.0, "not recommended") + __TVOS_DEPRECATED(9.0, 9.0, "not recommended") + __WATCHOS_DEPRECATED(1.0, 1.0, "not recommended"); + +OBJC_EXPORT id object_copyFromZone(id anObject, size_t nBytes, void *z) + __OSX_DEPRECATED(10.0, 10.5, "use object_copy instead") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE + OBJC_ARC_UNAVAILABLE; +OBJC_EXPORT id object_realloc(id anObject, size_t nBytes) OBJC2_UNAVAILABLE; +OBJC_EXPORT id object_reallocFromZone(id anObject, size_t nBytes, void *z) OBJC2_UNAVAILABLE; + +#define OBSOLETE_OBJC_GETCLASSES 1 +OBJC_EXPORT void *objc_getClasses(void) OBJC2_UNAVAILABLE; +OBJC_EXPORT void objc_addClass(Class myClass) OBJC2_UNAVAILABLE; +OBJC_EXPORT void objc_setClassHandler(int (*)(const char *)) OBJC2_UNAVAILABLE; +OBJC_EXPORT void objc_setMultithreaded (BOOL flag) OBJC2_UNAVAILABLE; + +OBJC_EXPORT id class_createInstanceFromZone(Class, size_t idxIvars, void *z) + __OSX_DEPRECATED(10.0, 10.5, "use class_createInstance instead") + __IOS_UNAVAILABLE __TVOS_UNAVAILABLE __WATCHOS_UNAVAILABLE + OBJC_ARC_UNAVAILABLE; + +OBJC_EXPORT void class_addMethods(Class, struct objc_method_list *) OBJC2_UNAVAILABLE; +OBJC_EXPORT void 
class_removeMethods(Class, struct objc_method_list *) OBJC2_UNAVAILABLE; +OBJC_EXPORT void _objc_resolve_categories_for_class(Class cls) OBJC2_UNAVAILABLE; + +OBJC_EXPORT Class class_poseAs(Class imposter, Class original) OBJC2_UNAVAILABLE; + +OBJC_EXPORT unsigned int method_getSizeOfArguments(Method m) OBJC2_UNAVAILABLE; +OBJC_EXPORT unsigned method_getArgumentInfo(struct objc_method *m, int arg, const char **type, int *offset) OBJC2_UNAVAILABLE; + +OBJC_EXPORT Class objc_getOrigClass(const char *name) OBJC2_UNAVAILABLE; +#define OBJC_NEXT_METHOD_LIST 1 +OBJC_EXPORT struct objc_method_list *class_nextMethodList(Class, void **) OBJC2_UNAVAILABLE; +// usage for nextMethodList +// +// void *iterator = 0; +// struct objc_method_list *mlist; +// while ( mlist = class_nextMethodList( cls, &iterator ) ) +// ; + +OBJC_EXPORT id (*_alloc)(Class, size_t) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_copy)(id, size_t) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_realloc)(id, size_t) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_dealloc)(id) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_zoneAlloc)(Class, size_t, void *) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_zoneRealloc)(id, size_t, void *) OBJC2_UNAVAILABLE; +OBJC_EXPORT id (*_zoneCopy)(id, size_t, void *) OBJC2_UNAVAILABLE; +OBJC_EXPORT void (*_error)(id, const char *, va_list) OBJC2_UNAVAILABLE; + +#endif diff --git a/unexported_symbols b/unexported_symbols new file mode 100644 index 0000000..73f77a8 --- /dev/null +++ b/unexported_symbols @@ -0,0 +1,17 @@ +.objc_class_name___IncompleteProtocol +__Znam +__ZnamRKSt9nothrow_t +__Znwm +__ZnwmRKSt9nothrow_t +__ZdaPv +__ZdaPvRKSt9nothrow_t +__ZdlPv +__ZdlPvRKSt9nothrow_t +__ZTISt9bad_alloc +__ZTISt9exception +__ZTISt11logic_error +__ZTISt12length_error +__ZTSSt9bad_alloc +__ZTSSt9exception +__ZTSSt11logic_error +__ZTSSt12length_error diff --git a/version.bat b/version.bat new file mode 100755 index 0000000..7df1c3a --- /dev/null +++ b/version.bat @@ -0,0 +1,29 @@ +:: version.bat +:: Writes version numbers from B&I into version.h for use by version.rc. + +@ECHO OFF + +:: Set default values for environment variables if not set by B&I +IF "%OBJROOT%"=="" SET OBJROOT=. +IF "%RC_PROJECTSOURCEVERSION%"=="" SET RC_PROJECTSOURCEVERSION=0.0 +IF "%RC_PROJECTBUILDVERSION%"=="" SET RC_PROJECTBUILDVERSION=0 + +:: Get version numbers from environment variables +SET major=1 +SET patch=0 +FOR /F "tokens=1* eol= delims=." %%i IN ("%RC_PROJECTSOURCEVERSION%") DO ( + SET minor=%%i + IF NOT "%%j"=="" SET patch=%%j +) +SET build=%RC_PROJECTBUILDVERSION% + +ECHO version %major% . %minor% . %patch% . %build% + +:: Write version.h +ECHO // This file is automatically generated by version.bat. 
> "%OBJROOT%\version.h" +ECHO // DO NOT EDIT >> "%OBJROOT%\version.h" +ECHO #define major %major% >> "%OBJROOT%\version.h" +ECHO #define minor %minor% >> "%OBJROOT%\version.h" +ECHO #define patch %patch% >> "%OBJROOT%\version.h" +ECHO #define build %build% >> "%OBJROOT%\version.h" +ECHO #define string "%major%,%minor%,%patch%,%build%" >> "%OBJROOT%\version.h" diff --git a/version.rc b/version.rc new file mode 100644 index 0000000..709d0a4 --- /dev/null +++ b/version.rc @@ -0,0 +1,38 @@ +#include "Winver.h" + +// built by version.bat; sets variables major, minor, patch, build, string +#include "version.h" + +VS_VERSION_INFO VERSIONINFO + FILEVERSION major,minor,patch,build + PRODUCTVERSION major,minor,patch,build + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS VS_FF_DEBUG +#else + FILEFLAGS 0x0L +#endif + FILEOS VOS_NT_WINDOWS32 + FILETYPE VFT_DLL + FILESUBTYPE VFT2_UNKNOWN +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "CompanyName", "Apple Inc." + VALUE "FileDescription", "Objective-C Runtime Library" + VALUE "FileVersion", string + VALUE "ProductVersion", string + VALUE "ProductName", "objc4" + VALUE "InternalName", "objc4" + VALUE "LegalCopyright", "Copyright (C) 2007-2009, Apple Inc." + VALUE "OriginalFilename", "objc.dll" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END +