mirror of https://github.com/darlinghq/darling-libobjc2.git
synced 2025-02-17 07:07:57 +00:00
Added support for associative references. Modified sync code to use this.
This commit is contained in:
parent d51500184c
commit e198597bd3
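
As a quick orientation, here is a minimal sketch of how the API introduced by this commit is meant to be used from client code. It assumes a Foundation-style NSObject root class and manual reference counting; the helper names (attachNote, noteFor, exampleKey) are purely illustrative and not part of the commit.

#import <Foundation/NSObject.h>   // assumed: any root class with -retain/-release works
#include <objc/runtime.h>

// The address of a static variable is the recommended opaque association key.
static char exampleKey;

// Tie `note` to `owner`: the runtime retains it and releases it again when the
// association is replaced, removed, or `owner` is deallocated.
void attachNote(id owner, id note)
{
    objc_setAssociatedObject(owner, &exampleKey, note, OBJC_ASSOCIATION_RETAIN);
}

// Fetch the object stored above, or nil if nothing was associated.
id noteFor(id owner)
{
    return objc_getAssociatedObject(owner, &exampleKey);
}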

@@ -14,6 +14,7 @@ libobjc_VERSION = 4
libobjc_OBJC_FILES = \
    NSBlocks.m\
    Protocol2.m\
    associate.m\
    blocks_runtime.m\
    mutation.m\
    properties.m\

275  associate.m  Normal file
@@ -0,0 +1,275 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "objc/runtime.h"
#include "nsobject.h"
#include "spinlock.h"
#include "class.h"
#include "dtable.h"
#include "selector.h"

/**
 * A single associative reference. Contains the key, value, and association
 * policy.
 */
struct reference
{
    /**
     * The key used for identifying this object. Opaque pointer, should be set
     * to 0 when this slot is unused.
     */
    void *key;
    /**
     * The associated object. Note, if the policy is assign then this may be
     * some other type of pointer...
     */
    void *object;
    /**
     * Association policy.
     */
    uintptr_t policy;
};

#define REFERENCE_LIST_SIZE 10

/**
 * Linked list of references associated with an object. We assume that there
 * won't be very many, so we don't bother with a proper hash table, and just
 * iterate over a list.
 */
struct reference_list
{
    /**
     * Next group of references. This is only ever used if we have more than
     * 10 references associated with an object, which seems highly unlikely.
     */
    struct reference_list *next;
    /**
     * Array of references.
     */
    struct reference list[REFERENCE_LIST_SIZE];
};
enum
{
    OBJC_ASSOCIATION_ATOMIC = 0x300,
};

static BOOL isAtomic(uintptr_t policy)
{
    return (policy & OBJC_ASSOCIATION_ATOMIC) == OBJC_ASSOCIATION_ATOMIC;
}

static struct reference* findReference(struct reference_list *list, void *key)
{
    if (NULL == list) { return NULL; }

    for (int i=0 ; i<REFERENCE_LIST_SIZE ; i++)
    {
        if (list->list[i].key == key)
        {
            return &list->list[i];
        }
    }
    return NULL;
}
static void cleanupReferenceList(struct reference_list *list)
{
    if (NULL == list) { return; }

    for (int i=0 ; i<REFERENCE_LIST_SIZE ; i++)
    {
        struct reference *r = &list->list[i];
        if (0 != r->key)
        {
            r->key = 0;
            if (OBJC_ASSOCIATION_ASSIGN != r->policy)
            {
                // Full barrier - ensure that we've zero'd the key before doing
                // this!
                __sync_synchronize();
                [(id)r->object release];
            }
            r->object = 0;
            r->policy = 0;
        }
    }
}

static void setReference(struct reference_list *list,
                         void *key,
                         void *obj,
                         uintptr_t policy)
{
    switch (policy)
    {
        // Ignore any unknown association policies
        default: return;
        case OBJC_ASSOCIATION_COPY_NONATOMIC:
        case OBJC_ASSOCIATION_COPY:
            obj = [(id)obj copy];
            break;
        case OBJC_ASSOCIATION_RETAIN_NONATOMIC:
        case OBJC_ASSOCIATION_RETAIN:
            obj = [(id)obj retain];
        case OBJC_ASSOCIATION_ASSIGN:
            break;
    }
    // While inserting into the list, we need to lock it temporarily.
    int *lock = lock_for_pointer(list);
    lock_spinlock(lock);
    struct reference *r = findReference(list, key);
    // If there's an existing reference, then we can update it, otherwise we
    // have to install a new one
    if (NULL == r)
    {
        // Search for an unused slot
        r = findReference(list, 0);
        if (NULL == r)
        {
            struct reference_list *l = list;

            while (NULL != l->next) { l = l->next; }

            l->next = calloc(1, sizeof(struct reference_list));
            r = &l->next->list[0];
        }
        r->key = key;
    }
    unlock_spinlock(lock);
    // Now we only need to lock if the old or new property is atomic
    BOOL needLock = isAtomic(r->policy) || isAtomic(policy);
    if (needLock)
    {
        lock = lock_for_pointer(r);
        lock_spinlock(lock);
    }
    r->policy = policy;
    id old = r->object;
    r->object = obj;
    if (OBJC_ASSOCIATION_ASSIGN != r->policy)
    {
        [old release];
    }
    if (needLock)
    {
        unlock_spinlock(lock);
    }
}

static void deallocHiddenClass(id obj, SEL _cmd);

static inline Class findHiddenClass(id obj)
{
    Class cls = obj->isa;
    while (Nil != cls &&
           !objc_test_class_flag(cls, objc_class_flag_assoc_class))
    {
        cls = class_getSuperclass(cls);
    }
    return cls;
}

static Class allocateHiddenClass(Class superclass)
{
    Class newClass =
        calloc(1, sizeof(struct objc_class) + sizeof(struct reference_list));

    if (Nil == newClass) { return Nil; }

    // Set up the new class
    newClass->isa = superclass->isa;
    // Set the superclass pointer to the name. The runtime will fix this when
    // the class links are resolved.
    newClass->name = superclass->name;
    newClass->info = objc_class_flag_resolved | objc_class_flag_initialized |
        objc_class_flag_class | objc_class_flag_user_created |
        objc_class_flag_new_abi | objc_class_flag_hidden_class |
        objc_class_flag_assoc_class;
    newClass->super_class = superclass;
    newClass->dtable = objc_copy_dtable_for_class(superclass->dtable, newClass);
    newClass->instance_size = superclass->instance_size;
    if (objc_test_class_flag(superclass, objc_class_flag_meta))
    {
        newClass->info |= objc_class_flag_meta;
    }

    return newClass;
}

static inline Class initHiddenClassForObject(id obj)
{
    Class hiddenClass = allocateHiddenClass(obj->isa);
    if (class_isMetaClass(obj->isa))
    {
        obj->isa = hiddenClass;
    }
    else
    {
        const char *types =
            method_getTypeEncoding(class_getInstanceMethod(obj->isa,
                                                           SELECTOR(dealloc)));
        class_addMethod(hiddenClass, SELECTOR(dealloc), (IMP)deallocHiddenClass,
                        types);
        obj->isa = hiddenClass;
    }
    return hiddenClass;
}

static void deallocHiddenClass(id obj, SEL _cmd)
{
    Class hiddenClass = findHiddenClass(obj);
    Class realClass = class_getSuperclass(hiddenClass);
    // Call the real -dealloc method (this ordering is required in case the
    // user does @synchronized(self) in -dealloc)
    struct objc_super super = {obj, realClass};
    objc_msg_lookup_super(&super, SELECTOR(dealloc))(obj, SELECTOR(dealloc));
    // After calling [super dealloc], the object will no longer exist.
    // Free the hidden
    struct reference_list *list = object_getIndexedIvars(hiddenClass);
    cleanupReferenceList(list);

    // FIXME: Low memory profile.
    SparseArrayDestroy(hiddenClass->dtable);

    // Free the class
    free(hiddenClass);
}

void objc_setAssociatedObject(id object,
                              void *key,
                              id value,
                              objc_AssociationPolicy policy)
{
    Class hiddenClass = findHiddenClass(object);
    if (NULL == hiddenClass)
    {
        int *lock = lock_for_pointer(object);
        lock_spinlock(lock);
        hiddenClass = findHiddenClass(object);
        if (NULL == hiddenClass)
        {
            hiddenClass = initHiddenClassForObject(object);
        }
        unlock_spinlock(lock);
    }
    struct reference_list *list = object_getIndexedIvars(hiddenClass);
    setReference(list, key, value, policy);
}

id objc_getAssociatedObject(id object, void *key)
{
    Class hiddenClass = findHiddenClass(object);
    if (NULL == hiddenClass) { return nil; }
    struct reference_list *list = object_getIndexedIvars(hiddenClass);
    struct reference *r = findReference(list, key);
    return r ? r->object : nil;
}


void objc_removeAssociatedObjects(id object)
{
    Class hiddenClass = findHiddenClass(object);
    if (NULL == hiddenClass) { return; }
    struct reference_list *list = object_getIndexedIvars(hiddenClass);
    cleanupReferenceList(list);
}
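
The -dealloc interposition above means associations do not outlive their owner. The following is a hedged, illustrative check of that behaviour, assuming a GNUstep-style NSObject with manual retain/release; Canary, canaryKey and main are hypothetical names, not part of this commit.

#import <Foundation/NSObject.h>   // assumed Foundation-style root class
#include <objc/runtime.h>
#include <stdio.h>

static char canaryKey;

@interface Canary : NSObject @end
@implementation Canary
- (void)dealloc
{
    printf("canary deallocated\n");
    [super dealloc];
}
@end

int main(void)
{
    id owner = [NSObject new];
    id canary = [Canary new];
    objc_setAssociatedObject(owner, &canaryKey, canary, OBJC_ASSOCIATION_RETAIN);
    [canary release];   // the association now holds the only reference
    [owner release];    // the hidden class's -dealloc runs cleanupReferenceList(),
                        // which releases the canary: "canary deallocated" prints
    return 0;
}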

8  class.h
@@ -74,9 +74,9 @@ struct objc_class
     */
    struct objc_protocol_list *protocols;
    /**
     * Pointer used by the Boehm GC.
     * Linked list of extra data attached to this class.
     */
    void *gc_object_type;
    struct class_annotation *extra_data;
    /**
     * New ABI. The following fields are only available with classes compiled to
     * support the new ABI. You may test whether any given class supports this
@@ -182,9 +182,9 @@ enum objc_class_flags
     */
    objc_class_flag_hidden_class = (1<<7),
    /**
     * This class is a hidden class used to implement @synchronized()
     * This class is a hidden class used to store associated values.
     */
    objc_class_flag_lock_class = (1<<8)
    objc_class_flag_assoc_class = (1<<8)
};

static inline void objc_set_class_flag(struct objc_class *aClass,

157  exports.txt
@@ -1,157 +0,0 @@
_Block_copy
_Block_object_assign
_Block_object_dispose
_Block_release
_NSConcreteGlobalBlock
_NSConcreteStackBlock
__gnu_objc_personality_v0
__objc_class_name_Object
__objc_class_name_Protocol
__objc_class_name_Protocol2
__objc_class_name___ObjC_Protocol_Holder_Ugly_Hack
__objc_exec_class
__objc_id_typeinfo
__objc_msg_forward2
__objc_msg_forward3
__objc_responds_to
__objc_runtime_mutex
__objc_runtime_threads_alive
__objc_sync_init
__objc_uninstalled_dtable
__objc_update_dispatch_table_for_class
_objc_class_for_boxing_foreign_exception
_objc_load_callback
_objc_lookup_class
_objc_selector_type_mismatch
_objc_unexpected_exception
block_getType_np
class_addIvar
class_addMethod
class_addProtocol
class_conformsToProtocol
class_copyIvarList
class_copyMethodList
class_copyPropertyList
class_copyProtocolList
class_createInstance
class_getClassMethod
class_getClassVariable
class_getInstanceMethod
class_getInstanceSize
class_getInstanceVariable
class_getIvarLayout
class_getMethodImplementation
class_getMethodImplementation_stret
class_getName
class_getProperty
class_getSuperclass
class_getVersion
class_getWeakIvarLayout
class_isMetaClass
class_replaceMethod
class_respondsToSelector
class_setIvarLayout
class_setSuperclass
class_setVersion
class_setWeakIvarLayout
get_imp
ivar_getName
ivar_getOffset
ivar_getTypeEncoding
method_copyArgumentType
method_copyReturnType
method_exchangeImplementations
method_getArgumentType
method_getImplementation
method_getName
method_getNumberOfArguments
method_getReturnType
method_getTypeEncoding
method_get_number_of_arguments
method_setImplementation
objc_aligned_size
objc_alignof_type
objc_allocateClassPair
objc_atomic_malloc
objc_calloc
objc_check_abi_version
objc_create_block_classes_as_subclasses_of
objc_disposeClassPair
objc_enumerationMutation
objc_exception_throw
objc_free
objc_getClass
objc_getClassList
objc_getMetaClass
objc_getProperty
objc_getProtocol
objc_getRequiredClass
objc_get_class
objc_get_meta_class
objc_get_slot
objc_get_type_qualifiers
objc_layout_structure
objc_layout_structure_get_info
objc_layout_structure_next_member
objc_load_buffered_categories
objc_load_class
objc_lookUpClass
objc_lookup_class
objc_malloc
objc_msg_lookup
objc_msg_lookup_sender
objc_msg_lookup_super
objc_msg_profile
objc_msg_sender
objc_msg_sendv
objc_next_class
objc_plane_lookup
objc_promoted_size
objc_proxy_lookup
objc_realloc
objc_registerClassPair
objc_setProperty
objc_sizeof_type
objc_skip_argspec
objc_skip_type_qualifiers
objc_skip_typespec
objc_slot_lookup_super
objc_sync_enter
objc_sync_exit
objc_test_capability
objc_valloc
object_getClass
object_getClassName
object_getIndexedIvars
object_setClass
property_getName
protocol_conformsToProtocol
protocol_copyMethodDescriptionList
protocol_copyPropertyList
protocol_copyProtocolList
protocol_create
protocol_for_name
protocol_getMethodDescription
protocol_getName
protocol_getProperty
protocol_isEqual
sel_copyTypedSelectors_np
sel_copyTypes_np
sel_eq
sel_getName
sel_getType_np
sel_getUid
sel_get_any_typed_uid
sel_get_any_uid
sel_get_name
sel_get_type
sel_get_typed_uid
sel_get_uid
sel_isEqual
sel_registerName
sel_registerTypedName_np
sel_register_name
sel_register_typed_name
selector_create
toy_dispatch_async_f
toy_dispatch_queue_create

5  loader.c
@@ -11,7 +11,6 @@
PRIVATE mutex_t runtime_mutex;
LEGACY void *__objc_runtime_mutex = &runtime_mutex;

void sync_init(void);
void init_selector_tables(void);
void init_protocol_table(void);
void init_class_tables(void);
@@ -44,10 +43,6 @@ void __objc_exec_class(struct objc_module_abi_8 *module)
    // call dlopen() or equivalent, and the platform's implementation of
    // this does not perform any synchronization.
    INIT_LOCK(runtime_mutex);
    // Create the lock used to protect the creation of hidden classes by
    // @synchronized()
    sync_init();

    // Create the various tables that the runtime needs.
    init_selector_tables();
    init_protocol_table();

10  nsobject.h  Normal file
@@ -0,0 +1,10 @@
/**
 * Stub declaration of NSObject. Lots of things in the runtime require the
 */
@interface NSObject
-retain;
-copy;
-(void)release;
-autorelease;
-(void)dealloc;
@end

@@ -683,6 +683,58 @@ unsigned sel_copyTypedSelectors_np(const char *selName, SEL *const sels, unsigne
extern struct objc_slot *objc_msg_lookup_sender(id *receiver, SEL selector, id sender)
    OBJC_NONPORTABLE;

/**
 * Valid values for objc_AssociationPolicy. This is really a bitfield, but
 * only specific combinations of flags are permitted.
 */
enum
{
    /**
     * Perform straight assignment, no message sends.
     */
    OBJC_ASSOCIATION_ASSIGN = 0,
    /**
     * Retain the associated object.
     */
    OBJC_ASSOCIATION_RETAIN_NONATOMIC = 1,
    /**
     * Copy the associated object, by sending it a -copy message.
     */
    OBJC_ASSOCIATION_COPY_NONATOMIC = 3,
    /**
     * Atomic retain.
     */
    OBJC_ASSOCIATION_RETAIN = 0x301,
    /**
     * Atomic copy.
     */
    OBJC_ASSOCIATION_COPY = 0x303
};
/**
 * Association policy, used when setting associated objects.
 */
typedef uintptr_t objc_AssociationPolicy;

/**
 * Returns an object previously stored by calling objc_setAssociatedObject()
 * with the same arguments, or nil if none exists.
 */
id objc_getAssociatedObject(id object, void *key);
/**
 * Associates an object with another. This provides a mechanism for storing
 * extra state with an object, beyond its declared instance variables. The
 * pointer used as a key is treated as an opaque value. The best way of
 * ensuring this is to pass the pointer to a static variable as the key. The
 * value may be any object, but must respond to -copy or -retain, and -release,
 * if an association policy of copy or retain is passed as the final argument.
 */
void objc_setAssociatedObject(id object, void *key, id value, objc_AssociationPolicy policy);
/**
 * Removes all associations from an object.
 */
void objc_removeAssociatedObjects(id object);


/**
 * Toggles whether Objective-C objects caught in C++ exception handlers in
 * Objective-C++ mode should follow Objective-C or C++ semantics. The obvious
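
To make the policy semantics above concrete, here is a small, purely illustrative sketch; demoPolicies, assignKey and copyKey are hypothetical names, and the value is assumed to respond to -copy (for example a GNUstep NSString).

static char assignKey, copyKey;

void demoPolicies(id owner, id value)
{
    // ASSIGN stores the raw pointer: no -retain or -copy is sent, so the
    // caller must keep `value` alive for as long as the association is read.
    objc_setAssociatedObject(owner, &assignKey, value, OBJC_ASSOCIATION_ASSIGN);

    // COPY sends -copy and the association owns the result; it is released
    // when the association is replaced, removed, or the owner is deallocated.
    objc_setAssociatedObject(owner, &copyKey, value, OBJC_ASSOCIATION_COPY);
}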

89  properties.m
@@ -5,92 +5,11 @@
#include <unistd.h>
#include "class.h"
#include "properties.h"
#include "spinlock.h"
#include "visibility.h"
#include "nsobject.h"

#ifdef __MINGW32__
#include <windows.h>
static unsigned sleep(unsigned seconds)
{
    Sleep(seconds*1000);
    return 0;
}
#endif

// Subset of NSObject interface needed for properties.
@interface NSObject {}
- (id)retain;
- (id)copy;
- (id)autorelease;
- (void)release;
@end

/**
 * Number of spinlocks. This allocates one page on 32-bit platforms.
 */
#define spinlock_count (1<<10)
const int spinlock_mask = spinlock_count - 1;
/**
 * Integers used as spinlocks for atomic property access.
 */
static int spinlocks[spinlock_count];
/**
 * Get a spin lock from a pointer. We want to prevent lock contention between
 * properties in the same object - if someone is stupid enough to be using
 * atomic property access, they are probably stupid enough to do it for
 * multiple properties in the same object. We also want to try to avoid
 * contention between the same property in different objects, so we can't just
 * use the ivar offset.
 */
static inline int *lock_for_pointer(void *ptr)
{
    intptr_t hash = (intptr_t)ptr;
    // Most properties will be pointers, so disregard the lowest few bits
    hash >>= sizeof(void*) == 4 ? 2 : 8;
    intptr_t low = hash & spinlock_mask;
    hash >>= 16;
    hash |= low;
    return spinlocks + (hash & spinlock_mask);
}

/**
 * Unlocks the spinlock. This is not an atomic operation. We are only ever
 * modifying the lowest bit of the spinlock word, so it doesn't matter if this
 * is two writes because there is no contention among the high bit. There is
 * no possibility of contention among calls to this, because it may only be
 * called by the thread owning the spin lock.
 */
inline static void unlock_spinlock(int *spinlock)
{
    *spinlock = 0;
}
/**
 * Attempts to lock a spinlock. This is heavily optimised for the uncontended
 * case, because property access should (generally) not be contended. In the
 * uncontended case, this is a single atomic compare and swap instruction and a
 * branch. Atomic CAS is relatively expensive (can be a pipeline flush, and
 * may require locking a cache line in a cache-coherent SMP system, but it's a
 * lot cheaper than a system call).
 *
 * If the lock is contended, then we just sleep and then try again after the
 * other threads have run. Note that there is no upper bound on the potential
 * running time of this function, which is one of the great many reasons that
 * using atomic accessors is a terrible idea, but in the common case it should
 * be very fast.
 */
inline static void lock_spinlock(int *spinlock)
{
    int count = 0;
    // Set the spin lock value to 1 if it is 0.
    while(!__sync_bool_compare_and_swap(spinlock, 0, 1))
    {
        count++;
        if (0 == count % 10)
        {
            // If it is already 1, let another thread play with the CPU for a
            // bit then try again.
            sleep(0);
        }
    }
}
PRIVATE int spinlocks[spinlock_count];

/**
 * Public function for getting a property.
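
The hunk is truncated before the property accessors themselves, so as a rough illustration only, this is the general shape an atomic getter takes on top of the spinlock helpers. The function name and signature are hypothetical and this is not the runtime's actual objc_getProperty.

#include <stddef.h>

id atomicGetterSketch(id obj, ptrdiff_t offset)
{
    char *addr = (char*)obj + offset;
    int *lock = lock_for_pointer(addr);
    lock_spinlock(lock);
    // Retain while holding the lock so a concurrent setter cannot release the
    // value out from under us, then hand ownership to the autorelease pool.
    id value = [*(id*)addr retain];
    unlock_spinlock(lock);
    return [value autorelease];
}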

11  selector.h
@@ -61,4 +61,15 @@ BOOL sel_is_mapped(SEL selector);
 */
SEL objc_register_selector(SEL aSel);

/**
 * SELECTOR() macro to work around the fact that GCC hard-codes the type of
 * selectors. This is functionally equivalent to @selector(), but it ensures
 * that the selector has the type that the runtime uses for selectors.
 */
#ifdef __clang__
#define SELECTOR(x) @selector(x)
#else
#define SELECTOR(x) (SEL)@selector(x)
#endif

#endif // OBJC_SELECTOR_H_INCLUDED

80  spinlock.h  Normal file
@@ -0,0 +1,80 @@
#ifdef __MINGW32__
#include <windows.h>
static unsigned sleep(unsigned seconds)
{
    Sleep(seconds*1000);
    return 0;
}
#else
#include <unistd.h>
#endif

/**
 * Number of spinlocks. This allocates one page on 32-bit platforms.
 */
#define spinlock_count (1<<10)
static const int spinlock_mask = spinlock_count - 1;
/**
 * Integers used as spinlocks for atomic property access.
 */
extern int spinlocks[spinlock_count];
/**
 * Get a spin lock from a pointer. We want to prevent lock contention between
 * properties in the same object - if someone is stupid enough to be using
 * atomic property access, they are probably stupid enough to do it for
 * multiple properties in the same object. We also want to try to avoid
 * contention between the same property in different objects, so we can't just
 * use the ivar offset.
 */
static inline int *lock_for_pointer(void *ptr)
{
    intptr_t hash = (intptr_t)ptr;
    // Most properties will be pointers, so disregard the lowest few bits
    hash >>= sizeof(void*) == 4 ? 2 : 8;
    intptr_t low = hash & spinlock_mask;
    hash >>= 16;
    hash |= low;
    return spinlocks + (hash & spinlock_mask);
}

/**
 * Unlocks the spinlock. This is not an atomic operation. We are only ever
 * modifying the lowest bit of the spinlock word, so it doesn't matter if this
 * is two writes because there is no contention among the high bit. There is
 * no possibility of contention among calls to this, because it may only be
 * called by the thread owning the spin lock.
 */
inline static void unlock_spinlock(int *spinlock)
{
    *spinlock = 0;
}
/**
 * Attempts to lock a spinlock. This is heavily optimised for the uncontended
 * case, because property access should (generally) not be contended. In the
 * uncontended case, this is a single atomic compare and swap instruction and a
 * branch. Atomic CAS is relatively expensive (can be a pipeline flush, and
 * may require locking a cache line in a cache-coherent SMP system, but it's a
 * lot cheaper than a system call).
 *
 * If the lock is contended, then we just sleep and then try again after the
 * other threads have run. Note that there is no upper bound on the potential
 * running time of this function, which is one of the great many reasons that
 * using atomic accessors is a terrible idea, but in the common case it should
 * be very fast.
 */
inline static void lock_spinlock(int *spinlock)
{
    int count = 0;
    // Set the spin lock value to 1 if it is 0.
    while(!__sync_bool_compare_and_swap(spinlock, 0, 1))
    {
        count++;
        if (0 == count % 10)
        {
            // If it is already 1, let another thread play with the CPU for a
            // bit then try again.
            sleep(0);
        }
    }
}
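
As a usage note, these helpers follow a simple hash-the-address-then-spin pattern; the sketch below shows the same sequence that setReference() in associate.m uses. guardedStore is a hypothetical name, not part of the runtime.

void guardedStore(void **slot, void *value)
{
    // Hash the protected address to one of the shared spinlocks, take it,
    // keep the critical section tiny, then release it.
    int *lock = lock_for_pointer(slot);
    lock_spinlock(lock);
    *slot = value;
    unlock_spinlock(lock);
}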

153  sync.m
@@ -1,138 +1,65 @@
#include "objc/runtime.h"
#include "lock.h"
#include "class.h"
#include "dtable.h"

#include <stdio.h>
#include <stdlib.h>

#ifdef __clang__
#define SELECTOR(x) @selector(x)
#else
#define SELECTOR(x) (SEL)@selector(x)
#endif

int snprintf(char *restrict s, size_t n, const char *restrict format, ...);

@interface Fake
+ (void)dealloc;
@interface __ObjCLock
{
    @public
    id isa;
    mutex_t lock;
}
@end
@implementation __ObjCLock
+ (id)new
{
    __ObjCLock *l = calloc(1, class_getInstanceSize(self));
    l->isa = self;
    INIT_LOCK(l->lock);
    return l;
}
- (id)retain
{
    return self;
}
- (void)release
{
    DESTROY_LOCK(&lock);
    free(self);
}
@end

static mutex_t at_sync_init_lock;

PRIVATE void sync_init(void)
{
    INIT_LOCK(at_sync_init_lock);
}

IMP objc_msg_lookup(id, SEL);

static void deallocLockClass(id obj, SEL _cmd);

static inline Class findLockClass(id obj)
{
    struct objc_object object = { obj->isa };
    while (Nil != object.isa &&
           !objc_test_class_flag(object.isa, objc_class_flag_lock_class))
    {
        object.isa = class_getSuperclass(object.isa);
    }
    return object.isa;
}

static Class allocateLockClass(Class superclass)
{
    Class newClass = calloc(1, sizeof(struct objc_class) + sizeof(mutex_t));

    if (Nil == newClass) { return Nil; }

    // Set up the new class
    newClass->isa = superclass->isa;
    // Set the superclass pointer to the name. The runtime will fix this when
    // the class links are resolved.
    newClass->name = superclass->name;
    newClass->info = objc_class_flag_resolved | objc_class_flag_initialized |
        objc_class_flag_class | objc_class_flag_user_created |
        objc_class_flag_new_abi | objc_class_flag_hidden_class |
        objc_class_flag_lock_class;
    newClass->super_class = superclass;
    newClass->dtable = objc_copy_dtable_for_class(superclass->dtable, newClass);
    newClass->instance_size = superclass->instance_size;
    if (objc_test_class_flag(superclass, objc_class_flag_meta))
    {
        newClass->info |= objc_class_flag_meta;
    }
    mutex_t *lock = object_getIndexedIvars(newClass);
    INIT_LOCK(*lock);

    return newClass;
}

static inline Class initLockObject(id obj)
{
    Class lockClass = allocateLockClass(obj->isa);
    if (class_isMetaClass(obj->isa))
    {
        obj->isa = lockClass;
    }
    else
    {
        const char *types =
            method_getTypeEncoding(class_getInstanceMethod(obj->isa,
                                                           SELECTOR(dealloc)));
        class_addMethod(lockClass, SELECTOR(dealloc), (IMP)deallocLockClass,
                        types);
        obj->isa = lockClass;
    }

    return lockClass;
}

static void deallocLockClass(id obj, SEL _cmd)
{
    Class lockClass = findLockClass(obj);
    Class realClass = class_getSuperclass(lockClass);
    // Call the real -dealloc method (this ordering is required in case the
    // user does @synchronized(self) in -dealloc)
    struct objc_super super = {obj, realClass};
    objc_msg_lookup_super(&super, SELECTOR(dealloc))(obj, SELECTOR(dealloc));
    // After calling [super dealloc], the object will no longer exist.
    // Free the lock
    mutex_t *lock = object_getIndexedIvars(lockClass);
    DESTROY_LOCK(lock);

    // FIXME: Low memory profile.
    SparseArrayDestroy(lockClass->dtable);

    // Free the class
    free(lockClass);
}
static char key;

// TODO: This should probably have a special case for classes conforming to the
// NSLocking protocol, just sending them a -lock message.
int objc_sync_enter(id obj)
{
    Class lockClass = findLockClass(obj);
    if (Nil == lockClass)
    __ObjCLock *l = objc_getAssociatedObject(obj, &key);
    if (nil == l)
    {
        LOCK(&at_sync_init_lock);
        // Test again in case two threads call objc_sync_enter at once
        lockClass = findLockClass(obj);
        if (Nil == lockClass)
        __ObjCLock *lock = [__ObjCLock new];
        objc_setAssociatedObject(obj, &key, lock, OBJC_ASSOCIATION_RETAIN);
        l = objc_getAssociatedObject(obj, &key);
        // If another thread created the lock while we were doing this, then
        // use their one and free ours
        if (l != lock)
        {
            lockClass = initLockObject(obj);
            [lock release];
        }
        UNLOCK(&at_sync_init_lock);
    }
    mutex_t *lock = object_getIndexedIvars(lockClass);
    LOCK(lock);
    LOCK(&l->lock);
    return 0;
}

int objc_sync_exit(id obj)
{
    Class lockClass = findLockClass(obj);
    mutex_t *lock = object_getIndexedIvars(lockClass);
    UNLOCK(lock);
    __ObjCLock *l = objc_getAssociatedObject(obj, &key);
    if (nil != l)
    {
        UNLOCK(&l->lock);
    }
    return 0;
}
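
For context on how this code is reached: the compiler lowers a @synchronized block into paired objc_sync_enter()/objc_sync_exit() calls, so with this commit the per-object lock is an associated __ObjCLock rather than a hidden lock class. A hedged sketch follows; bumpCounter is a hypothetical function and the token can be any Objective-C object.

static int counter;

void bumpCounter(id token)
{
    @synchronized(token)    // compiler emits objc_sync_enter(token): lazily
    {                       // creates and locks the __ObjCLock associated
        counter++;          // with `token`
    }                       // compiler emits objc_sync_exit(token): unlocks it
}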