mirror of
https://github.com/darlinghq/darling-libobjc2.git
synced 2024-11-26 21:50:25 +00:00
First pass at implementing Apple-compatible GC using Boehm. Still needs some tidying, but the following work:
- __strong pointers, preventing objects from being freed - __weak pointers are automatically freed when the last remaining __strong pointer goes away - objc_gc_{retain,release}_np() functions, which can be used to implement CFRetain() and CFRelease() (adds a reference count - the object will not be collected until after its last retain is gone).
This commit is contained in:
parent
4f31ed8acd
commit
ad16172625
49
ANNOUNCE
49
ANNOUNCE
@ -1,4 +1,4 @@
|
||||
GNUstep Objective-C Runtime 1.4
|
||||
GNUstep Objective-C Runtime 1.5
|
||||
===============================
|
||||
|
||||
This is the fifth official release of the GNUstep Objective-C runtime (a.k.a.
|
||||
@ -6,53 +6,6 @@ libobjc2). This runtime was designed to support the features of Objective-C 2
|
||||
for use with GNUstep and other Objective-C programs. Highlights of this
|
||||
release include:
|
||||
|
||||
- Support for the associated reference APIs introduced with OS X 10.6. This
|
||||
allows storing arbitrary objects associated with another object.
|
||||
|
||||
- Concurrent, thread-safe, +initialize. The runtime will now send +initialize
|
||||
messages to different classes concurrently in multiple threads, but still
|
||||
ensures that no class receives another message until it has returned from
|
||||
+initialize. This mirrors OS X behaviour. Care must be taken that
|
||||
+initialize methods do not deadlock - if two classes are simultaneously
|
||||
initialised from two different threads, and their +initialize methods call
|
||||
methods in the other class, then deadlock will result.
|
||||
|
||||
- Exceptions can now safely propagate out of +initialize methods.
|
||||
|
||||
- Better hiding of local symbols. Now the internal runtime functions are not
|
||||
visible from outside of the runtime. This may break code that attempts to
|
||||
use private APIs, but means that it is now impossible to accidentally use
|
||||
private APIs.
|
||||
|
||||
- Dispatch table updates have been improved. Category loading no longer
|
||||
triggers dtable creation and partial dtable updates are faster.
|
||||
|
||||
- Improvements to the low memory profile. Uses 5-10% less (total) memory
|
||||
running Gorm, and now passes the entire GNUstep and EtoileFoundation test
|
||||
suites. Build with [g]make low_memory=yes to enable this mode. Note that
|
||||
the low memory profile trades some CPU time for memory usage, so don't use it
|
||||
for CPU-bound tasks.
|
||||
|
||||
- The class lookup cache optimisation (LLVM) now caches lookups irrespective of
|
||||
the ABI. It will insert direct references to the class structures if
|
||||
possible (i.e. if the symbol is visible). If not, then it will cache the
|
||||
result of objc_class_lookup(). The cache is shared across the module (the
|
||||
library, if run as a link-time optimisation), so the lookup only needs to be
|
||||
run once. This eliminates the need for explicit class lookup caching in the
|
||||
source code (when using LLVM-based compilers, such as Clang, LanguageKit, or
|
||||
DragonEgg).
|
||||
|
||||
- Added some missing runtime API functions, such as those for setting and
|
||||
getting instance variables. These are required by some language bridges,
|
||||
although using them safely is not actually possible with the non-fragile ABI
|
||||
(also true on OS X), since instance variables are no longer uniquely
|
||||
identified by name.
|
||||
|
||||
- Added support for accessing property type encodings. These are extended type
|
||||
encodings, allowing code introspecting the properties to learn if they are
|
||||
read-only, their assignment policy, the methods used to implement them, and
|
||||
so on.
|
||||
|
||||
You may obtain the code for this release from subversion at the following
|
||||
subversion branch:
|
||||
|
||||
|
11
GNUmakefile
11
GNUmakefile
@ -29,6 +29,7 @@ libobjc_C_FILES = \
|
||||
dtable.c\
|
||||
eh_personality.c\
|
||||
encoding2.c\
|
||||
gc_none.c\
|
||||
hash_table.c\
|
||||
hooks.c\
|
||||
ivar.c\
|
||||
@ -78,6 +79,14 @@ ifeq ($(low_memory), yes)
|
||||
libobjc_CPPFLAGS += -D__OBJC_LOW_MEMORY__
|
||||
endif
|
||||
|
||||
ifeq ($(boehm_gc), yes)
|
||||
libobjc_C_FILES += gc_boehm.c
|
||||
libobjc_LIBRARIES_DEPEND_UPON += -lgc-threaded
|
||||
libobjc_OBJCFLAGS += -fobjc-gc
|
||||
endif
|
||||
|
||||
|
||||
|
||||
ifeq ($(findstring openbsd, $(GNUSTEP_HOST_OS)), openbsd)
|
||||
libobjc_LIBRARIES_DEPEND_UPON += -pthread
|
||||
else
|
||||
@ -102,7 +111,7 @@ libobjc_CFLAGS += -Wno-unused-function
|
||||
|
||||
# Uncomment this when debugging - it makes everything slow, but means that the
|
||||
# debugger actually works...
|
||||
libobjc_CFLAGS += -fno-inline
|
||||
#libobjc_CFLAGS += -fno-inline
|
||||
libobjc_OBJCFLAGS += $(libobjc_CFLAGS) $(libobjc_CFLAGS)
|
||||
|
||||
ifneq ($(findstring gcc, $(CC)),)
|
||||
|
2
README
2
README
@ -105,7 +105,7 @@ Blocks
|
||||
------
|
||||
|
||||
The GNUstep runtime provides the run time support required for Apple's blocks
|
||||
(closures) extension to C.
|
||||
(closures) extension to C. This follows the same ABI as OS X 10.6.
|
||||
|
||||
Fast Proxies and Cacheable Lookups
|
||||
----------------------------------
|
||||
|
@ -1,6 +1,7 @@
|
||||
#include "visibility.h"
|
||||
#include "objc/runtime.h"
|
||||
#include "module.h"
|
||||
#include "gc_ops.h"
|
||||
#include <assert.h>
|
||||
#include <stdio.h>
|
||||
|
||||
@ -28,15 +29,24 @@ struct objc_abi_version
|
||||
unsigned long module_size;
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
gcc_abi = 8,
|
||||
gnustep_abi = 9,
|
||||
gc_abi = 10
|
||||
};
|
||||
|
||||
/**
|
||||
* List of supported ABIs.
|
||||
*/
|
||||
static struct objc_abi_version known_abis[] =
|
||||
{
|
||||
/* GCC ABI. */
|
||||
{8, 8, 9, sizeof(struct objc_module_abi_8)},
|
||||
/* Clang ABI. */
|
||||
{9, 8, 9, sizeof(struct objc_module_abi_8)}
|
||||
{gcc_abi, gcc_abi, gnustep_abi, sizeof(struct objc_module_abi_8)},
|
||||
/* Non-fragile ABI. */
|
||||
{gnustep_abi, gcc_abi, gc_abi, sizeof(struct objc_module_abi_8)},
|
||||
/* GC ABI. Adds a field describing the GC mode. */
|
||||
{gc_abi, gcc_abi, gc_abi, sizeof(struct objc_module_abi_10)}
|
||||
};
|
||||
|
||||
static int known_abi_count =
|
||||
@ -50,8 +60,14 @@ static int known_abi_count =
|
||||
}\
|
||||
} while(0)
|
||||
|
||||
PRIVATE BOOL objc_check_abi_version(unsigned long version, unsigned long module_size)
|
||||
static enum objc_gc_mode current_gc_mode = GC_Optional;
|
||||
|
||||
PRIVATE BOOL objc_check_abi_version(struct objc_module_abi_8 *module)
|
||||
{
|
||||
unsigned long version = module->version;
|
||||
unsigned long module_size = module->size;
|
||||
enum objc_gc_mode gc_mode = (version < gc_abi) ? GC_None
|
||||
: ((struct objc_module_abi_10*)module)->gc_mode;
|
||||
struct objc_abi_version *v = NULL;
|
||||
for (int i=0 ; i<known_abi_count ; i++)
|
||||
{
|
||||
@ -84,5 +100,20 @@ PRIVATE BOOL objc_check_abi_version(unsigned long version, unsigned long module_
|
||||
min_loaded_version = version;
|
||||
max_loaded_version = version;
|
||||
}
|
||||
|
||||
// If we're currently in GC-optional mode, then fall to one side or the
|
||||
// other if this module requires / doesn't support GC
|
||||
if (current_gc_mode == GC_Optional && (gc_mode != current_gc_mode))
|
||||
{
|
||||
current_gc_mode = gc_mode;
|
||||
if (gc_mode != GC_None)
|
||||
{
|
||||
enableGC(NO);
|
||||
}
|
||||
}
|
||||
// We can't mix GC_None and GC_Required code, but we can mix any other
|
||||
// combination
|
||||
FAIL_IF((gc_mode != GC_Optional) && (gc_mode != current_gc_mode),
|
||||
"Attempting to mix GC and non-GC code!");
|
||||
return YES;
|
||||
}
|
||||
|
3
dtable.c
3
dtable.c
@ -1,6 +1,7 @@
|
||||
#define __BSD_VISIBLE 1
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
#include "objc/runtime.h"
|
||||
#include "sarray2.h"
|
||||
#include "selector.h"
|
||||
@ -312,7 +313,7 @@ static BOOL installMethodInDtable(Class class,
|
||||
struct objc_method *method,
|
||||
BOOL replaceExisting)
|
||||
{
|
||||
assert(uninstalled_dtable != dtable);
|
||||
ASSERT(uninstalled_dtable != dtable);
|
||||
uint32_t sel_id = method->selector->index;
|
||||
struct objc_slot *slot = SparseArrayLookup(dtable, sel_id);
|
||||
if (NULL != slot)
|
||||
|
337
gc_boehm.c
Normal file
337
gc_boehm.c
Normal file
@ -0,0 +1,337 @@
|
||||
#include "objc/runtime.h"
|
||||
#include "visibility.h"
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include "gc_ops.h"
|
||||
#define I_HIDE_POINTERS
|
||||
#include <gc.h>
|
||||
|
||||
#ifndef __clang__
|
||||
#define __sync_swap __sync_lock_test_and_set
|
||||
#endif
|
||||
|
||||
// Collection-request options, mirroring Apple's objc_collect() flags.
// The low two bits select the kind of collection; the remaining bits are
// modifier flags (see collectionType() below).
enum
{
	OBJC_RATIO_COLLECTION = 0,
	OBJC_GENERATIONAL_COLLECTION = 1,
	OBJC_FULL_COLLECTION = 2,
	OBJC_EXHAUSTIVE_COLLECTION = 3,
	OBJC_COLLECT_IF_NEEDED = (1 << 3),
	OBJC_WAIT_UNTIL_DONE = (1 << 4),
};

// Option flag for objc_clear_stack().
enum
{
	OBJC_CLEAR_RESIDENT_STACK = 1
};
|
||||
|
||||
/**
 * Extracts the collection kind from a GC options word.  The low 2 bits
 * select the kind of collection to perform (ratio / generational / full /
 * exhaustive); the higher bits are modifier flags and are masked off here.
 * (The original comment was truncated mid-sentence.)
 */
static unsigned long collectionType(unsigned options)
{
	return options & 3;
}
|
||||
|
||||
|
||||
// Runs a garbage-collection pass.  A request for a full collection maps to
// GC_gcollect(); everything else performs a small incremental step.
// NOTE(review): OBJC_EXHAUSTIVE_COLLECTION also falls into the incremental
// branch here - confirm that is intended.
void objc_collect(unsigned long options)
{
	if (OBJC_FULL_COLLECTION == collectionType(options))
	{
		GC_gcollect();
	}
	else
	{
		GC_collect_a_little();
	}
}
|
||||
|
||||
// In this (Boehm-backed) build, garbage collection is always reported as
// enabled.
BOOL objc_collectingEnabled(void)
{
	return YES;
}
|
||||
|
||||
// Atomic compare-and-swap family for object pointers.  GCC's
// __sync_bool_compare_and_swap is a full barrier, so the Barrier and
// non-Barrier variants are implemented identically; the Global and
// InstanceVariable variants all delegate to the plain Ptr version.
BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile id *objectLocation)
{
	return __sync_bool_compare_and_swap(objectLocation, predicate, replacement);
}
BOOL objc_atomicCompareAndSwapPtrBarrier(id predicate, id replacement, volatile id *objectLocation)
{
	return __sync_bool_compare_and_swap(objectLocation, predicate, replacement);
}

BOOL objc_atomicCompareAndSwapGlobal(id predicate, id replacement, volatile id *objectLocation)
{
	return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapGlobalBarrier(id predicate, id replacement, volatile id *objectLocation)
{
	return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapInstanceVariable(id predicate, id replacement, volatile id *objectLocation)
{
	return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
BOOL objc_atomicCompareAndSwapInstanceVariableBarrier(id predicate, id replacement, volatile id *objectLocation)
{
	return objc_atomicCompareAndSwapPtr(predicate, replacement, objectLocation);
}
|
||||
|
||||
|
||||
// Write barrier for __strong assignments through arbitrary pointers.
// Marks the containing "stubborn" allocation writable around the store so
// the collector notices the mutation, then returns the assigned value.
// NOTE(review): this passes the slot address rather than the base of the
// allocation to GC_change_stubborn - confirm Boehm accepts interior
// pointers here.
id objc_assign_strongCast(id val, id *ptr)
{
	GC_change_stubborn(ptr);
	*ptr = val;
	GC_end_stubborn_change(ptr);
	return val;
}
|
||||
|
||||
// Write barrier for assignments to global variables.  Globals are GC
// roots, so no stubborn-object bookkeeping is needed; a plain store
// suffices.  Returns the assigned value.
id objc_assign_global(id val, id *ptr)
{
	*ptr = val;
	return val;
}
|
||||
// Write barrier for instance-variable assignment: `dest` is the object
// and `offset` the byte offset of the ivar within it.  The object is
// marked writable around the store (stubborn-object protocol) so the
// collector sees the mutation.  Returns the assigned value.
id objc_assign_ivar(id val, id dest, ptrdiff_t offset)
{
	GC_change_stubborn(dest);
	*(id*)((char*)dest+offset) = val;
	GC_end_stubborn_change(dest);
	return val;
}
|
||||
// Moves `size` bytes from `src` to `dst` (regions may overlap) and
// returns `dst`, matching the memmove contract.
// FIXME: Does this need to be called with the allocation lock held?
void *objc_memmove_collectable(void *dst, const void *src, size_t size)
{
	void *result = memmove(dst, src, size);
	return result;
}
|
||||
/**
 * Weak Pointers:
 *
 * To implement weak pointers, we store the hidden pointer (bits all flipped)
 * in the real address.  We tell the GC to zero the pointer when the associated
 * object is finalized.  The read barrier locks the GC to prevent it from
 * freeing anything, deobfuscates the pointer (at which point it becomes a
 * GC-visible on-stack pointer), and then returns it.
 */

// Called by objc_read_weak() with the GC allocation lock held: loads the
// obfuscated pointer from the weak slot and reveals it.  A zero value is
// returned untouched, so a cleared (collected) weak reference reads as nil.
static void *readWeakLocked(void *ptr)
{
	void *val = *(void**)ptr;
	return 0 == val ? val : REVEAL_POINTER(val);
}
|
||||
|
||||
// Read barrier for __weak loads: runs readWeakLocked() under the GC
// allocation lock so the collector cannot free the referent mid-read.
id objc_read_weak(id *location)
{
	return GC_call_with_alloc_lock(readWeakLocked, location);
}
|
||||
|
||||
// Write barrier for __weak stores.  The pointer is stored obfuscated
// (HIDE_POINTER) so the collector does not treat the slot as a strong
// reference, and a disappearing link is registered so the slot is zeroed
// when `value` is finalized.  Races with concurrent writers are handled
// by retrying (tail recursion).
// NOTE(review): after the __sync_swap below, *location is 0, so when
// `old` was non-nil the CAS always fails and the function recurses once
// with old == 0 before succeeding - it looks like the compare argument
// was meant to be 0 rather than `old`; confirm before changing.
id objc_assign_weak(id value, id *location)
{

	// Temporarily zero this pointer and get the old value
	id old = __sync_swap(location, 0);
	if (0 != old)
	{
		GC_unregister_disappearing_link((void**)location);
	}
	GC_general_register_disappearing_link((void**)location, value);
	// If some other thread has modified this, then we may have two different
	// objects registered to make this pointer 0 if either is destroyed.  This
	// would be bad, so we need to make sure that we unregister them and
	// register the correct one.
	if (!__sync_bool_compare_and_swap(location, old, (id)HIDE_POINTER(value)))
	{
		return objc_assign_weak(value, location);
	}
	else
	{
		//fprintf(stderr, "Done weak assignment\n");
	}
	return value;
}
|
||||
|
||||
// Finalizer callback registered with the Boehm GC: sends the object a
// -finalize message before its memory is reclaimed.  The selector is
// looked up once and cached in a function-local static.  `context` is
// unused (required by the GC_finalization_proc signature).
static void runFinalize(void *addr, void *context)
{
	static SEL finalize;
	if (UNLIKELY(0 == finalize))
	{
		finalize = sel_registerName("finalize");
	}
	objc_msg_lookup(addr, finalize)(addr, finalize);
}
|
||||
|
||||
/**
 * Allocates a GC-managed instance of `cls`, with `extra` additional bytes
 * after the instance data, and registers a finalizer that will send it
 * -finalize before collection.
 *
 * Instances with extra bytes use plain GC_malloc; plain instances use
 * GC_malloc_stubborn (write-barriered allocation).
 */
static id allocate_class(Class cls, size_t extra)
{
	id obj;
	if (extra > 0)
	{
		size_t size = class_getInstanceSize(cls);
		// Overflow check for (size + extra); resolves the old FIXME.  On
		// overflow, fail the allocation rather than allocating a
		// too-small buffer.
		if (extra > (size_t)-1 - size)
		{
			return 0;
		}
		obj = GC_malloc(size + extra);
	}
	else
	{
		obj = GC_malloc_stubborn(class_getInstanceSize(cls));
	}
	GC_register_finalizer_no_order(obj, runFinalize, 0, 0, 0);
	return obj;
}
|
||||
|
||||
// Apple-compatible GC allocation entry point; defers to
// class_createInstance().  NOTE(review): `extra` is declared int here but
// the allocator takes size_t - confirm against the public header.
id objc_allocate_object(Class cls, int extra)
{
	return class_createInstance(cls, extra);
}
|
||||
|
||||
// Registers the calling thread's stack with the collector so it is
// scanned for roots.  If errorOnNotRegistered is YES, a *fresh*
// registration (GC_SUCCESS) is reported as an error, because the caller
// asserted the thread should already have been registered; duplicate
// registration is always fine.  Aborts if the stack base cannot be found
// or the GC build has no thread support.
static void registerThread(BOOL errorOnNotRegistered)
{
	struct GC_stack_base base;
	if (GC_get_stack_base(&base) != GC_SUCCESS)
	{
		fprintf(stderr, "Unable to find stack base for new thread\n");
		abort();
	}
	switch (GC_register_my_thread(&base))
	{
		case GC_SUCCESS:
			if (errorOnNotRegistered)
			{
				fprintf(stderr, "Thread should have already been registered with the GC\n");
			}
			// fallthrough: both fresh and duplicate registration return normally
		case GC_DUPLICATE:
			return;
		case GC_NO_THREADS:
		case GC_UNIMPLEMENTED:
			fprintf(stderr, "Unable to register stack\n");
			abort();
	}
}
|
||||
|
||||
void objc_registerThreadWithCollector(void)
|
||||
{
|
||||
registerThread(NO);
|
||||
}
|
||||
void objc_unregisterThreadWithCollector(void)
|
||||
{
|
||||
GC_unregister_my_thread();
|
||||
}
|
||||
void objc_assertRegisteredThreadWithCollector()
|
||||
{
|
||||
registerThread(YES);
|
||||
}
|
||||
|
||||
/**
 * Structure stored for each GC-managed object that has been given an
 * explicit reference count via objc_gc_retain_np().  The entry itself is
 * allocated uncollectably, so the strong `ptr` inside it pins the object
 * until the count drops to zero.  (The original comment was truncated.)
 */
struct gc_refcount
{
	/** Reference count (manipulated with __sync atomics). */
	int refCount;
	/** Strong pointer keeping the object alive. */
	id ptr;
};
|
||||
|
||||
// Hash-table equality predicate: nonzero when `ptr` is the object this
// refcount entry tracks.  (Despite the name, this is an equality test,
// not a three-way comparison - that is the convention hash_table.h
// expects for MAP_TABLE_COMPARE_FUNCTION.)
static int refcount_compare(const void *ptr, const struct gc_refcount *rc)
{
	return ptr == rc->ptr;
}
|
||||
static uint32_t ptr_hash(const void *ptr)
|
||||
{
|
||||
// Bit-rotate right 4, since the lowest few bits in an object pointer will
|
||||
// always be 0, which is not so useful for a hash value
|
||||
return ((uintptr_t)ptr >> 4) | ((uintptr_t)ptr << (sizeof(id) * 8) - 4);
|
||||
}
|
||||
// Hash of a refcount entry: the hash of the object pointer it tracks,
// so lookups by raw pointer (ptr_hash) land in the same bucket.
static uint32_t refcount_hash(const struct gc_refcount *rc)
{
	return ptr_hash(rc->ptr);
}
|
||||
#define MAP_TABLE_NAME refcount
|
||||
#define MAP_TABLE_COMPARE_FUNCTION refcount_compare
|
||||
#define MAP_TABLE_HASH_KEY ptr_hash
|
||||
#define MAP_TABLE_HASH_VALUE refcount_hash
|
||||
#include "hash_table.h"
|
||||
|
||||
static refcount_table *refcounts;
|
||||
|
||||
// Adds an explicit reference to `object`, pinning it so the collector
// will not reclaim it until a matching objc_gc_release_np().  Intended
// as the backing for CFRetain().  The first retain creates the
// (uncollectable) refcount entry; the lookup is double-checked under the
// table lock so concurrent first retains do not create duplicates.
// Returns `object`.
id objc_gc_retain_np(id object)
{
	struct gc_refcount *refcount = refcount_table_get(refcounts, object);
	if (NULL == refcount)
	{
		LOCK_FOR_SCOPE(&(refcounts->lock));
		refcount = refcount_table_get(refcounts, object);
		if (NULL == refcount)
		{
			refcount = GC_malloc_uncollectable(sizeof(struct gc_refcount));
			refcount->ptr = object;
			refcount->refCount = 1;
			refcount_insert(refcounts, refcount);
			return object;
		}
	}
	__sync_fetch_and_add(&(refcount->refCount), 1);
	return object;
}
|
||||
// Removes an explicit reference previously added by objc_gc_retain_np().
// Intended as the backing for CFRelease().  When the count reaches zero
// the entry is removed from the table and freed (which drops the strong
// pin - the object itself is left to the collector); if another thread
// re-retained concurrently, the entry is reinserted instead.
void objc_gc_release_np(id object)
{
	struct gc_refcount *refcount = refcount_table_get(refcounts, object);
	// This object has not been explicitly retained, don't release it
	if (0 == refcount) { return; }

	if (0 == __sync_sub_and_fetch(&(refcount->refCount), 1))
	{
		LOCK_FOR_SCOPE(&(refcounts->lock));
		refcount_remove(refcounts, object);
		__sync_synchronize();
		// If another thread has incremented the reference count while we were
		// doing this, then we need to add the count back into the table,
		// otherwise we can carry on.
		if (__sync_bool_compare_and_swap(&(refcount->refCount), 0, 0))
		{
			// This doesn't free the object, it just removes the explicit
			// reference
			GC_free(refcount);
		}
		else
		{
			refcount_insert(refcounts, refcount);
		}
	}
}
|
||||
|
||||
|
||||
// One-time collector setup (called via gc_ops_boehm.init): creates the
// explicit-refcount table used by objc_gc_{retain,release}_np().
static void init(void)
{
	refcounts = refcount_create(4096);
}
|
||||
|
||||
// FIXME: These are all stub implementations that should be replaced with
// something better
// Stub: no object is ever reported as finalized.
BOOL objc_is_finalized(void *ptr) { return NO; }
|
||||
/**
 * Scrubs (part of) the unused portion of the stack so stale pointers
 * there do not keep garbage alive.  `options` is currently ignored.
 *
 * Fixed: the original cleared only 1024 *bytes* of a 1024-*int* (4KB on
 * typical targets) array; use sizeof to clear the whole buffer.
 */
void objc_clear_stack(unsigned long options)
{
	// This isn't a very good implementation - we should really be working out
	// how much stack space is left somehow, but this is not possible to do
	// portably.
	int i[1024];
	memset(i, 0, sizeof(i));
}
|
||||
// Stub: reports collection disabled.  NOTE(review): inconsistent with
// objc_collectingEnabled() above, which returns YES - confirm which
// spelling callers actually use.
BOOL objc_collecting_enabled(void) { return NO; }
// Stubs: collection tuning, main-thread finalization, and the collector
// thread are not yet implemented for this backend.
void objc_set_collection_threshold(size_t threshold) {}
void objc_set_collection_ratio(size_t ratio) {}
void objc_start_collector_thread(void) {}
void objc_finalizeOnMainThread(Class cls) {}
void objc_setCollectionThreshold(size_t threshold) {}
void objc_startCollectorThread(void) {}
|
||||
|
||||
|
||||
// Boehm-GC implementation of the collector operations vtable (see
// gc_ops.h); installed by enableGC().
PRIVATE struct gc_ops gc_ops_boehm =
{
	.allocate_class = allocate_class,
	.init = init
};
|
||||
|
||||
// Switches the runtime from the no-op collector to the Boehm collector.
// The compare-and-swap guarantees the switch (and init) happens at most
// once, even if called from multiple threads.
// NOTE(review): the `exclude` flag (documented as `exclusive` in
// gc_ops.h) is currently ignored - retain/release are not yet turned
// into no-ops; confirm whether that is still pending work.
PRIVATE void enableGC(BOOL exclude)
{
	if (__sync_bool_compare_and_swap(&gc, &gc_ops_none, &gc_ops_boehm))
	{
		gc->init();
	}
}
|
19
gc_none.c
Normal file
19
gc_none.c
Normal file
@ -0,0 +1,19 @@
|
||||
#include "visibility.h"
|
||||
#include "objc/runtime.h"
|
||||
#include "gc_ops.h"
|
||||
#include "class.h"
|
||||
#include <stdlib.h>
|
||||
|
||||
// Non-GC instance allocation: zeroed heap memory sized for the instance
// plus any caller-requested extra bytes.  NOTE(review): the calloc result
// is not checked for NULL - callers appear to assume success.
static id allocate_class(Class cls, size_t extraBytes)
{
	return calloc(cls->instance_size + extraBytes, 1);
}
|
||||
|
||||
PRIVATE struct gc_ops gc_ops_none =
|
||||
{
|
||||
.allocate_class = allocate_class
|
||||
};
|
||||
PRIVATE struct gc_ops *gc = &gc_ops_none;
|
||||
|
||||
PRIVATE BOOL isGCEnabled = NO;
|
||||
|
52
gc_ops.h
Normal file
52
gc_ops.h
Normal file
@ -0,0 +1,52 @@
|
||||
/**
 * Garbage collection operations.  Each collector backend (none / Boehm)
 * provides one of these vtables; the runtime dispatches through the
 * global `gc` pointer.
 */
struct gc_ops
{
	/**
	 * Initialises this collector.
	 */
	void (*init)(void);
	/**
	 * Allocates enough space for an instance of the class, followed by some
	 * extra bytes.
	 */
	id (*allocate_class)(Class, size_t);
};
|
||||
|
||||
/**
|
||||
* Enables garbage collection, if it isn't already enabled.
|
||||
*
|
||||
* If the exclusive flag is set, then this will ensure that all -retain /
|
||||
* -release / -autorelease messages become no-ops.
|
||||
*/
|
||||
PRIVATE void enableGC(BOOL exclusive);
|
||||
/**
|
||||
* The mode for garbage collection
|
||||
*/
|
||||
enum objc_gc_mode
|
||||
{
|
||||
/** This module neither uses, nor supports, garbage collection. */
|
||||
GC_None = 0,
|
||||
/**
|
||||
* This module uses garbage collection, but also sends retain / release
|
||||
* messages. It can be used with or without GC.
|
||||
*/
|
||||
GC_Optional = 1,
|
||||
/**
|
||||
* This module expects garbage collection and will break without it.
|
||||
*/
|
||||
GC_Required = 2
|
||||
};
|
||||
|
||||
/**
|
||||
* The current Objective-C garbage collection mode.
|
||||
*/
|
||||
extern enum objc_gc_mode gc_mode;
|
||||
|
||||
/**
|
||||
* The current set of garbage collector operations to use.
|
||||
*/
|
||||
extern struct gc_ops *gc;
|
||||
|
||||
extern struct gc_ops gc_ops_boehm;
|
||||
extern struct gc_ops gc_ops_none;
|
2
loader.c
2
loader.c
@ -28,7 +28,7 @@ void __objc_exec_class(struct objc_module_abi_8 *module)
|
||||
// Check that this module uses an ABI version that we recognise.
|
||||
// In future, we should pass the ABI version to the class / category load
|
||||
// functions so that we can change various structures more easily.
|
||||
assert(objc_check_abi_version(module->version, module->size));
|
||||
assert(objc_check_abi_version(module));
|
||||
|
||||
if (first_run)
|
||||
{
|
||||
|
2
loader.h
2
loader.h
@ -11,7 +11,7 @@
|
||||
* module size. This depends on whether another module with an incompatible
|
||||
* ABI has already been loaded.
|
||||
*/
|
||||
BOOL objc_check_abi_version(unsigned long version, unsigned long module_size);
|
||||
BOOL objc_check_abi_version(struct objc_module_abi_8 *module);
|
||||
/**
|
||||
* Initializes a protocol list, uniquing the protocols in the list.
|
||||
*/
|
||||
|
13
module.h
13
module.h
@ -72,6 +72,19 @@ struct objc_module_abi_8
|
||||
struct objc_symbol_table_abi_8 *symbol_table;
|
||||
};
|
||||
|
||||
struct objc_module_abi_10
|
||||
{
|
||||
/**
|
||||
* Inherited fields from version 8 of the ABI.
|
||||
*/
|
||||
struct objc_module_abi_8 old;
|
||||
/**
|
||||
* GC mode. GC_Optional code can be mixed with anything, but GC_None code
|
||||
* can't be mixed with GC_Required code.
|
||||
*/
|
||||
int gc_mode;
|
||||
};
|
||||
|
||||
/**
|
||||
* List of static instances of a named class provided in this module.
|
||||
*/
|
||||
|
@ -6,6 +6,7 @@
|
||||
#include "method_list.h"
|
||||
#include "lock.h"
|
||||
#include "dtable.h"
|
||||
#include "gc_ops.h"
|
||||
|
||||
/* Make glibc export strdup() */
|
||||
|
||||
@ -274,7 +275,7 @@ id class_createInstance(Class cls, size_t extraBytes)
|
||||
{
|
||||
CHECK_ARG(cls);
|
||||
if (Nil == cls) { return nil; }
|
||||
id obj = calloc(cls->instance_size + extraBytes, 1);
|
||||
id obj = gc->allocate_class(cls, extraBytes);
|
||||
obj->isa = cls;
|
||||
return obj;
|
||||
}
|
||||
|
@ -11,7 +11,7 @@
|
||||
#define _SARRAY_H_INCLUDED_
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <assert.h>
|
||||
#include "visibility.h"
|
||||
|
||||
/**
|
||||
* Sparse arrays, used to implement dispatch tables. Current implementation is
|
||||
@ -70,7 +70,7 @@ static inline void* SparseArrayLookup(SparseArray * sarray, uint32_t index)
|
||||
uint32_t i = index;
|
||||
switch (sarray->shift)
|
||||
{
|
||||
default: assert(0 && "broken sarray");
|
||||
default: UNREACHABLE("broken sarray");
|
||||
case 0:
|
||||
return sarray->data[i & 0xff];
|
||||
case 8:
|
||||
|
11
sendmsg2.c
11
sendmsg2.c
@ -58,7 +58,7 @@ Slot_t objc_msg_lookup_internal(id *receiver,
|
||||
retry:;
|
||||
Slot_t result = objc_dtable_lookup((*receiver)->isa->dtable,
|
||||
selector->index);
|
||||
if (0 == result)
|
||||
if (UNLIKELY(0 == result))
|
||||
{
|
||||
Class class = (*receiver)->isa;
|
||||
dtable_t dtable = dtable_for_class(class);
|
||||
@ -110,6 +110,11 @@ retry:;
|
||||
Slot_t (*objc_plane_lookup)(id *receiver, SEL op, id sender) =
|
||||
objc_msg_lookup_internal;
|
||||
|
||||
Slot_t objc_msg_lookup_sender_non_nil(id *receiver, SEL selector, id sender)
|
||||
{
|
||||
return objc_msg_lookup_internal(receiver, selector, sender);
|
||||
}
|
||||
|
||||
/**
|
||||
* New Objective-C lookup function. This permits the lookup to modify the
|
||||
* receiver and also supports multi-dimensional dispatch based on the sender.
|
||||
@ -119,7 +124,7 @@ Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender)
|
||||
// Returning a nil slot allows the caller to cache the lookup for nil too,
|
||||
// although this is not particularly useful because the nil method can be
|
||||
// inlined trivially.
|
||||
if(*receiver == nil)
|
||||
if (UNLIKELY(*receiver == nil))
|
||||
{
|
||||
return &nil_slot;
|
||||
}
|
||||
@ -155,7 +160,7 @@ Slot_t objc_slot_lookup_super(struct objc_super *super, SEL selector)
|
||||
if (0 == result)
|
||||
{
|
||||
// Dtable should always be installed in the superclass
|
||||
assert(dtable_for_class(class) != uninstalled_dtable);
|
||||
ASSERT(dtable_for_class(class) != uninstalled_dtable);
|
||||
result = &nil_slot;
|
||||
}
|
||||
return result;
|
||||
|
11
visibility.h
11
visibility.h
@ -11,3 +11,14 @@
|
||||
# define LEGACY PUBLIC
|
||||
#endif
|
||||
|
||||
#if defined(DEBUG) || (!defined(__clang__))
# include <assert.h>
# define UNREACHABLE(x) assert(0 && x)
# define ASSERT(x) assert(x)
#else
# define UNREACHABLE(x) __builtin_unreachable()
/* BUG FIX: the original expanded to `if (x) __builtin_unreachable()`,
 * which declares the *success* path unreachable - undefined behaviour
 * whenever the assertion holds.  The condition must be negated so that
 * only a failing assertion is marked unreachable. */
# define ASSERT(x) do { if (!(x)) __builtin_unreachable(); } while(0)
#endif

/* Branch-prediction hints for hot paths. */
#define LIKELY(x) __builtin_expect(x, 1)
#define UNLIKELY(x) __builtin_expect(x, 0)
|
||||
|
Loading…
Reference in New Issue
Block a user