Mirror of https://github.com/darlinghq/darling-libobjc2.git (synced 2024-11-27)
More GC fixes

commit e05c9c2ff5 (parent 33b7ecb2f8)
@@ -113,7 +113,7 @@ libobjc_CFLAGS += -Wno-unused-function

 # Uncomment this when debugging - it makes everything slow, but means that the
 # debugger actually works...
-#libobjc_CFLAGS += -fno-inline
+libobjc_CFLAGS += -fno-inline
 libobjc_OBJCFLAGS += $(libobjc_CFLAGS) $(libobjc_CFLAGS)

 ifneq ($(findstring gcc, $(CC)),)
@@ -65,6 +65,7 @@ PRIVATE enum objc_gc_mode current_gc_mode = GC_Optional;

 static BOOL endsWith(const char *string, const char *suffix)
 {
+	if (NULL == string) { return NO; }
 	char *interior = strstr(string, suffix);
 	return (interior && (strlen(string) == strlen(interior)));
 }
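
Note: the endsWith() helper above locates the suffix with strstr() and then compares lengths. For reference, a standalone sketch of a suffix test in plain C (hypothetical name, not part of this commit), comparing the string's tail against the suffix directly:

    #include <string.h>

    /* Returns nonzero when `string` ends with `suffix`; a minimal sketch.
     * Comparing the tail directly avoids depending on where strstr()
     * happens to find its first match. */
    static int ends_with(const char *string, const char *suffix)
    {
        if (string == NULL || suffix == NULL) { return 0; }
        size_t slen = strlen(string);
        size_t xlen = strlen(suffix);
        if (xlen > slen) { return 0; }
        return strcmp(string + (slen - xlen), suffix) == 0;
    }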
associate.m | 18
@@ -136,7 +136,7 @@ static void setReference(struct reference_list *list,
 			break;
 	}
 	// While inserting into the list, we need to lock it temporarily.
-	int *lock = lock_for_pointer(list);
+	volatile int *lock = lock_for_pointer(list);
 	lock_spinlock(lock);
 	struct reference *r = findReference(list, key);
 	// If there's an existing reference, then we can update it, otherwise we
@@ -266,21 +266,27 @@ static struct reference_list* referenceListForObject(id object, BOOL create)
 		Class cls = (Class)object;
 		if ((NULL == cls->extra_data) && create)
 		{
-			int *lock = lock_for_pointer(cls);
+			volatile int *lock = lock_for_pointer(cls);
+			struct reference_list *list = gc->malloc(sizeof(struct reference_list));
 			lock_spinlock(lock);
 			if (NULL == cls->extra_data)
 			{
-				cls->extra_data = gc->malloc(sizeof(struct reference_list));
-				INIT_LOCK(cls->extra_data->lock);
+				INIT_LOCK(list->lock);
+				cls->extra_data = list;
+				unlock_spinlock(lock);
 			}
+			else
+			{
+				unlock_spinlock(lock);
+				gc->free(list);
+			}
-			unlock_spinlock(lock);
 		}
 		return cls->extra_data;
 	}
 	Class hiddenClass = findHiddenClass(object);
 	if ((NULL == hiddenClass) && create)
 	{
-		int *lock = lock_for_pointer(object);
+		volatile int *lock = lock_for_pointer(object);
 		lock_spinlock(lock);
 		hiddenClass = findHiddenClass(object);
 		if (NULL == hiddenClass)
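
The hunk above is the interesting fix in associate.m: the gc->malloc() call moves out from under the spinlock, so the list is allocated speculatively before locking and freed again if another thread won the race to install one. A minimal sketch of that pattern, using C11 atomics rather than the runtime's spinlocks (all names hypothetical):

    #include <stdlib.h>
    #include <stdatomic.h>

    static _Atomic(void *) shared_slot;   /* hypothetical published pointer */

    /* Allocate before taking the lock so the allocator is never called
     * while the lock is held; discard the speculative block on a lost race. */
    void *get_or_create(size_t size, atomic_flag *lock)
    {
        void *existing = atomic_load(&shared_slot);
        if (existing != NULL) { return existing; }
        void *speculative = calloc(1, size);      /* no lock held here */
        while (atomic_flag_test_and_set(lock)) {} /* spin */
        existing = atomic_load(&shared_slot);
        if (existing == NULL)
        {
            atomic_store(&shared_slot, speculative);
            existing = speculative;
        }
        else
        {
            free(speculative);                    /* another thread won */
        }
        atomic_flag_clear(lock);
        return existing;
    }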
gc_boehm.c | 57
@@ -1,3 +1,4 @@
+//#define GC_DEBUG
 #define GNUSTEP_LIBOBJC_NO_LEGACY
 #include "objc/runtime.h"
 #include "class.h"
@@ -17,6 +18,8 @@
 #define __sync_swap __sync_lock_test_and_set
 #endif

+Class dead_class;
+
 Class objc_lookup_class(const char*);

 GC_descr gc_typeForClass(Class cls);
@@ -86,12 +89,10 @@ BOOL objc_collectingEnabled(void)

 void objc_gc_disable(void)
 {
-	fprintf(stderr, "Disabled collecting\n");
 	GC_disable();
 }
 void objc_gc_enable(void)
 {
-	fprintf(stderr, "Enabled collecting\n");
 	GC_enable();
 }

@@ -130,6 +131,7 @@ id objc_assign_strongCast(id val, id *ptr)

 id objc_assign_global(id val, id *ptr)
 {
+	//fprintf(stderr, "Storign %p in global %p\n", val, ptr);
 	GC_add_roots(ptr, ptr+1);
 	*ptr = val;
 	return val;
@@ -190,7 +192,7 @@ id objc_assign_weak(id value, id *location)

 static void runFinalize(void *addr, void *context)
 {
-	//fprintf(stderr, "FINALIZING %p\n", addr);
+	//fprintf(stderr, "FINALIZING %p (%s)\n", addr, ((id)addr)->isa->name);
 	static SEL finalize;
 	static SEL cxx_destruct;
 	if (UNLIKELY(0 == finalize))
@@ -208,7 +210,7 @@ static void runFinalize(void *addr, void *context)
 	{
 		objc_msg_lookup(addr, finalize)(addr, finalize);
 	}
-	*(void**)addr = (void*)(intptr_t)-1;//objc_lookup_class("NSZombie");
+	*(void**)addr = objc_lookup_class("NSZombie");
 }

 static void collectIvarForClass(Class cls, GC_word *bitmap)
@@ -253,28 +255,33 @@ static GC_descr descriptor_for_class(Class cls)
 	GC_word bitmap[size];
 	memset(bitmap, 0, size);
 	collectIvarForClass(cls, bitmap);
-	descr = GC_make_descriptor(bitmap, cls->instance_size);
 	// It's safe to round down here - if a class ends with an ivar that is
 	// smaller than a pointer, then it can't possibly be a pointer.
+	//fprintf(stderr, "Class is %d byes, %d words\n", cls->instance_size, cls->instance_size/sizeof(void*));
+	descr = GC_make_descriptor(bitmap, cls->instance_size / sizeof(void*));
 	gc_setTypeForClass(cls, descr);
 	return descr;
 }

 static id allocate_class(Class cls, size_t extra)
 {
-	id obj;
+	GC_collect_a_little();
+	id obj = 0;
 	// If there are some extra bytes, they may contain pointers, so we ignore
 	// the type
 	if (extra > 0)
 	{
 		// FIXME: Overflow checking!
-		obj = GC_malloc(class_getInstanceSize(cls) + extra);
+		//obj = GC_malloc(class_getInstanceSize(cls) + extra);
+		obj = GC_MALLOC(class_getInstanceSize(cls) + extra);
 	}
 	else
 	{
 		GC_descr d = descriptor_for_class(cls);
-		obj = GC_malloc_explicitly_typed(class_getInstanceSize(cls), d);
+		obj = GC_MALLOC_EXPLICITLY_TYPED(class_getInstanceSize(cls), d);
 	}
-	GC_register_finalizer_no_order(obj, runFinalize, 0, 0, 0);
-	//fprintf(stderr, "Allocating %p (%p + %d)\n", obj, cls, extra);
+	//fprintf(stderr, "Allocating %p (%p + %d). Base is %p\n", obj, cls, extra, GC_base(obj));
+	GC_REGISTER_FINALIZER_NO_ORDER(obj, runFinalize, 0, 0, 0);
 	return obj;
 }

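The lower-case to upper-case renames in this file hook into Boehm's debugging machinery: when GC_DEBUG is defined before gc.h is included (see the commented-out define at the top of the file), the upper-case macros expand to the GC_debug_* entry points, which record the allocation site; without it they fall back to the plain calls. Roughly, and assuming the usual gc.h convention (MY_* is a hypothetical name):

    #ifdef GC_DEBUG
    #  define MY_GC_MALLOC(sz) GC_debug_malloc((sz), __FILE__, __LINE__)
    #else
    #  define MY_GC_MALLOC(sz) GC_malloc(sz)
    #endif

In debug builds GC_MALLOC_EXPLICITLY_TYPED similarly degrades to an ordinary debug allocation, dropping the type descriptor.
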
@@ -362,7 +369,7 @@ id objc_gc_retain(id object)
 	refcount = refcount_table_get(refcounts, object);
 	if (NULL == refcount)
 	{
-		refcount = GC_malloc_uncollectable(sizeof(struct gc_refcount));
+		refcount = GC_MALLOC_UNCOLLECTABLE(sizeof(struct gc_refcount));
 		refcount->ptr = object;
 		refcount->refCount = 1;
 		refcount_insert(refcounts, refcount);
@@ -414,19 +421,17 @@ static void finalizeBuffer(void *addr, void *context)
 void* objc_gc_allocate_collectable(size_t size, BOOL isScanned)
 {
 	size_t allocSize = size;
-	//if ((size > 20) )
-	{
-		//allocSize += 450;
-		//allocSize *= 8;
-	}
 	if (1 || isScanned)
 	{
-		void *buf = GC_malloc(allocSize);
+		//void *buf = GC_malloc(allocSize);
+		//void *buf = GC_MALLOC(allocSize);
+		void *buf = GC_MALLOC_UNCOLLECTABLE(allocSize);
-		//if (size != allocSize)
-		//	fprintf(stderr, "Allocating %p (%d %d)\n", buf, size, allocSize);
+		//fprintf(stderr, "Allocating %p (%d %d)\n", buf, size, allocSize);
+		//GC_REGISTER_FINALIZER(buf, finalizeBuffer, 0, 0, 0);
 		return buf;
 	}
-	return GC_malloc_explicitly_typed(size, UnscannedDescr);
+	return GC_MALLOC_EXPLICITLY_TYPED(size, UnscannedDescr);
 }
 void* objc_gc_reallocate_collectable(void *ptr, size_t size, BOOL isScanned)
 {
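
With this hunk, objc_gc_allocate_collectable() temporarily hands out uncollectable memory: Boehm scans uncollectable blocks for pointers but never reclaims them, so they keep their referents alive and must be released with GC_FREE. A small usage sketch, assuming a standard bdwgc install:

    #include <gc/gc.h>

    int main(void)
    {
        GC_INIT();
        /* Scanned for pointers, but never collected automatically. */
        void **slots = GC_MALLOC_UNCOLLECTABLE(16 * sizeof(void *));
        slots[0] = GC_MALLOC(64);  /* kept alive via the uncollectable block */
        GC_gcollect();             /* slots[0] survives the collection */
        GC_FREE(slots);            /* uncollectable memory needs an explicit free */
        return 0;
    }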
@@ -452,7 +457,7 @@ void* objc_gc_reallocate_collectable(void *ptr, size_t size, BOOL isScanned)

 static void init(void)
 {
-	GC_init();
+	GC_INIT();
 	// Dump GC stats on exit - uncomment when debugging.
 	//atexit(GC_dump);
 	refcounts = refcount_create(4096);
@@ -485,12 +490,20 @@ BOOL objc_is_finalized(void *ptr) { return NO; }
 void objc_start_collector_thread(void) {}
 void objc_finalizeOnMainThread(Class cls) {}

+static void *debug_malloc(size_t s)
+{
+	return GC_MALLOC_UNCOLLECTABLE(s);
+}
+static void debug_free(void *ptr)
+{
+	GC_FREE(ptr);
+}
+
 PRIVATE struct gc_ops gc_ops_boehm =
 {
 	.allocate_class = allocate_class,
-	.malloc = GC_malloc_uncollectable,
-	.free = GC_free,
+	.malloc = debug_malloc,
+	.free = debug_free,
 	.init = init
 };

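The new debug_malloc()/debug_free() wrappers slot into the gc_ops function-pointer table, which is what lets the runtime retarget its internal allocations without touching any call sites. The same pattern in miniature (hypothetical names, plain libc):

    #include <stdio.h>
    #include <stdlib.h>

    struct alloc_ops
    {
        void *(*malloc)(size_t);
        void (*free)(void *);
    };

    static void *traced_malloc(size_t s)
    {
        void *p = malloc(s);
        fprintf(stderr, "alloc %zu -> %p\n", s, p);
        return p;
    }
    static void traced_free(void *p)
    {
        fprintf(stderr, "free %p\n", p);
        free(p);
    }

    /* Swap these pointers in a debug build; callers are unchanged. */
    static struct alloc_ops ops = { .malloc = traced_malloc, .free = traced_free };
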
properties.m | 12
@@ -27,7 +27,7 @@ id objc_getProperty(id obj, SEL _cmd, ptrdiff_t offset, BOOL isAtomic)
 	id ret;
 	if (isAtomic)
 	{
-		int *lock = lock_for_pointer(addr);
+		volatile int *lock = lock_for_pointer(addr);
 		lock_spinlock(lock);
 		ret = *(id*)addr;
 		ret = [ret retain];
@@ -67,7 +67,7 @@ void objc_setProperty(id obj, SEL _cmd, ptrdiff_t offset, id arg, BOOL isAtomic,
 	id old;
 	if (isAtomic)
 	{
-		int *lock = lock_for_pointer(addr);
+		volatile int *lock = lock_for_pointer(addr);
 		lock_spinlock(lock);
 		old = *(id*)addr;
 		*(id*)addr = arg;
@@ -96,8 +96,8 @@ void objc_copyPropertyStruct(void *dest,
 {
 	if (atomic)
 	{
-		int *lock = lock_for_pointer(src);
-		int *lock2 = lock_for_pointer(src);
+		volatile int *lock = lock_for_pointer(src);
+		volatile int *lock2 = lock_for_pointer(src);
 		lock_spinlock(lock);
 		lock_spinlock(lock2);
 		memcpy(dest, src, size);
@@ -122,7 +122,7 @@ void objc_getPropertyStruct(void *dest,
 {
 	if (atomic)
 	{
-		int *lock = lock_for_pointer(src);
+		volatile int *lock = lock_for_pointer(src);
 		lock_spinlock(lock);
 		memcpy(dest, src, size);
 		unlock_spinlock(lock);
@@ -145,7 +145,7 @@ void objc_setPropertyStruct(void *dest,
 {
 	if (atomic)
 	{
-		int *lock = lock_for_pointer(dest);
+		volatile int *lock = lock_for_pointer(dest);
 		lock_spinlock(lock);
 		memcpy(dest, src, size);
 		unlock_spinlock(lock);
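
Every properties.m hunk makes the same change: the spinlock pointer gains a volatile qualifier. The locks themselves are needed because memcpy() of a struct property is not atomic, so an unlocked reader could observe a half-written value. A compressed sketch of the guarded copy, with a hypothetical lock table shaped like the runtime's lock_for_pointer():

    #include <string.h>
    #include <stdint.h>

    #define LOCK_COUNT 16
    static volatile int locks[LOCK_COUNT];

    /* Hash an address to one lock in a small table so that unrelated
     * pointers rarely contend, as lock_for_pointer() does. */
    static volatile int *lock_for(const void *ptr)
    {
        uintptr_t hash = (uintptr_t)ptr >> 4;   /* drop alignment bits */
        return &locks[hash % LOCK_COUNT];
    }

    static void guarded_copy(void *dest, const void *src, size_t size)
    {
        volatile int *lock = lock_for(src);
        while (!__sync_bool_compare_and_swap(lock, 0, 1)) {} /* acquire */
        memcpy(dest, src, size);    /* copy while the lock is held */
        __sync_synchronize();
        *lock = 0;                  /* release */
    }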
@@ -26,7 +26,7 @@ extern int spinlocks[spinlock_count];
 * contention between the same property in different objects, so we can't just
 * use the ivar offset.
 */
-static inline int *lock_for_pointer(void *ptr)
+static inline volatile int *lock_for_pointer(void *ptr)
 {
 	intptr_t hash = (intptr_t)ptr;
 	// Most properties will be pointers, so disregard the lowest few bits
@@ -44,8 +44,9 @@ static inline int *lock_for_pointer(void *ptr)
 * no possibility of contention among calls to this, because it may only be
 * called by the thread owning the spin lock.
 */
-inline static void unlock_spinlock(int *spinlock)
+inline static void unlock_spinlock(volatile int *spinlock)
 {
+	__sync_synchronize();
 	*spinlock = 0;
 }
 /**
@@ -62,7 +63,7 @@ inline static void unlock_spinlock(int *spinlock)
 * using atomic accessors is a terrible idea, but in the common case it should
 * be very fast.
 */
-inline static void lock_spinlock(int *spinlock)
+inline static void lock_spinlock(volatile int *spinlock)
 {
 	int count = 0;
 	// Set the spin lock value to 1 if it is 0.
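These signature changes are the heart of the commit's spinlock edits: without volatile (or real atomics), the compiler may hoist the load of *spinlock out of the spin loop and spin on a stale register value forever. In outline:

    /* Risky under optimization: *flag may be read once and cached. */
    static void wait_bad(int *flag)
    {
        while (*flag == 0) {}   /* can become an infinite loop */
    }

    /* volatile forces a fresh load each iteration, matching the
     * post-commit lock_spinlock()/unlock_spinlock() signatures. */
    static void wait_ok(volatile int *flag)
    {
        while (*flag == 0) {}   /* re-reads memory every pass */
    }

On newer compilers, C11 _Atomic or the __atomic_* builtins would be the more precise tool; volatile plus __sync_synchronize() matches the era of this code.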