Mirror of https://github.com/darlinghq/darling-libkqueue.git, synced 2025-02-15 01:37:31 +00:00
Re-enable the knote mutex and reference counter.
Improve the knote reference counting mechanism. New function knote_delete().
Rename 'flags' to 'kn_flags' in struct knote.
Rename 'mtx' to 'kn_mtx' in struct knote.
Rename 'kntree_ent' to 'kn_entries' in struct knote.
Add a knote flag named 'KNFL_KNOTE_DELETED' to indicate that a knote has been deleted. With reference counting, this becomes necessary.

git-svn-id: svn://svn.code.sf.net/p/libkqueue/code/trunk@491 fb4e3144-bc1c-4b72-a658-5bcd248dd7f7
This commit is contained in: parent 731410fef0, commit fcc95e5a18
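For orientation, here is a minimal sketch (not libkqueue source) of the lifecycle the commit message describes: a knote is created holding one reference, knote_retain()/knote_release() adjust the count, and knote_delete() marks the knote KNFL_KNOTE_DELETED so that the final release can free it. Field and function names follow the commit message; the simplified bodies are illustrative assumptions only.

    /* Illustrative sketch only -- not libkqueue source. */
    #include <pthread.h>
    #include <stdlib.h>

    #define KNFL_KNOTE_DELETED (0x10)

    struct knote_sketch {
        pthread_mutex_t kn_mtx;    /* per-knote lock ('kn_mtx' after the rename) */
        int             kn_ref;    /* reference count, starts at 1 */
        int             kn_flags;  /* KNFL_* flags ('kn_flags' after the rename) */
    };

    static struct knote_sketch *knote_new_sketch(void)
    {
        struct knote_sketch *kn = calloc(1, sizeof(*kn));

        if (kn == NULL)
            return (NULL);
        pthread_mutex_init(&kn->kn_mtx, NULL);
        kn->kn_ref = 1;                      /* the creator holds the first reference */
        return (kn);
    }

    static void knote_retain_sketch(struct knote_sketch *kn)
    {
        pthread_mutex_lock(&kn->kn_mtx);     /* a real implementation might use atomics */
        kn->kn_ref++;
        pthread_mutex_unlock(&kn->kn_mtx);
    }

    /* Caller holds kn->kn_mtx; the lock is always released before returning. */
    static void knote_release_sketch(struct knote_sketch *kn)
    {
        if (--kn->kn_ref == 0 && (kn->kn_flags & KNFL_KNOTE_DELETED)) {
            pthread_mutex_unlock(&kn->kn_mtx);
            pthread_mutex_destroy(&kn->kn_mtx);
            free(kn);
            return;
        }
        pthread_mutex_unlock(&kn->kn_mtx);
    }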
BUGS (10 changed lines)
@@ -1,3 +1,13 @@
+* When passing a knote pointer to the kernel, the reference count of
+  the knote structure should be incremented. Conversely, when the pointer
+  has been returned from the kernel and the event unregistered from the
+  kernel, the reference count should be decremented.
+
+* Some functions should crash instead of silently printing a debug
+  message.. for example, knote_release().
+
+* knote_get_by_ident uses 'short' for the ident, but the actual datatype
+  is 'uintptr_t'.
 
 * need to uninitialize library after fork() using pthread_atfork()
 
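A hypothetical illustration of the first BUGS item above (not library code): take a reference before the knote pointer is handed to the kernel, and drop it once the event has been unregistered. The epoll usage mirrors the Linux backend, which stores the knote in ev.data.ptr; the wrapper functions and their error handling are assumptions, and the real knote_release() additionally requires the knote lock to be held.

    /*
     * Hypothetical wrappers -- not libkqueue code. knote_retain()/knote_release()
     * are the declarations this commit adds to the private header; locking is
     * elided here for brevity.
     */
    #include <stdint.h>
    #include <sys/epoll.h>

    struct filter;
    struct knote;
    void knote_retain(struct knote *);
    void knote_release(struct filter *, struct knote *);

    static int register_knote(struct filter *filt, int epfd, int fd,
                              struct knote *kn, uint32_t events)
    {
        struct epoll_event ev = { .events = events, .data.ptr = kn };

        knote_retain(kn);                /* the kernel will hand this pointer back */
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
            knote_release(filt, kn);     /* registration failed; undo the reference */
            return (-1);
        }
        return (0);
    }

    static int unregister_knote(struct filter *filt, int epfd, int fd,
                                struct knote *kn)
    {
        if (epoll_ctl(epfd, EPOLL_CTL_DEL, fd, NULL) < 0)
            return (-1);
        knote_release(filt, kn);         /* kernel can no longer return the pointer */
        return (0);
    }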
Makefile (2 changed lines)
@@ -68,7 +68,7 @@ debug: clean $(PROGRAM).so.$(ABI_VERSION) test/config.mk
 	cd test && make debug
 
 debug-check: clean test/config.mk
-	cd test && KQUEUE_DEBUG=y make check
+	cd test && KQUEUE_DEBUG=yes make check
 
 $(DISTFILE): $(SOURCES) $(HEADERS)
 	mkdir $(PROGRAM)-$(VERSION)
TODO (27 changed lines)
@@ -1,3 +1,30 @@
+* There are very likely to be lock-related bugs stemming from when the
+  kevent_copyin() and kevent_copyout() functions stopped holding the
+  kqueue lock. It would be nice to have a lock testing mechanism to
+  check that the correct locks are held.. something like:
+
+    #define MTX_LOCKED 1
+    #define MTX_UNLOCKED 0
+    #if NDEBUG
+    #define LOCK_MONITOR(x) int x
+    #define lock_assert(x,y) assert((x) == (y))
+    #define lock_acquire(x,y) do { pthread_mutex_lock((x)); y = MTX_LOCKED } while (0)
+    #define lock_release(x,y) do { pthread_mutex_unlock((x)); y = MTX_UNLOCKED } while (0)
+    #else
+    #define LOCK_MONITOR(x) /**/
+    #define lock_assert(x,y) do {} while (0)
+    #define lock_acquire(x,y) do { pthread_mutex_lock((x)); } while (0)
+    #define lock_release(x,y) do { pthread_mutex_unlock((x)); } while (0)
+    #endif
+
+    struct foo {
+        LOCK_MONITOR(foo_lockstat);
+    }
+    ...
+
+    lock_assert(&kn->kn_lockmon, MTX_LOCKED);
+    lock_assert(&kn->kn_lockmon, MTX_UNLOCKED);
+
 * Add a counter that increments on each each kevent() call. When printing
   debug information within kevent(), display the value of the counter.
   This will be helpful when debugging a multithreaded program that may have
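For reference, a compilable variant of the lock-monitor macros sketched in the TODO above. A few details differ from that sketch, as assumptions/fixes rather than project code: the conditional is keyed on !NDEBUG (NDEBUG conventionally disables assertions), the state updates get their missing semicolons, and the member declaration's semicolon is folded into LOCK_MONITOR so a release build does not leave a stray ';' inside the struct.

    #include <assert.h>
    #include <pthread.h>

    #define MTX_LOCKED   1
    #define MTX_UNLOCKED 0

    #ifndef NDEBUG
    # define LOCK_MONITOR(x)     int x;
    # define lock_assert(x, y)   assert((x) == (y))
    # define lock_acquire(m, x)  do { pthread_mutex_lock((m));   (x) = MTX_LOCKED;   } while (0)
    # define lock_release(m, x)  do { pthread_mutex_unlock((m)); (x) = MTX_UNLOCKED; } while (0)
    #else
    # define LOCK_MONITOR(x)     /* compiled out */
    # define lock_assert(x, y)   do { } while (0)
    # define lock_acquire(m, x)  do { pthread_mutex_lock((m)); } while (0)
    # define lock_release(m, x)  do { pthread_mutex_unlock((m)); } while (0)
    #endif

    /* usage */
    struct foo {
        pthread_mutex_t foo_mtx;
        LOCK_MONITOR(foo_lockstat)   /* no trailing ';' -- it lives inside the macro */
    };

    static void foo_poke(struct foo *f)
    {
        lock_acquire(&f->foo_mtx, f->foo_lockstat);
        lock_assert(f->foo_lockstat, MTX_LOCKED);
        /* ... touch fields guarded by foo_mtx ... */
        lock_release(&f->foo_mtx, f->foo_lockstat);
    }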
@@ -124,7 +124,8 @@ filter_unregister_all(struct kqueue *kq)
         if (kq->kq_filt[i].kf_destroy != NULL)
             kq->kq_filt[i].kf_destroy(&kq->kq_filt[i]);
 
-        knote_free_all(&kq->kq_filt[i]);
+        //XXX-FIXME
+        //knote_free_all(&kq->kq_filt[i]);
 
         if (kqops.filter_free != NULL)
             kqops.filter_free(kq, &kq->kq_filt[i]);
@@ -154,7 +154,6 @@ kevent_copyin_one(struct kqueue *kq, const struct kevent *src)
         knote_insert(filt, kn);
         dbg_printf("created kevent %s", kevent_dump(src));
 
-
         /* XXX- FIXME Needs to be handled in kn_create() to prevent races */
         if (src->flags & EV_DISABLE) {
             kn->kev.flags |= EV_DISABLE;
@@ -162,33 +161,33 @@ kevent_copyin_one(struct kqueue *kq, const struct kevent *src)
         }
         //........................................
 
 
 
         return (0);
     } else {
         dbg_printf("no entry found for ident=%u", (unsigned int)src->ident);
         errno = ENOENT;
         return (-1);
     }
 }
 
 if (src->flags & EV_DELETE) {
-    knote_unlock(kn);
-    knote_release(filt, kn);
+    rv = knote_delete(filt, kn);
+    dbg_printf("knote_delete returned %d", rv);
+    /* NOTE: the knote lock was dropped by knote_delete() */
 } else if (src->flags & EV_DISABLE) {
     kn->kev.flags |= EV_DISABLE;
     rv = filt->kn_disable(filt, kn);
     dbg_printf("kn_disable returned %d", rv);
     knote_unlock(kn);
 } else if (src->flags & EV_ENABLE) {
     kn->kev.flags &= ~EV_DISABLE;
     rv = filt->kn_enable(filt, kn);
     dbg_printf("kn_enable returned %d", rv);
     knote_unlock(kn);
 } else if (src->flags & EV_ADD || src->flags == 0 || src->flags & EV_RECEIPT) {
     kn->kev.udata = src->udata;
     rv = filt->kn_modify(filt, kn, src);
     dbg_printf("kn_modify returned %d", rv);
     knote_unlock(kn);
 }
 
 return (rv);
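For context, this is how the flag combinations dispatched above look from the caller's side, using the standard kevent(2) API; the descriptor and filter choices are arbitrary illustrations.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <stddef.h>

    static int watch_then_forget(int kq, int fd)
    {
        struct kevent kev;

        /* EV_ADD: creates the knote (the "no entry found" path above is skipped) */
        EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
            return -1;

        /* EV_DISABLE / EV_ENABLE: toggles the existing knote via kn_disable()/kn_enable() */
        EV_SET(&kev, fd, EVFILT_READ, EV_DISABLE, 0, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
            return -1;
        EV_SET(&kev, fd, EVFILT_READ, EV_ENABLE, 0, 0, NULL);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
            return -1;

        /* EV_DELETE: now routed through knote_delete() instead of knote_release() */
        EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
        return kevent(kq, &kev, 1, NULL, 0, NULL);
    }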
@@ -22,8 +22,6 @@
 
 #include "alloc.h"
 
-static void knote_free(struct filter *, struct knote *);
-
 int
 knote_init(void)
 {
@@ -37,7 +35,7 @@ knote_cmp(struct knote *a, struct knote *b)
     return memcmp(&a->kev.ident, &b->kev.ident, sizeof(a->kev.ident));
 }
 
-RB_GENERATE(knt, knote, kntree_ent, knote_cmp)
+RB_GENERATE(knt, knote, kn_entries, knote_cmp)
 
 struct knote *
 knote_new(void)
@@ -49,57 +47,39 @@ knote_new(void)
     if (res == NULL)
         return (NULL);
 
 #if ! SERIALIZE_KEVENT
 #ifdef _WIN32
-    pthread_mutex_init(&res->mtx, NULL);
+    pthread_mutex_init(&res->kn_mtx, NULL);
 #else
-    if (pthread_mutex_init(&res->mtx, NULL)){
+    if (pthread_mutex_init(&res->kn_mtx, NULL)){
         dbg_perror("pthread_mutex_init");
         free(res);
         return (NULL);
     }
 #endif
 #endif
+    res->kn_ref = 1;
 
     return res;
 }
 
 void
 knote_retain(struct knote *kn)
 {
 #if ! SERIALIZE_KEVENT
     atomic_inc(&kn->kn_ref);
 #endif
     return (res);
 }
 
+/* Must be called while holding the knote lock */
 void
 knote_release(struct filter *filt, struct knote *kn)
 {
-#if SERIALIZE_KEVENT
-    knote_free(filt, kn);
-#else
-    int ref;
-    assert (kn->kn_ref > 0);
-
+    assert (kn->kn_ref >= 0);
 
-    /*
-     * Optimize for the case where only a single thread is accessing
-     * the knote structure.
-     */
-    if (fastpath(kn->kn_ref == 0))
-        ref = 0;
-    else
-        ref = atomic_dec(&kn->kn_ref);
-
-    if (ref == 0) {
-        dbg_printf("freeing knote at %p, rc=%d", kn, ref);
-        pthread_rwlock_wrlock(&filt->kf_knote_mtx);
-        knote_free(filt, kn);
-        pthread_rwlock_unlock(&filt->kf_knote_mtx);
+    if (--kn->kn_ref == 0) {
+        if (kn->kn_flags & KNFL_KNOTE_DELETED) {
+            dbg_printf("freeing knote at %p", kn);
+            knote_unlock(kn);
+            pthread_mutex_destroy(&kn->kn_mtx);
+            free(kn);
+        } else {
+            dbg_puts("this should never happen");
+        }
     } else {
-        dbg_printf("decrementing refcount of knote %p rc=%d", kn, ref);
+        dbg_printf("decrementing refcount of knote %p rc=%d", kn, kn->kn_ref);
+        knote_unlock(kn);
     }
-#endif
 }
 
 void
@@ -110,37 +90,41 @@ knote_insert(struct filter *filt, struct knote *kn)
     pthread_rwlock_unlock(&filt->kf_knote_mtx);
 }
 
-static void
-knote_free(struct filter *filt, struct knote *kn)
+/* Must be called while holding the knote lock */
+int
+knote_delete(struct filter *filt, struct knote *kn)
 {
-    RB_REMOVE(knt, &filt->kf_knote, kn);
-    filt->kn_delete(filt, kn);
-#if ! SERIALIZE_KEVENT
-    pthread_mutex_destroy(&kn->mtx);
-#endif
-    free(kn);
-    // mem_free(kn);
-}
+    struct knote query;
+    struct knote *tmp;
 
-/* XXX-FIXME this is broken and should be removed */
-void
-knote_free_all(struct filter *filt)
-{
-    struct knote *n1, *n2;
+    if (kn->kn_flags & KNFL_KNOTE_DELETED) {
+        dbg_puts("ERROR: double deletion detected");
+        return (-1);
+    }
 
-    abort();
-
-    /* Destroy all knotes */
+    /*
+     * Drop the knote lock, and acquire both the knotelist write lock
+     * and the knote lock. Verify that the knote wasn't removed by another
+     * thread before we acquired the knotelist lock.
+     */
+    query.kev.ident = kn->kev.ident;
+    knote_unlock(kn);
     pthread_rwlock_wrlock(&filt->kf_knote_mtx);
-    for (n1 = RB_MIN(knt, &filt->kf_knote); n1 != NULL; n1 = n2) {
-        n2 = RB_NEXT(knt, filt->kf_knote, n1);
-        RB_REMOVE(knt, &filt->kf_knote, n1);
-        free(n1);
+    tmp = RB_FIND(knt, &filt->kf_knote, &query);
+    if (tmp == kn) {
+        RB_REMOVE(knt, &filt->kf_knote, kn);
     }
     pthread_rwlock_unlock(&filt->kf_knote_mtx);
 
+    filt->kn_delete(filt, kn); //XXX-FIXME check return value
+
+    kn->kn_flags |= KNFL_KNOTE_DELETED;
+
+    knote_release(filt, kn);
+
+    return (0);
 }
 
 /* TODO: rename to knote_lookup_ident */
 struct knote *
 knote_lookup(struct filter *filt, short ident)
 {
@@ -151,10 +135,8 @@ knote_lookup(struct filter *filt, short ident)
 
     pthread_rwlock_rdlock(&filt->kf_knote_mtx);
     ent = RB_FIND(knt, &filt->kf_knote, &query);
-#if ! SERIALIZE_KEVENT
     if (ent != NULL)
         knote_lock(ent);
-#endif
     pthread_rwlock_unlock(&filt->kf_knote_mtx);
 
     dbg_printf("id=%d ent=%p", ident, ent);
@@ -162,8 +144,9 @@ knote_lookup(struct filter *filt, short ident)
     return (ent);
 }
 
+#if DEADWOOD
 struct knote *
-knote_lookup_data(struct filter *filt, intptr_t data)
+knote_get_by_data(struct filter *filt, intptr_t data)
 {
     struct knote *kn;
 
@@ -172,14 +155,15 @@ knote_lookup_data(struct filter *filt, intptr_t data)
         if (data == kn->kev.data)
             break;
     }
-#if ! SERIALIZE_KEVENT
-    if (kn != NULL)
+    if (kn != NULL) {
         knote_lock(kn);
-#endif
+        knote_retain(kn);
+    }
     pthread_rwlock_unlock(&filt->kf_knote_mtx);
 
     return (kn);
 }
+#endif
 
 int
 knote_disable(struct filter *filt, struct knote *kn)
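The knote_delete() added above cannot take the knote-list write lock while holding the per-knote mutex, so it drops the knote lock, takes the list lock, and re-looks the knote up to confirm that no other thread removed it in the unlocked window. A self-contained sketch of that revalidation pattern, with hypothetical names (not libkqueue code):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stddef.h>

    struct elem {
        int              id;
        struct elem     *next;
        pthread_mutex_t  mtx;     /* per-element lock, like kn_mtx */
    };

    static struct elem      *list_head;
    static pthread_rwlock_t  list_lock = PTHREAD_RWLOCK_INITIALIZER;

    /* Called with e->mtx held; returns true if this thread unlinked the element. */
    static bool remove_revalidated(struct elem *e)
    {
        struct elem **pp;
        bool removed = false;

        pthread_mutex_unlock(&e->mtx);           /* lock order: list lock before element lock */
        pthread_rwlock_wrlock(&list_lock);
        for (pp = &list_head; *pp != NULL; pp = &(*pp)->next) {
            if (*pp == e) {                      /* revalidate: still linked? */
                *pp = e->next;
                removed = true;
                break;
            }
        }
        pthread_rwlock_unlock(&list_lock);
        return removed;
    }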
@@ -63,12 +63,16 @@ struct eventfd {
 #endif
 };
 
-/* TODO: Make this a variable length structure and allow
-   each filter to add custom fields at the end.
+/*
+ * Flags used by knote->kn_flags
  */
+#define KNFL_PASSIVE_SOCKET  (0x01)  /* Socket is in listen(2) mode */
+#define KNFL_REGULAR_FILE    (0x02)  /* File descriptor is a regular file */
+#define KNFL_KNOTE_DELETED   (0x10)  /* The knote object is no longer valid */
 
 struct knote {
     struct kevent kev;
-    int flags;
+    int kn_flags;
     union {
         /* OLD */
         int pfd;                /* Used by timerfd */
@@ -82,15 +86,12 @@ struct knote {
         void *handle;           /* Used by win32 filters */
     } data;
     struct kqueue* kn_kq;
 #if ! SERIALIZE_KEVENT
-    pthread_mutex_t mtx;
+    pthread_mutex_t kn_mtx;
     volatile uint32_t kn_ref;
 #endif
 #if defined(KNOTE_PLATFORM_SPECIFIC)
     KNOTE_PLATFORM_SPECIFIC;
 #endif
     TAILQ_ENTRY(knote) event_ent;    /* Used by filter->kf_event */
-    RB_ENTRY(knote) kntree_ent;      /* Used by filter->kntree */
+    RB_ENTRY(knote) kn_entries;
 };
 
 #define KNOTE_ENABLE(ent) do { \
@@ -185,12 +186,11 @@ extern const struct kqueue_vtable kqops;
  * knote internal API
  */
 struct knote * knote_lookup(struct filter *, short);
-struct knote * knote_lookup_data(struct filter *filt, intptr_t);
+//DEADWOOD: struct knote * knote_get_by_data(struct filter *filt, intptr_t);
 struct knote * knote_new(void);
 void knote_release(struct filter *, struct knote *);
 void knote_retain(struct knote *);
-void knote_free_all(struct filter *);
 void knote_insert(struct filter *, struct knote *);
+int knote_delete(struct filter *, struct knote *);
 int knote_init(void);
 int knote_disable(struct filter *, struct knote *);
 #if ! SERIALIZE_KEVENT
@@ -173,8 +173,9 @@ linux_kevent_copyout(struct kqueue *kq, int nready,
      */
     if (eventlist->flags & EV_DISPATCH)
         knote_disable(filt, kn); //TODO: Error checking
+        //^^^^^^^^ FIXME, shouldn't this need the lock held?
     if (eventlist->flags & EV_ONESHOT)
-        knote_release(filt, kn); //TODO: Error checking
+        knote_delete(filt, kn); //TODO: Error checking
 
     /* If an empty kevent structure is returned, the event is discarded. */
     /* TODO: add these semantics to windows + solaris platform.c */
@@ -290,7 +291,7 @@ linux_get_descriptor_type(struct knote *kn)
     }
     if (! S_ISSOCK(sb.st_mode)) {
         //FIXME: could be a pipe, device file, or other non-regular file
-        kn->flags |= KNFL_REGULAR_FILE;
+        kn->kn_flags |= KNFL_REGULAR_FILE;
         dbg_printf("fd %d is a regular file\n", (int)kn->kev.ident);
         return (0);
     }
@@ -312,7 +313,7 @@ linux_get_descriptor_type(struct knote *kn)
         }
     } else {
         if (lsock)
-            kn->flags |= KNFL_PASSIVE_SOCKET;
+            kn->kn_flags |= KNFL_PASSIVE_SOCKET;
         return (0);
     }
 }
@@ -39,12 +39,6 @@ extern long int syscall (long int __sysno, ...);
 #define kqueue_epfd(kq) ((kq)->kq_id)
 #define filter_epfd(filt) ((filt)->kf_kqueue->kq_id)
 
-/*
- * Flags used by knote->flags
- */
-#define KNFL_PASSIVE_SOCKET (0x01) /* Socket is in listen(2) mode */
-#define KNFL_REGULAR_FILE (0x02) /* File descriptor is a regular file */
-
 /*
  * Additional members of struct filter
  */
@@ -59,7 +59,7 @@ evfilt_read_copyout(struct kevent *dst, struct knote *src, void *ptr)
     struct epoll_event * const ev = (struct epoll_event *) ptr;
 
     /* Special case: for regular files, return the offset from current position to end of file */
-    if (src->flags & KNFL_REGULAR_FILE) {
+    if (src->kn_flags & KNFL_REGULAR_FILE) {
         memcpy(dst, &src->kev, sizeof(*dst));
         dst->data = get_eof_offset(src->kev.ident);
 
@@ -115,7 +115,7 @@ evfilt_read_copyout(struct kevent *dst, struct knote *src, void *ptr)
     if (ev->events & EPOLLERR)
         dst->fflags = 1; /* FIXME: Return the actual socket error */
 
-    if (src->flags & KNFL_PASSIVE_SOCKET) {
+    if (src->kn_flags & KNFL_PASSIVE_SOCKET) {
         /* On return, data contains the length of the
            socket backlog. This is not available under Linux.
          */
@@ -161,7 +161,7 @@ evfilt_read_knote_create(struct filter *filt, struct knote *kn)
     ev.data.ptr = kn;
 
     /* Special case: for regular files, add a surrogate eventfd that is always readable */
-    if (kn->flags & KNFL_REGULAR_FILE) {
+    if (kn->kn_flags & KNFL_REGULAR_FILE) {
         int evfd;
 
         kn->kn_epollfd = filter_epfd(filt);
@@ -200,7 +200,7 @@ evfilt_read_knote_delete(struct filter *filt, struct knote *kn)
     if (kn->kev.flags & EV_DISABLE)
         return (0);
 
-    if ((kn->flags & KNFL_REGULAR_FILE && kn->kdata.kn_eventfd != -1) < 0) {
+    if ((kn->kn_flags & KNFL_REGULAR_FILE && kn->kdata.kn_eventfd != -1) < 0) {
         if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) {
             dbg_perror("epoll_ctl(2)");
             return (-1);
@@ -221,7 +221,7 @@ evfilt_read_knote_enable(struct filter *filt, struct knote *kn)
     ev.events = kn->data.events;
     ev.data.ptr = kn;
 
-    if (kn->flags & KNFL_REGULAR_FILE) {
+    if (kn->kn_flags & KNFL_REGULAR_FILE) {
         if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_ADD, kn->kdata.kn_eventfd, &ev) < 0) {
             dbg_perror("epoll_ctl(2)");
             return (-1);
@@ -235,7 +235,7 @@ evfilt_read_knote_enable(struct filter *filt, struct knote *kn)
 int
 evfilt_read_knote_disable(struct filter *filt, struct knote *kn)
 {
-    if (kn->flags & KNFL_REGULAR_FILE) {
+    if (kn->kn_flags & KNFL_REGULAR_FILE) {
         if (epoll_ctl(kn->kn_epollfd, EPOLL_CTL_DEL, kn->kdata.kn_eventfd, NULL) < 0) {
             dbg_perror("epoll_ctl(2)");
             return (-1);
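The read filter above attaches "a surrogate eventfd that is always readable" for regular files. A plausible sketch of that trick, as an assumption rather than the library's exact code, keeps the eventfd counter non-zero so epoll always reports it readable:

    #include <sys/epoll.h>
    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <unistd.h>

    static int add_always_readable_surrogate(int epfd, void *udata)
    {
        struct epoll_event ev;
        uint64_t one = 1;
        int evfd;

        evfd = eventfd(0, 0);
        if (evfd < 0)
            return (-1);

        /* A non-zero counter keeps the eventfd readable until someone read(2)s it. */
        if (write(evfd, &one, sizeof(one)) < 0) {
            close(evfd);
            return (-1);
        }

        ev.events = EPOLLIN;
        ev.data.ptr = udata;
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, evfd, &ev) < 0) {
            close(evfd);
            return (-1);
        }
        return (evfd);   /* caller must close(2) it when the knote is deleted */
    }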
@@ -66,7 +66,7 @@ evfilt_socket_knote_create(struct filter *filt, struct knote *kn)
         return (-1);
 
     /* TODO: return EBADF? */
-    if (kn->flags & KNFL_REGULAR_FILE)
+    if (kn->kn_flags & KNFL_REGULAR_FILE)
         return (-1);
 
     /* Convert the kevent into an epoll_event */
@@ -207,7 +207,7 @@ solaris_kevent_copyout(struct kqueue *kq, int nready,
         if (eventlist->flags & EV_DISPATCH)
             knote_disable(filt, kn); //TODO: Error checking
         if (eventlist->flags & EV_ONESHOT)
-            knote_release(filt, kn); //TODO: Error checking
+            knote_delete(filt, kn); //TODO: Error checking
 
         eventlist++;
     }
@@ -204,7 +204,7 @@ windows_kevent_copyout(struct kqueue *kq, int nready,
         if (eventlist->flags & EV_DISPATCH)
             knote_disable(filt, kn); //TODO: Error checking
         if (eventlist->flags & EV_ONESHOT)
-            knote_release(filt, kn); //TODO: Error checking
+            knote_delete(filt, kn); //TODO: Error checking
 
         /* If an empty kevent structure is returned, the event is discarded. */
         if (fastpath(eventlist->filter != 0)) {