mirror of https://github.com/FEX-Emu/linux.git, synced 2025-01-12 12:22:42 +00:00

commit f3c9d38a26
This looks like a lot of change, but in fact it's not. Mostly it is things moving from one file to another. The substance of the change is that, instead of queuing lock completions and callbacks from the DLM, we now pass them directly to GFS2. This gives us a net loss of two list heads per glock (a fair saving in memory), a reduction in the latency of delivering the messages to GFS2, and one thread fewer. It also fixes a bug where callbacks and completions could be delivered in the wrong order because of the unnecessary queuing.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
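In outline, the new delivery path looks like the sketch below. This is an illustration of the idea rather than the patch itself: gdlm_ast() is a plausible name for the DLM completion callback, the lp->ls back-pointer and the callback argument are assumptions, and only the ls->fscb(ls->sdp, ...) shape is taken from the file shown here.

static void gdlm_ast(void *astarg)
{
        struct gdlm_lock *lp = astarg;  /* astarg assumed to be the lock */
        struct gdlm_ls *ls = lp->ls;    /* back-pointer assumed */

        /* Old scheme: add lp to a per-lockspace completion list and wake
         * a second thread, which delivered the message later and could
         * reorder it against blocking callbacks. */

        /* New scheme: hand the completion straight to GFS2 through the
         * fs callback, the same way gdlm_thread() below delivers
         * LM_CB_DROPLOCKS. */
        ls->fscb(ls->sdp, LM_CB_ASYNC, &lp->lockname);  /* args illustrative */
}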
88 lines
1.8 KiB
C
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include "lock_dlm.h"

/* Test, under async_lock, whether the submit list has any work queued. */
static inline int no_work(struct gdlm_ls *ls)
{
        int ret;

        spin_lock(&ls->async_lock);
        ret = list_empty(&ls->submit);
        spin_unlock(&ls->async_lock);

        return ret;
}

/* Rate-limited check: ask the fs to drop locks at most once per
 * drop_locks_period seconds, and only while the total lock count is at
 * or above the drop_locks_count threshold. */
static inline int check_drop(struct gdlm_ls *ls)
{
        if (!ls->drop_locks_count)
                return 0;

        if (time_after(jiffies, ls->drop_time + ls->drop_locks_period * HZ)) {
                ls->drop_time = jiffies;
                if (ls->all_locks_count >= ls->drop_locks_count)
                        return 1;
        }
        return 0;
}

static int gdlm_thread(void *data)
{
        struct gdlm_ls *ls = (struct gdlm_ls *) data;
        struct gdlm_lock *lp = NULL;

        while (!kthread_should_stop()) {
                wait_event_interruptible(ls->thread_wait,
                                !no_work(ls) || kthread_should_stop());

                spin_lock(&ls->async_lock);

                if (!list_empty(&ls->submit)) {
                        lp = list_entry(ls->submit.next, struct gdlm_lock,
                                        delay_list);
                        list_del_init(&lp->delay_list);
                        /* Drop the spinlock while calling into the DLM. */
                        spin_unlock(&ls->async_lock);
                        gdlm_do_lock(lp);
                        spin_lock(&ls->async_lock);
                }
                /* Does this ever happen these days? I hope not anyway */
                if (check_drop(ls)) {
                        spin_unlock(&ls->async_lock);
                        ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
                        spin_lock(&ls->async_lock);
                }
                spin_unlock(&ls->async_lock);
        }

        return 0;
}

int gdlm_init_threads(struct gdlm_ls *ls)
{
        struct task_struct *p;
        int error;

        p = kthread_run(gdlm_thread, ls, "lock_dlm");
        if (IS_ERR(p)) {
                /* kthread_run() returns an ERR_PTR() on failure. */
                error = PTR_ERR(p);
                log_error("can't start lock_dlm thread %d", error);
                return error;
        }
        ls->thread = p;

        return 0;
}

void gdlm_release_threads(struct gdlm_ls *ls)
{
        kthread_stop(ls->thread);
}
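For context, the producer side that pairs with no_work() and the wait in gdlm_thread() lives outside this file; in spirit it looks like the sketch below (a gdlm_submit_delayed()-style helper and the lp->ls back-pointer are assumptions here, not taken from this file):

static void gdlm_submit_delayed(struct gdlm_lock *lp)
{
        struct gdlm_ls *ls = lp->ls;    /* back-pointer assumed */

        spin_lock(&ls->async_lock);
        if (list_empty(&lp->delay_list))        /* avoid double-queuing */
                list_add_tail(&lp->delay_list, &ls->submit);
        spin_unlock(&ls->async_lock);
        wake_up(&ls->thread_wait);      /* pairs with wait_event_interruptible() */
}

Because no_work() re-tests list_empty(&ls->submit) under async_lock, a wake-up issued after the list insertion cannot be lost: either the thread sees the new entry when it evaluates the wait condition, or it is already awake and will find the entry on its next pass.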