rcutorture: Test SRCU cleanup code path
The current rcutorture testing does not do any cleanup operations. This works only because the srcu_struct is statically allocated; for a dynamically initialized srcu_struct, skipping cleanup would represent a memory leak of the associated dynamically allocated ->per_cpu_ref per-CPU variables. However, rcutorture currently uses a statically allocated srcu_struct, which cannot legally be passed to cleanup_srcu_struct(). Therefore, this commit adds a second form of SRCU (called srcud) that dynamically allocates and frees the associated per-CPU variables. This commit also adds a ->cleanup() member to rcu_torture_ops that is invoked at the end of the test, after ->cb_barrier(). This ->cleanup() pointer is NULL for all existing tests, and is thus used only by srcud. Finally, the SRCU-P torture-test configuration selects srcud instead of srcu, with SRCU-N continuing to use srcu, thereby testing both static and dynamic srcu_struct structures.

Reported-by: "Ahmed, Iftekhar" <ahmedi@onid.oregonstate.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
commit ca1d51ed98
parent 6c7ed42c81
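For readers unfamiliar with the two SRCU initialization styles the commit message contrasts, here is a minimal sketch (illustrative only, not part of this patch; the example_* names are invented for the example) of the distinction that srcud is added to exercise: DEFINE_STATIC_SRCU() defines a statically allocated srcu_struct that must never be passed to cleanup_srcu_struct(), whereas a dynamically initialized srcu_struct is set up with init_srcu_struct() and must eventually be passed to cleanup_srcu_struct() to free its dynamically allocated per-CPU state.

#include <linux/srcu.h>

/* Statically allocated flavor: storage defined at build time, never cleaned up. */
DEFINE_STATIC_SRCU(example_static_srcu);

/* Dynamically initialized flavor: per-CPU state allocated at init time. */
static struct srcu_struct example_dynamic_srcu;

static int example_init(void)		/* hypothetical helper, for illustration only */
{
	return init_srcu_struct(&example_dynamic_srcu);	/* allocates per-CPU state */
}

static void example_exit(void)		/* hypothetical helper, for illustration only */
{
	/* Legal only because example_dynamic_srcu was dynamically initialized. */
	cleanup_srcu_struct(&example_dynamic_srcu);
}

The srcud torture flavor added below follows exactly this second pattern, which is what lets rcutorture cover the SRCU cleanup code path.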
kernel/rcu/rcutorture.c:

@@ -241,6 +241,7 @@ rcu_torture_free(struct rcu_torture *p)
 struct rcu_torture_ops {
 	int ttype;
 	void (*init)(void);
+	void (*cleanup)(void);
 	int (*readlock)(void);
 	void (*read_delay)(struct torture_random_state *rrsp);
 	void (*readunlock)(int idx);
@@ -477,10 +478,12 @@ static struct rcu_torture_ops rcu_busted_ops = {
  */
 
 DEFINE_STATIC_SRCU(srcu_ctl);
+static struct srcu_struct srcu_ctld;
+static struct srcu_struct *srcu_ctlp = &srcu_ctl;
 
-static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
+static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
 {
-	return srcu_read_lock(&srcu_ctl);
+	return srcu_read_lock(srcu_ctlp);
 }
 
 static void srcu_read_delay(struct torture_random_state *rrsp)
@@ -499,49 +502,49 @@ static void srcu_read_delay(struct torture_random_state *rrsp)
 	rcu_read_delay(rrsp);
 }
 
-static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
+static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
 {
-	srcu_read_unlock(&srcu_ctl, idx);
+	srcu_read_unlock(srcu_ctlp, idx);
 }
 
 static unsigned long srcu_torture_completed(void)
 {
-	return srcu_batches_completed(&srcu_ctl);
+	return srcu_batches_completed(srcu_ctlp);
 }
 
 static void srcu_torture_deferred_free(struct rcu_torture *rp)
 {
-	call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
+	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
 }
 
 static void srcu_torture_synchronize(void)
 {
-	synchronize_srcu(&srcu_ctl);
+	synchronize_srcu(srcu_ctlp);
 }
 
 static void srcu_torture_call(struct rcu_head *head,
 			      void (*func)(struct rcu_head *head))
 {
-	call_srcu(&srcu_ctl, head, func);
+	call_srcu(srcu_ctlp, head, func);
 }
 
 static void srcu_torture_barrier(void)
 {
-	srcu_barrier(&srcu_ctl);
+	srcu_barrier(srcu_ctlp);
 }
 
 static void srcu_torture_stats(void)
 {
 	int cpu;
-	int idx = srcu_ctl.completed & 0x1;
+	int idx = srcu_ctlp->completed & 0x1;
 
 	pr_alert("%s%s per-CPU(idx=%d):",
 		 torture_type, TORTURE_FLAG, idx);
 	for_each_possible_cpu(cpu) {
 		long c0, c1;
 
-		c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx];
-		c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx];
+		c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx];
+		c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx];
 		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
 	}
 	pr_cont("\n");
@@ -549,7 +552,7 @@ static void srcu_torture_stats(void)
 
 static void srcu_torture_synchronize_expedited(void)
 {
-	synchronize_srcu_expedited(&srcu_ctl);
+	synchronize_srcu_expedited(srcu_ctlp);
 }
 
 static struct rcu_torture_ops srcu_ops = {
@@ -569,6 +572,38 @@ static struct rcu_torture_ops srcu_ops = {
 	.name		= "srcu"
 };
 
+static void srcu_torture_init(void)
+{
+	rcu_sync_torture_init();
+	WARN_ON(init_srcu_struct(&srcu_ctld));
+	srcu_ctlp = &srcu_ctld;
+}
+
+static void srcu_torture_cleanup(void)
+{
+	cleanup_srcu_struct(&srcu_ctld);
+	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
+}
+
+/* As above, but dynamically allocated. */
+static struct rcu_torture_ops srcud_ops = {
+	.ttype		= SRCU_FLAVOR,
+	.init		= srcu_torture_init,
+	.cleanup	= srcu_torture_cleanup,
+	.readlock	= srcu_torture_read_lock,
+	.read_delay	= srcu_read_delay,
+	.readunlock	= srcu_torture_read_unlock,
+	.started	= NULL,
+	.completed	= srcu_torture_completed,
+	.deferred_free	= srcu_torture_deferred_free,
+	.sync		= srcu_torture_synchronize,
+	.exp_sync	= srcu_torture_synchronize_expedited,
+	.call		= srcu_torture_call,
+	.cb_barrier	= srcu_torture_barrier,
+	.stats		= srcu_torture_stats,
+	.name		= "srcud"
+};
+
 /*
  * Definitions for sched torture testing.
  */
@@ -1053,7 +1088,7 @@ static void rcu_torture_timer(unsigned long unused)
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
-				  srcu_read_lock_held(&srcu_ctl));
+				  srcu_read_lock_held(srcu_ctlp));
 	if (p == NULL) {
 		/* Leave because rcu_torture_writer is not yet underway */
 		cur_ops->readunlock(idx);
@@ -1127,7 +1162,7 @@ rcu_torture_reader(void *arg)
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
-				  srcu_read_lock_held(&srcu_ctl));
+				  srcu_read_lock_held(srcu_ctlp));
 	if (p == NULL) {
 		/* Wait for rcu_torture_writer to get underway */
 		cur_ops->readunlock(idx);
@@ -1590,10 +1625,14 @@ rcu_torture_cleanup(void)
 			rcutorture_booster_cleanup(i);
 	}
 
-	/* Wait for all RCU callbacks to fire. */
-
+	/*
+	 * Wait for all RCU callbacks to fire, then do flavor-specific
+	 * cleanup operations.
+	 */
 	if (cur_ops->cb_barrier != NULL)
 		cur_ops->cb_barrier();
+	if (cur_ops->cleanup != NULL)
+		cur_ops->cleanup();
 
 	rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 
@@ -1670,8 +1709,8 @@ rcu_torture_init(void)
 	int cpu;
 	int firsterr = 0;
 	static struct rcu_torture_ops *torture_ops[] = {
-		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
-		RCUTORTURE_TASKS_OPS
+		&rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
+		&sched_ops, RCUTORTURE_TASKS_OPS
 	};
 
 	if (!torture_init_begin(torture_type, verbose, &torture_runnable))
tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot:

@@ -1 +1 @@
-rcutorture.torture_type=srcu
+rcutorture.torture_type=srcud