sched: print_rq(): Don't use tasklist_lock

read_lock_irqsave(tasklist_lock) in print_rq() looks strange. We do
not need to disable irqs, and in any case they are already disabled
by the caller.

And afaics this lock buys nothing; we can rely on rcu_read_lock().
In that case it also makes sense to move rcu_read_lock/unlock from
the caller into print_rq().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20140921193341.GA28628@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -150,7 +150,6 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 {
 	struct task_struct *g, *p;
-	unsigned long flags;
 
 	SEQ_printf(m,
 	"\nrunnable tasks:\n"
@@ -159,14 +158,14 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
 	"------------------------------------------------------"
 	"----------------------------------------------------\n");
 
-	read_lock_irqsave(&tasklist_lock, flags);
+	rcu_read_lock();
 	for_each_process_thread(g, p) {
 		if (task_cpu(p) != rq_cpu)
 			continue;
 
 		print_task(m, rq, p);
 	}
-	read_unlock_irqrestore(&tasklist_lock, flags);
+	rcu_read_unlock();
 }
 
 void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
@@ -331,9 +330,7 @@ do { \
 	print_cfs_stats(m, cpu);
 	print_rt_stats(m, cpu);
 
-	rcu_read_lock();
 	print_rq(m, rq, cpu);
-	rcu_read_unlock();
 	spin_unlock_irqrestore(&sched_debug_lock, flags);
 	SEQ_printf(m, "\n");
 }
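
For clarity, here is a sketch of how print_rq() reads after the change, reconstructed
from the hunks above (the SEQ_printf() header strings are abbreviated here). The old
read_lock_irqsave() was doubly redundant: the caller already holds sched_debug_lock
with irqs disabled (note the spin_unlock_irqrestore() in the last hunk), and a
read-only walk of the task list is safe under rcu_read_lock() alone.

	static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
	{
		struct task_struct *g, *p;

		SEQ_printf(m, "\nrunnable tasks:\n ...\n");	/* header abbreviated */

		/*
		 * for_each_process_thread() can be walked safely inside an RCU
		 * read-side critical section; tasklist_lock and the irqsave
		 * dance are not needed for this read-only debug dump.
		 */
		rcu_read_lock();
		for_each_process_thread(g, p) {
			if (task_cpu(p) != rq_cpu)
				continue;

			print_task(m, rq, p);
		}
		rcu_read_unlock();
	}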