perf_counter: remove rq->lock usage
Now that all the task runtime clock users are gone, remove the ugly
rq->lock usage from perf counters, which solves the nasty deadlock
seen when a software task clock counter was read from an NMI overflow
context.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090406094518.531137582@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent a39d6f2556
commit 849691a6cd
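The deadlock being fixed is self-inflicted: an NMI can fire while the
interrupted CPU already holds rq->lock, and the old software task clock
read path would then call curr_rq_lock_irq_save() and spin forever on
that same lock, since local_irq_save() cannot mask an NMI. Below is a
minimal userspace sketch of the failure mode, not kernel code: the
atomic flag stands in for rq->lock and SIGALRM stands in for the NMI,
and all names are illustrative.

#include <signal.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_flag rq_lock = ATOMIC_FLAG_INIT;	/* stand-in for rq->lock */

/* Stand-in for the NMI-time counter read that used to take rq->lock. */
static void nmi_handler(int sig)
{
	static const char msg[] = "NMI: rq->lock already held -> would spin forever\n";

	(void)sig;
	/*
	 * The old curr_rq_lock_irq_save() would spin here; we trylock
	 * instead so the demo reports the deadlock rather than hanging.
	 */
	if (atomic_flag_test_and_set(&rq_lock))
		write(2, msg, sizeof msg - 1);
	else
		atomic_flag_clear(&rq_lock);
}

int main(void)
{
	signal(SIGALRM, nmi_handler);

	atomic_flag_test_and_set(&rq_lock);	/* "scheduler" takes rq->lock... */
	raise(SIGALRM);				/* ...and the "NMI" arrives mid-hold */
	atomic_flag_clear(&rq_lock);
	return 0;
}

With rq->lock out of the picture, the converted paths below take only
the counter context's ctx->lock or plain local_irq_save(), so the
NMI-time read no longer touches a lock the interrupted context might
already hold.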
include/linux/kernel_stat.h
@@ -85,8 +85,6 @@ static inline unsigned int kstat_irqs(unsigned int irq)
 /*
  * Lock/unlock the current runqueue - to extract task statistics:
  */
-extern void curr_rq_lock_irq_save(unsigned long *flags);
-extern void curr_rq_unlock_irq_restore(unsigned long *flags);
 extern unsigned long long __task_delta_exec(struct task_struct *tsk, int update);
 extern unsigned long long task_delta_exec(struct task_struct *);
 
kernel/perf_counter.c
@@ -172,8 +172,7 @@ static void __perf_counter_remove_from_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 
 	counter_sched_out(counter, cpuctx, ctx);
 
@@ -198,8 +197,7 @@ static void __perf_counter_remove_from_context(void *info)
 			perf_max_counters - perf_reserved_percpu);
 	}
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 
@@ -319,8 +317,7 @@ static void __perf_counter_disable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 
 	/*
 	 * If the counter is on, turn it off.
@@ -336,8 +333,7 @@ static void __perf_counter_disable(void *info)
 			counter->state = PERF_COUNTER_STATE_OFF;
 	}
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -515,8 +511,7 @@ static void __perf_install_in_context(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 	update_context_time(ctx);
 
 	/*
@@ -565,8 +560,7 @@ static void __perf_install_in_context(void *info)
 unlock:
 	hw_perf_restore(perf_flags);
 
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -641,8 +635,7 @@ static void __perf_counter_enable(void *info)
 	if (ctx->task && cpuctx->task_ctx != ctx)
 		return;
 
-	curr_rq_lock_irq_save(&flags);
-	spin_lock(&ctx->lock);
+	spin_lock_irqsave(&ctx->lock, flags);
 	update_context_time(ctx);
 
 	counter->prev_state = counter->state;
@@ -678,8 +671,7 @@ static void __perf_counter_enable(void *info)
 	}
 
 unlock:
-	spin_unlock(&ctx->lock);
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 }
 
 /*
@@ -971,7 +963,7 @@ int perf_counter_task_disable(void)
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 
 	perf_counter_task_sched_out(curr, cpu);
@@ -992,9 +984,7 @@ int perf_counter_task_disable(void)
 
 	hw_perf_restore(perf_flags);
 
-	spin_unlock(&ctx->lock);
-
-	curr_rq_unlock_irq_restore(&flags);
+	spin_unlock_irqrestore(&ctx->lock, flags);
 
 	return 0;
 }
@@ -1011,7 +1001,7 @@ int perf_counter_task_enable(void)
 	if (likely(!ctx->nr_counters))
 		return 0;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	cpu = smp_processor_id();
 
 	perf_counter_task_sched_out(curr, cpu);
@@ -1037,7 +1027,7 @@ int perf_counter_task_enable(void)
 
 	perf_counter_task_sched_in(curr, cpu);
 
-	curr_rq_unlock_irq_restore(&flags);
+	local_irq_restore(flags);
 
 	return 0;
 }
@@ -1095,12 +1085,12 @@ static void __read(void *info)
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
 	counter->hw_ops->read(counter);
 	update_counter_times(counter);
-	curr_rq_unlock_irq_restore(&flags);
+	local_irq_restore(flags);
 }
 
 static u64 perf_counter_read(struct perf_counter *counter)
@@ -2890,7 +2880,7 @@ __perf_counter_exit_task(struct task_struct *child,
 	 * Be careful about zapping the list - IRQ/NMI context
 	 * could still be processing it:
 	 */
-	curr_rq_lock_irq_save(&flags);
+	local_irq_save(flags);
 	perf_flags = hw_perf_save_disable();
 
 	cpuctx = &__get_cpu_var(perf_cpu_context);
@@ -2903,7 +2893,7 @@ __perf_counter_exit_task(struct task_struct *child,
 		child_ctx->nr_counters--;
 
 		hw_perf_restore(perf_flags);
-		curr_rq_unlock_irq_restore(&flags);
+		local_irq_restore(flags);
 	}
 
 	parent_counter = child_counter->parent;
kernel/sched.c
@@ -997,26 +997,6 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
-void curr_rq_lock_irq_save(unsigned long *flags)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	local_irq_save(*flags);
-	rq = cpu_rq(smp_processor_id());
-	spin_lock(&rq->lock);
-}
-
-void curr_rq_unlock_irq_restore(unsigned long *flags)
-	__releases(rq->lock)
-{
-	struct rq *rq;
-
-	rq = cpu_rq(smp_processor_id());
-	spin_unlock(&rq->lock);
-	local_irq_restore(*flags);
-}
-
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);