powerpc/kprobes: Do not disable interrupts for optprobes and kprobes_on_ftrace

Per Documentation/kprobes.txt, we don't necessarily need to disable
interrupts before invoking the kprobe handlers. Masami submitted
similar changes for x86 via commit a19b2e3d78 ("kprobes/x86: Remove
IRQ disabling from ftrace-based/optimized kprobes"). Do the same for
powerpc.

Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Naveen N. Rao 2017-10-23 22:07:39 +05:30 committed by Michael Ellerman
parent 8a2d71a3f2
commit f72180cc93
2 changed files with 2 additions and 18 deletions

View File

@@ -75,11 +75,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 {
 	struct kprobe *p;
 	struct kprobe_ctlblk *kcb;
-	unsigned long flags;
 
-	/* Disable irq for emulating a breakpoint and avoiding preempt */
-	local_irq_save(flags);
-	hard_irq_disable();
 	preempt_disable();
 
 	p = get_kprobe((kprobe_opcode_t *)nip);
@@ -105,16 +101,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 		else {
 			/*
 			 * If pre_handler returns !0, it sets regs->nip and
-			 * resets current kprobe. In this case, we still need
-			 * to restore irq, but not preemption.
+			 * resets current kprobe. In this case, we should not
+			 * re-enable preemption.
 			 */
-			local_irq_restore(flags);
 			return;
 		}
 	}
 end:
 	preempt_enable_no_resched();
-	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);

View File

@@ -115,14 +115,10 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
 			       struct pt_regs *regs)
 {
-	unsigned long flags;
-
 	/* This is possible if op is under delayed unoptimizing */
 	if (kprobe_disabled(&op->kp))
 		return;
 
-	local_irq_save(flags);
-	hard_irq_disable();
 	preempt_disable();
 
 	if (kprobe_running()) {
if (kprobe_running()) { if (kprobe_running()) {
@@ -135,13 +131,7 @@ static void optimized_callback(struct optimized_kprobe *op,
 		__this_cpu_write(current_kprobe, NULL);
 	}
 
-	/*
-	 * No need for an explicit __hard_irq_enable() here.
-	 * local_irq_restore() will re-enable interrupts,
-	 * if they were hard disabled.
-	 */
 	preempt_enable_no_resched();
-	local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);