Per Documentation/kprobes.txt, kprobe handlers need to run with
preemption disabled, but interrupts do not necessarily have to be
disabled. Masami submitted similar changes for x86 via commit
a19b2e3d783964 ("kprobes/x86: Remove IRQ disabling from
ftrace-based/optimized kprobes"). Do the same for powerpc.

Signed-off-by: Naveen N. Rao <naveen.n....@linux.vnet.ibm.com>
---
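Note for reviewers: with the irq save/restore gone, the ftrace-based
handler reduces to the pattern sketched below. This is an illustrative
sketch assembled from the hunks in this patch, not the verbatim file
contents; the powerpc-specific regs->nip fixup around the pre_handler
call is elided as "..." comments.

	#include <linux/kprobes.h>
	#include <linux/preempt.h>

	void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
				   struct ftrace_ops *ops, struct pt_regs *regs)
	{
		struct kprobe *p;
		struct kprobe_ctlblk *kcb;

		/*
		 * Preemption stays disabled to keep the per-cpu
		 * current_kprobe/ctlblk state consistent; interrupts are
		 * left alone, since per Documentation/kprobes.txt the
		 * handlers do not require irqs to be disabled.
		 */
		preempt_disable();

		p = get_kprobe((kprobe_opcode_t *)nip);
		if (unlikely(!p) || kprobe_disabled(p))
			goto end;

		kcb = get_kprobe_ctlblk();
		if (kprobe_running()) {
			kprobes_inc_nmissed_count(p);
		} else {
			/* ... adjust regs->nip, set current_kprobe and kcb ... */
			if (p->pre_handler && p->pre_handler(p, regs)) {
				/*
				 * pre_handler set regs->nip and reset the
				 * current kprobe: return with preemption
				 * still disabled.
				 */
				return;
			}
			/* ... restore regs->nip, clear current_kprobe ... */
		}
	end:
		preempt_enable_no_resched();
	}
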
 arch/powerpc/kernel/kprobes-ftrace.c | 10 ++--------
 arch/powerpc/kernel/optprobes.c      | 10 ----------
 2 files changed, 2 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 4b1f34f685b1..7a1f99f1b47f 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -75,11 +75,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
 {
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
-       unsigned long flags;
 
-       /* Disable irq for emulating a breakpoint and avoiding preempt */
-       local_irq_save(flags);
-       hard_irq_disable();
        preempt_disable();
 
        p = get_kprobe((kprobe_opcode_t *)nip);
@@ -105,16 +101,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
                else {
                        /*
                         * If pre_handler returns !0, it sets regs->nip and
-                        * resets current kprobe. In this case, we still need
-                        * to restore irq, but not preemption.
+                        * resets current kprobe. In this case, we should not
+                        * re-enable preemption.
                         */
-                       local_irq_restore(flags);
                        return;
                }
        }
 end:
        preempt_enable_no_resched();
-       local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 60ba7f1370a8..8237884ca389 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -115,14 +115,10 @@ static unsigned long can_optimize(struct kprobe *p)
 static void optimized_callback(struct optimized_kprobe *op,
                               struct pt_regs *regs)
 {
-       unsigned long flags;
-
        /* This is possible if op is under delayed unoptimizing */
        if (kprobe_disabled(&op->kp))
                return;
 
-       local_irq_save(flags);
-       hard_irq_disable();
        preempt_disable();
 
        if (kprobe_running()) {
@@ -135,13 +131,7 @@ static void optimized_callback(struct optimized_kprobe *op,
                __this_cpu_write(current_kprobe, NULL);
        }
 
-       /*
-        * No need for an explicit __hard_irq_enable() here.
-        * local_irq_restore() will re-enable interrupts,
-        * if they were hard disabled.
-        */
        preempt_enable_no_resched();
-       local_irq_restore(flags);
 }
 NOKPROBE_SYMBOL(optimized_callback);
 
-- 
2.14.2