Copy the prefetchw() of mmap_sem from X86_64 and move the
notify_page_fault() check (soon to be kprobe_handle_fault())
out of the unlikely if() statement.

This brings the X86_32 and X86_64 pagefault handlers closer
to each other.

Signed-off-by: Harvey Harrison <[EMAIL PROTECTED]>
---
Ingo, I'll go back to the zero-change unifications now; I just
wanted to send the last few for comment, as they will eventually
need to be dealt with.
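
For reference, here is a condensed sketch of the resulting entry
sequence of the 32-bit do_page_fault() once this patch is applied
(assembled from the hunks below; the surrounding kernel context is
omitted, so this is illustrative rather than compilable on its own):

	tsk = current;
	mm = tsk->mm;
	/* Warm the mmap_sem cacheline early, as fault_64.c already does. */
	prefetchw(&mm->mmap_sem);

	/* Get the faulting address. */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	/* Kprobes hook, now checked once instead of in two places. */
	if (notify_page_fault(regs))
		return;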

 arch/x86/mm/fault_32.c |   16 +++++++---------
 arch/x86/mm/fault_64.c |    7 ++-----
 2 files changed, 9 insertions(+), 14 deletions(-)

diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index f8fc240..363267a 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -306,13 +306,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
         */
        trace_hardirqs_fixup();
 
+       tsk = current;
+       mm = tsk->mm;
+       prefetchw(&mm->mmap_sem);
+
        /* get the address */
        address = read_cr2();
 
-       tsk = current;
-
        si_code = SEGV_MAPERR;
 
+       if (notify_page_fault(regs))
+               return;
+
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
@@ -330,8 +335,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
                    vmalloc_fault(address) >= 0)
                        return;
-               if (notify_page_fault(regs))
-                       return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
@@ -339,16 +342,11 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
                goto bad_area_nosemaphore;
        }
 
-       if (notify_page_fault(regs))
-               return;
-
        /* It's safe to allow irq's after cr2 has been saved and the vmalloc
           fault has been handled. */
        if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
                local_irq_enable();
 
-       mm = tsk->mm;
-
        /*
         * If we're in an interrupt, have no user context or are running in an
         * atomic region then we must not take the fault.
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 11e9398..128f812 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -366,6 +366,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
        si_code = SEGV_MAPERR;
 
+       if (notify_page_fault(regs))
+               return;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
@@ -391,8 +393,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                        if (vmalloc_fault(address) >= 0)
                                return;
                }
-               if (notify_page_fault(regs))
-                       return;
                /*
                 * Don't take the mm semaphore here. If we fixup a prefetch
                 * fault we could otherwise deadlock.
@@ -400,9 +400,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
                goto bad_area_nosemaphore;
        }
 
-       if (notify_page_fault(regs))
-               return;
-
        if (likely(regs->flags & X86_EFLAGS_IF))
                local_irq_enable();
 
-- 
1.5.4.rc2.1164.g6451


