This patch enables the speculative page fault on the PowerPC
architecture.

A speculative page fault is first attempted without holding the
mmap_sem; if it returns VM_FAULT_RETRY, the mmap_sem is acquired and
the traditional page fault processing is done.

The speculative path is only tried for multithreaded processes, as there
is no risk of contention on the mmap_sem otherwise.
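
For reference, the fallback pattern can be sketched as follows. This is
a condensed illustration, not the patch itself: handle_speculative_fault()
comes from the SPF series, while do_classic_fault() is a hypothetical
stand-in for the existing mmap_sem-protected processing in
__do_page_fault().

static int spf_then_classic(struct mm_struct *mm, unsigned long address,
			    unsigned int flags)
{
	struct vm_area_struct *vma = NULL;
	int fault;

	if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) {
		/* First chance: no mmap_sem taken at all. */
		fault = handle_speculative_fault(mm, address, flags, &vma);
		/* (The CONFIG_PPC_MEM_KEYS special case is omitted here.) */
		if (fault != VM_FAULT_RETRY)
			return fault;	/* handled speculatively */
	}

	/* Second chance: classic processing under the mmap_sem. */
	down_read(&mm->mmap_sem);
	fault = do_classic_fault(mm, vma, address, flags);
	up_read(&mm->mmap_sem);
	return fault;
}

Note that the vma filled in by handle_speculative_fault() is kept so the
classic path can reuse it through can_reuse_spf_vma() instead of doing
an immediate find_vma(), as the hunks below show.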

Signed-off-by: Laurent Dufour <lduf...@linux.vnet.ibm.com>
---
 arch/powerpc/mm/fault.c | 33 +++++++++++++++++++++++++++++++--
 1 file changed, 31 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index c01d627e687a..37191147026e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -464,6 +464,26 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
        if (is_exec)
                flags |= FAULT_FLAG_INSTRUCTION;
 
+       if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT)) {
+               fault = handle_speculative_fault(mm, address, flags, &vma);
+               /*
+                * Page fault is done if VM_FAULT_RETRY is not returned.
+                * But if the memory protection keys are active, we don't know
+                * whether the fault is due to a key mismatch or to a
+                * classic protection check.
+                * To differentiate them, we need the VMA we no longer
+                * have, so let's retry with the mmap_sem held.
+                */
+               if (fault != VM_FAULT_RETRY &&
+                   (!IS_ENABLED(CONFIG_PPC_MEM_KEYS) ||
+                    fault != VM_FAULT_SIGSEGV)) {
+                       perf_sw_event(PERF_COUNT_SW_SPF, 1, regs, address);
+                       goto done;
+               }
+       } else {
+               vma = NULL;
+       }
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -494,7 +514,8 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
                might_sleep();
        }
 
-       vma = find_vma(mm, address);
+       if (!vma || !can_reuse_spf_vma(vma, address))
+               vma = find_vma(mm, address);
        if (unlikely(!vma))
                return bad_area(regs, address);
        if (likely(vma->vm_start <= address))
@@ -551,8 +572,15 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
                         */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        flags |= FAULT_FLAG_TRIED;
-                       if (!fatal_signal_pending(current))
+                       if (!fatal_signal_pending(current)) {
+                               /*
+                                * Do not reuse this vma on retry: the mmap_sem
+                                * is about to be released, so fetch it again.
+                                */
+                               if (IS_ENABLED(CONFIG_SPECULATIVE_PAGE_FAULT))
+                                       vma = NULL;
                                goto retry;
+                       }
                }
 
                /*
@@ -564,6 +592,7 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 
        up_read(&current->mm->mmap_sem);
 
+done:
        if (unlikely(fault & VM_FAULT_ERROR))
                return mm_fault_error(regs, address, fault);
 
-- 
2.7.4
