Author: nwhitehorn
Date: Thu Mar 15 19:36:52 2012
New Revision: 233011
URL: http://svn.freebsd.org/changeset/base/233011

Log:
  Improve the algorithm for deciding whether to loop through all of a
  process's pages or look them up individually in pmap_remove(), and apply
  the same logic to the other ranged operation (pmap_protect). This speeds
  up make installworld by a factor of 2 on powerpc64.
  
  MFC after:    1 week

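A minimal standalone sketch (not the kernel code) of the threshold heuristic
this change introduces: walk the VA range page by page only while the range
covers fewer pages than are resident in the pmap, otherwise iterate the PVO
list instead. The names toy_pmap and range_walk_is_cheaper are hypothetical
stand-ins for illustration.

  #include <stddef.h>
  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  struct toy_pmap {
          size_t resident_count;          /* pages currently mapped */
  };

  /* Return nonzero if a per-page VA lookup is expected to be cheaper. */
  static int
  range_walk_is_cheaper(const struct toy_pmap *pm, unsigned long sva,
      unsigned long eva)
  {
          return ((eva - sva) / PAGE_SIZE < pm->resident_count);
  }

  int
  main(void)
  {
          struct toy_pmap pm = { .resident_count = 1000 };

          /* Small range: looking up each page individually wins. */
          printf("%d\n", range_walk_is_cheaper(&pm, 0x10000UL, 0x20000UL));
          /* Whole-address-space range: walking the PVO list wins. */
          printf("%d\n", range_walk_is_cheaper(&pm, 0UL, 1UL << 30));
          return (0);
  }
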
Modified:
  head/sys/powerpc/aim/mmu_oea64.c

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Thu Mar 15 17:01:25 2012        (r233010)
+++ head/sys/powerpc/aim/mmu_oea64.c    Thu Mar 15 19:36:52 2012        (r233011)
@@ -1909,16 +1909,51 @@ moea64_pinit0(mmu_t mmu, pmap_t pm)
 /*
  * Set the physical protection on the specified range of this map as requested.
  */
+static void
+moea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
+{
+       uintptr_t pt;
+
+       /*
+        * Grab the PTE pointer before we diddle with the cached PTE
+        * copy.
+        */
+       LOCK_TABLE();
+       pt = MOEA64_PVO_TO_PTE(mmu, pvo);
+
+       /*
+        * Change the protection of the page.
+        */
+       pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
+       pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
+       pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
+       if ((prot & VM_PROT_EXECUTE) == 0) 
+               pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
+
+       /*
+        * If the PVO is in the page table, update that pte as well.
+        */
+       if (pt != -1) {
+               MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
+                   pvo->pvo_vpn);
+               if ((pvo->pvo_pte.lpte.pte_lo & 
+                   (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
+                       moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
+                           pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
+                           PAGE_SIZE);
+               }
+       }
+       UNLOCK_TABLE();
+}
+
 void
 moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
     vm_prot_t prot)
 {
-       struct  pvo_entry *pvo;
-       uintptr_t pt;
-
-       CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
-           eva, prot);
+       struct  pvo_entry *pvo, *tpvo;
 
+       CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
+           sva, eva, prot);
 
        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("moea64_protect: non current pmap"));
@@ -1930,41 +1965,18 @@ moea64_protect(mmu_t mmu, pmap_t pm, vm_
 
        vm_page_lock_queues();
        PMAP_LOCK(pm);
-       for (; sva < eva; sva += PAGE_SIZE) {
-               pvo = moea64_pvo_find_va(pm, sva);
-               if (pvo == NULL)
-                       continue;
-
-               /*
-                * Grab the PTE pointer before we diddle with the cached PTE
-                * copy.
-                */
-               LOCK_TABLE();
-               pt = MOEA64_PVO_TO_PTE(mmu, pvo);
-
-               /*
-                * Change the protection of the page.
-                */
-               pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
-               pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
-               pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
-               if ((prot & VM_PROT_EXECUTE) == 0) 
-                       pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
-
-               /*
-                * If the PVO is in the page table, update that pte as well.
-                */
-               if (pt != -1) {
-                       MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
-                           pvo->pvo_vpn);
-                       if ((pvo->pvo_pte.lpte.pte_lo & 
-                           (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
-                               moea64_syncicache(mmu, pm, sva,
-                                   pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
-                                   PAGE_SIZE);
-                       }
+       if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
+               for (; sva < eva; sva += PAGE_SIZE) {
+                       pvo = moea64_pvo_find_va(pm, sva);
+                       if (pvo != NULL)
+                               moea64_pvo_protect(mmu, pm, pvo, prot);
+               }
+       } else {
+               LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
+                       if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
+                               continue;
+                       moea64_pvo_protect(mmu, pm, pvo, prot);
                }
-               UNLOCK_TABLE();
        }
        vm_page_unlock_queues();
        PMAP_UNLOCK(pm);
@@ -2041,9 +2053,15 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_o
 {
        struct  pvo_entry *pvo, *tpvo;
 
+       /*
+        * Perform an unsynchronized read.  This is, however, safe.
+        */
+       if (pm->pm_stats.resident_count == 0)
+               return;
+
        vm_page_lock_queues();
        PMAP_LOCK(pm);
-       if ((eva - sva)/PAGE_SIZE < 10) {
+       if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
                for (; sva < eva; sva += PAGE_SIZE) {
                        pvo = moea64_pvo_find_va(pm, sva);
                        if (pvo != NULL)