Author: alc
Date: Sat May 29 17:10:45 2010
New Revision: 208645
URL: http://svn.freebsd.org/changeset/base/208645

Log:
  When I pushed down the page queues lock into pmap_is_modified(), I created
  an ordering dependence: A pmap operation that clears PG_WRITEABLE and calls
  vm_page_dirty() must perform the vm_page_dirty() call first.  Otherwise,
  pmap_is_modified() could return FALSE without acquiring the page queues
  lock, because the page is not (currently) writeable, and the caller of
  pmap_is_modified() might believe that the page's dirty field is clear
  because it has not seen the effect of the vm_page_dirty() call.
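
  For reference, the fast path in question is sketched below.  This is a
  paraphrase of pmap_is_modified() as of this series, not the verbatim
  code:

          /*
           * The page is not busy and no mapping of it is writeable,
           * so no PTE can have PG_M set: report the page unmodified
           * without acquiring the page queues lock.
           */
          if ((m->oflags & VPO_BUSY) == 0 &&
              (m->flags & PG_WRITEABLE) == 0)
                  return (FALSE);

  A vm_page_dirty() call performed only after PG_WRITEABLE has been
  cleared may therefore never be observed by the caller.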
  
  When I pushed down the page queues lock into pmap_is_modified(), I
  overlooked one place where this ordering dependence is violated:
  pmap_enter().  In a rare situation, pmap_enter() can be called to replace a
  dirty mapping to one page with a mapping to another page.  (I say rare
  because replacements generally occur as a result of a copy-on-write fault,
  and so the old page is not dirty.)  This change delays clearing PG_WRITEABLE
  until after vm_page_dirty() has been called.
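
  Concretely, the old replacement path removed the pv entry with
  pmap_remove_entry(), which clears PG_WRITEABLE once the page's last pv
  entry is gone, well before the old PTE's PG_M bit was transferred to
  the page's dirty field.  A condensed sketch of the hazard (locking and
  intervening code elided):

          /* Old order: this may clear PG_WRITEABLE on om ... */
          pmap_remove_entry(pmap, om, va);
          ...
          /* ... before om's dirty field is updated here. */
          if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
                  vm_page_dirty(om);

  Now the pv entry is unlinked with pmap_pvh_remove(), which leaves
  PG_WRITEABLE alone, and the flag is cleared explicitly only after the
  vm_page_dirty() call, as the diffs below show.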
  
  Fixing the ordering dependence also makes it easy to introduce a small
  optimization: Previously, when pmap_enter() replaced a mapping to one page
  with a mapping to another page, it freed the pv entry for the first mapping
  and later called the pv entry allocator for the new mapping.  Now,
  pmap_enter() attempts to recycle the old pv entry, saving two calls to the
  pv entry allocator.
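
  In outline, the recycling works as follows (condensed from the amd64
  hunks below; the i386 change is identical):

          pv = NULL;
          ...
          if (origpte & PG_MANAGED) {
                  om = PHYS_TO_VM_PAGE(opa);
                  /* Unlink the old pv entry, but keep it for reuse. */
                  pv = pmap_pvh_remove(&om->md, pmap, va);
          }
          ...
          if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                  /* Allocate a new pv entry only if none was recycled. */
                  if (pv == NULL)
                          pv = get_pv_entry(pmap, FALSE);
                  pv->pv_va = va;
                  TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                  pa |= PG_MANAGED;
          } else if (pv != NULL)
                  free_pv_entry(pmap, pv);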
  
  There is no point in setting PG_WRITEABLE on unmanaged pages, so don't.
  Update a comment to reflect this.
  
  Tidy up the variable declarations at the start of pmap_enter().

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c
  head/sys/vm/vm_page.h

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Sat May 29 16:14:02 2010        (r208644)
+++ head/sys/amd64/amd64/pmap.c Sat May 29 17:10:45 2010        (r208645)
@@ -3128,11 +3128,11 @@ void
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     vm_prot_t prot, boolean_t wired)
 {
-       vm_paddr_t pa;
        pd_entry_t *pde;
        pt_entry_t *pte;
-       vm_paddr_t opa;
-       pt_entry_t origpte, newpte;
+       pt_entry_t newpte, origpte;
+       pv_entry_t pv;
+       vm_paddr_t opa, pa;
        vm_page_t mpte, om;
        boolean_t invlva;
 
@@ -3200,6 +3200,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                }
                goto validate;
        } 
+
+       pv = NULL;
+
        /*
         * Mapping has changed, invalidate old range and fall through to
         * handle validating new mapping.
@@ -3209,7 +3212,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                        pmap->pm_stats.wired_count--;
                if (origpte & PG_MANAGED) {
                        om = PHYS_TO_VM_PAGE(opa);
-                       pmap_remove_entry(pmap, om, va);
+                       pv = pmap_pvh_remove(&om->md, pmap, va);
                }
                if (mpte != NULL) {
                        mpte->wire_count--;
@@ -3226,9 +3229,13 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
                    ("pmap_enter: managed mapping within the clean submap"));
-               pmap_insert_entry(pmap, va, m);
+               if (pv == NULL)
+                       pv = get_pv_entry(pmap, FALSE);
+               pv->pv_va = va;
+               TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                pa |= PG_MANAGED;
-       }
+       } else if (pv != NULL)
+               free_pv_entry(pmap, pv);
 
        /*
         * Increment counters
@@ -3243,7 +3250,8 @@ validate:
        newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
        if ((prot & VM_PROT_WRITE) != 0) {
                newpte |= PG_RW;
-               vm_page_flag_set(m, PG_WRITEABLE);
+               if ((newpte & PG_MANAGED) != 0)
+                       vm_page_flag_set(m, PG_WRITEABLE);
        }
        if ((prot & VM_PROT_EXECUTE) == 0)
                newpte |= pg_nx;
@@ -3278,6 +3286,10 @@ validate:
                                if ((newpte & PG_RW) == 0)
                                        invlva = TRUE;
                        }
+                       if ((origpte & PG_MANAGED) != 0 &&
+                           TAILQ_EMPTY(&om->md.pv_list) &&
+                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+                               vm_page_flag_clear(om, PG_WRITEABLE);
                        if (invlva)
                                pmap_invalidate_page(pmap, va);
                } else

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Sat May 29 16:14:02 2010        (r208644)
+++ head/sys/i386/i386/pmap.c   Sat May 29 17:10:45 2010        (r208645)
@@ -3257,11 +3257,11 @@ void
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
     vm_prot_t prot, boolean_t wired)
 {
-       vm_paddr_t pa;
        pd_entry_t *pde;
        pt_entry_t *pte;
-       vm_paddr_t opa;
-       pt_entry_t origpte, newpte;
+       pt_entry_t newpte, origpte;
+       pv_entry_t pv;
+       vm_paddr_t opa, pa;
        vm_page_t mpte, om;
        boolean_t invlva;
 
@@ -3336,6 +3336,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                }
                goto validate;
        } 
+
+       pv = NULL;
+
        /*
         * Mapping has changed, invalidate old range and fall through to
         * handle validating new mapping.
@@ -3345,7 +3348,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
                        pmap->pm_stats.wired_count--;
                if (origpte & PG_MANAGED) {
                        om = PHYS_TO_VM_PAGE(opa);
-                       pmap_remove_entry(pmap, om, va);
+                       pv = pmap_pvh_remove(&om->md, pmap, va);
                }
                if (mpte != NULL) {
                        mpte->wire_count--;
@@ -3362,9 +3365,13 @@ pmap_enter(pmap_t pmap, vm_offset_t va, 
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
                KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
                    ("pmap_enter: managed mapping within the clean submap"));
-               pmap_insert_entry(pmap, va, m);
+               if (pv == NULL)
+                       pv = get_pv_entry(pmap, FALSE);
+               pv->pv_va = va;
+               TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                pa |= PG_MANAGED;
-       }
+       } else if (pv != NULL)
+               free_pv_entry(pmap, pv);
 
        /*
         * Increment counters
@@ -3379,7 +3386,8 @@ validate:
        newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
        if ((prot & VM_PROT_WRITE) != 0) {
                newpte |= PG_RW;
-               vm_page_flag_set(m, PG_WRITEABLE);
+               if ((newpte & PG_MANAGED) != 0)
+                       vm_page_flag_set(m, PG_WRITEABLE);
        }
 #ifdef PAE
        if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3420,6 +3428,10 @@ validate:
                                if ((prot & VM_PROT_WRITE) == 0)
                                        invlva = TRUE;
                        }
+                       if ((origpte & PG_MANAGED) != 0 &&
+                           TAILQ_EMPTY(&om->md.pv_list) &&
+                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+                               vm_page_flag_clear(om, PG_WRITEABLE);
                        if (invlva)
                                pmap_invalidate_page(pmap, va);
                } else

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Sat May 29 16:14:02 2010        (r208644)
+++ head/sys/vm/vm_page.h       Sat May 29 17:10:45 2010        (r208645)
@@ -219,8 +219,8 @@ extern struct vpglocks pa_lock[];
  *      pte mappings, nor can they be removed from their objects via 
  *      the object, and such pages are also not on any PQ queue.
  *
- * PG_WRITEABLE is set exclusively by pmap_enter().  When it does so, the page
- * must be VPO_BUSY.
+ * PG_WRITEABLE is set exclusively on managed pages by pmap_enter().  When it
+ * does so, the page must be VPO_BUSY.
  */
 #define        PG_CACHED       0x0001          /* page is cached */
 #define        PG_FREE         0x0002          /* page is free */