Author: alc
Date: Thu Sep  1 15:57:44 2016
New Revision: 305213
URL: https://svnweb.freebsd.org/changeset/base/305213

Log:
  As an optimization to the machine-independent layer, change the machine-
  dependent pmap_ts_referenced() so that it updates the page's dirty field
  if a modified bit is found while counting reference bits.  This
  opportunistic update can be performed at low cost and can eliminate the
  need for some future calls to pmap_is_modified() by the machine-
  independent layer.
  
  Reviewed by:  kib, markj
  MFC after:    3 weeks
  Sponsored by: EMC / Isilon Storage Division
  Differential Revision:        https://reviews.freebsd.org/D7722
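
For context, a minimal userland sketch of the kind of machine-independent shortcut the log entry refers to: once pmap_ts_referenced() has folded a modified bit into the page's dirty field, a cheap test of that field can avoid a call to pmap_is_modified(). This is not part of the change; `struct page`, the stub, and `page_is_dirty()` are hypothetical stand-ins.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the MI vm_page; only the dirty field matters here. */
struct page {
	uint8_t	dirty;			/* set via vm_page_dirty() */
};

/* Placeholder for the MD pv-list walk; the real pmap_is_modified() lives in pmap.c. */
static bool
pmap_is_modified_stub(struct page *m)
{
	(void)m;
	return (false);
}

/*
 * Sketch of the MI-side saving: when pmap_ts_referenced() has already folded
 * a modified bit into the page's dirty field, the more expensive walk over
 * the page's mappings can be skipped entirely.
 */
static bool
page_is_dirty(struct page *m)
{
	if (m->dirty != 0)			/* cheap field test */
		return (true);
	return (pmap_is_modified_stub(m));	/* fall back to the MD walk */
}

int
main(void)
{
	struct page m = { .dirty = 1 };	/* as if pmap_ts_referenced() had dirtied it */

	printf("dirty: %d\n", page_is_dirty(&m));
	return (0);
}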

Modified:
  head/sys/amd64/amd64/pmap.c
  head/sys/i386/i386/pmap.c

Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c Thu Sep  1 15:23:33 2016        (r305212)
+++ head/sys/amd64/amd64/pmap.c Thu Sep  1 15:57:44 2016        (r305213)
@@ -5843,6 +5843,14 @@ safe_to_clear_referenced(pmap_t pmap, pt
  *     should be tested and standardized at some point in the future for
  *     optimal aging of shared pages.
  *
+ *     As an optimization, update the page's dirty field if a modified bit is
+ *     found while counting reference bits.  This opportunistic update can be
+ *     performed at low cost and can eliminate the need for some future calls
+ *     to pmap_is_modified().  However, since this function stops after
+ *     finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ *     dirty pages.  Those dirty pages will only be detected by a future call
+ *     to pmap_is_modified().
+ *
  *     A DI block is not needed within this function, because
  *     invalidations are performed before the PV list lock is
  *     released.
@@ -5855,7 +5863,7 @@ pmap_ts_referenced(vm_page_t m)
        pmap_t pmap;
        struct rwlock *lock;
        pd_entry_t oldpde, *pde;
-       pt_entry_t *pte, PG_A;
+       pt_entry_t *pte, PG_A, PG_M, PG_RW;
        vm_offset_t va;
        vm_paddr_t pa;
        int cleared, md_gen, not_cleared, pvh_gen;
@@ -5890,9 +5898,19 @@ retry:
                        }
                }
                PG_A = pmap_accessed_bit(pmap);
+               PG_M = pmap_modified_bit(pmap);
+               PG_RW = pmap_rw_bit(pmap);
                va = pv->pv_va;
                pde = pmap_pde(pmap, pv->pv_va);
                oldpde = *pde;
+               if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+                       /*
+                        * Although "oldpde" is mapping a 2MB page, because
+                        * this function is called at a 4KB page granularity,
+                        * we only update the 4KB page under test.
+                        */
+                       vm_page_dirty(m);
+               }
                if ((*pde & PG_A) != 0) {
                        /*
                         * Since this reference bit is shared by 512 4KB
@@ -5986,11 +6004,15 @@ small_mappings:
                        }
                }
                PG_A = pmap_accessed_bit(pmap);
+               PG_M = pmap_modified_bit(pmap);
+               PG_RW = pmap_rw_bit(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
                KASSERT((*pde & PG_PS) == 0,
                    ("pmap_ts_referenced: found a 2mpage in page %p's pv list",
                    m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
+               if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+                       vm_page_dirty(m);
                if ((*pte & PG_A) != 0) {
                        if (safe_to_clear_referenced(pmap, *pte)) {
                                atomic_clear_long(pte, PG_A);

Modified: head/sys/i386/i386/pmap.c
==============================================================================
--- head/sys/i386/i386/pmap.c   Thu Sep  1 15:23:33 2016        (r305212)
+++ head/sys/i386/i386/pmap.c   Thu Sep  1 15:57:44 2016        (r305213)
@@ -4798,6 +4798,14 @@ retry:
  *     XXX: The exact number of bits to check and clear is a matter that
  *     should be tested and standardized at some point in the future for
  *     optimal aging of shared pages.
+ *
+ *     As an optimization, update the page's dirty field if a modified bit is
+ *     found while counting reference bits.  This opportunistic update can be
+ *     performed at low cost and can eliminate the need for some future calls
+ *     to pmap_is_modified().  However, since this function stops after
+ *     finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
+ *     dirty pages.  Those dirty pages will only be detected by a future call
+ *     to pmap_is_modified().
  */
 int
 pmap_ts_referenced(vm_page_t m)
@@ -4824,6 +4832,14 @@ pmap_ts_referenced(vm_page_t m)
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
                pde = pmap_pde(pmap, pv->pv_va);
+               if ((*pde & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
+                       /*
+                        * Although "*pde" is mapping a 2/4MB page, because
+                        * this function is called at a 4KB page granularity,
+                        * we only update the 4KB page under test.
+                        */
+                       vm_page_dirty(m);
+               }
                if ((*pde & PG_A) != 0) {
                        /*
                         * Since this reference bit is shared by either 1024
@@ -4872,6 +4888,8 @@ small_mappings:
                    ("pmap_ts_referenced: found a 4mpage in page %p's pv list",
                    m));
                pte = pmap_pte_quick(pmap, pv->pv_va);
+               if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
+                       vm_page_dirty(m);
                if ((*pte & PG_A) != 0) {
                        atomic_clear_int((u_int *)pte, PG_A);
                        pmap_invalidate_page(pmap, pv->pv_va);
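
As a side note, the test added before vm_page_dirty() in both files can be read as a small predicate: a mapping is treated as dirty only when it has both the modified bit and the read/write bit set. A standalone sketch follows; the helper is hypothetical (not part of the patch), and the example bit values are the legacy x86 positions.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t pt_entry_t;	/* stand-in for the amd64 PTE/PDE type */

/*
 * Hypothetical helper mirroring the test added above.  The bit masks are
 * parameters because, on amd64, their positions depend on the pmap type
 * (see pmap_modified_bit() and pmap_rw_bit()).
 */
static inline bool
pmap_entry_dirty(pt_entry_t e, pt_entry_t PG_M, pt_entry_t PG_RW)
{
	return ((e & (PG_M | PG_RW)) == (PG_M | PG_RW));
}

int
main(void)
{
	/* Legacy x86 bit positions: R/W is bit 1, Dirty is bit 6. */
	const pt_entry_t X86_PG_RW = 0x002, X86_PG_M = 0x040;
	pt_entry_t pte = X86_PG_RW | X86_PG_M;	/* writable and modified */

	return (pmap_entry_dirty(pte, X86_PG_M, X86_PG_RW) ? 0 : 1);
}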