Author: kib
Date: Mon Apr  2 11:27:20 2012
New Revision: 233775
URL: http://svn.freebsd.org/changeset/base/233775

Log:
  MFC r233168:
  If we ever allow for managed fictitious pages, the pages shall be
  excluded from superpage promotions.  At least one of the reasons is
  that pv_table is sized for non-fictitious pages only.
  
  Consistently check for the page to be non-fictitious before accessing
  superpage pv list.
  
  MFC note: erroneous chunks from r233168 which were fixed in r233185
  are not included into the merge.

Modified:
  stable/9/sys/amd64/amd64/pmap.c
  stable/9/sys/i386/i386/pmap.c
Directory Properties:
  stable/9/sys/   (props changed)

Modified: stable/9/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/9/sys/amd64/amd64/pmap.c     Mon Apr  2 10:50:42 2012        
(r233774)
+++ stable/9/sys/amd64/amd64/pmap.c     Mon Apr  2 11:27:20 2012        
(r233775)
@@ -2338,7 +2338,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        pmap_pvh_free(&m->md, pmap, va);
-       if (TAILQ_EMPTY(&m->md.pv_list)) {
+       if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                if (TAILQ_EMPTY(&pvh->pv_list))
                        vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2799,6 +2799,8 @@ pmap_remove_all(vm_page_t m)
            ("pmap_remove_all: page %p is not managed", m));
        free = NULL;
        vm_page_lock_queues();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
@@ -2808,6 +2810,7 @@ pmap_remove_all(vm_page_t m)
                (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -3286,7 +3289,8 @@ validate:
                        }
                        if ((origpte & PG_MANAGED) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
-                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+                           ((om->flags & PG_FICTITIOUS) != 0 ||
+                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
                        if (invlva)
                                pmap_invalidate_page(pmap, va);
@@ -3299,7 +3303,8 @@ validate:
         * populated, then attempt promotion.
         */
        if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
-           pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+           pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+           vm_reserv_level_iffullpop(m) == 0)
                pmap_promote_pde(pmap, pde, va);
 
        vm_page_unlock_queues();
@@ -3919,7 +3924,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
                if (loops >= 16)
                        break;
        }
-       if (!rv && loops < 16) {
+       if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
                        if (PV_PMAP(pv) == pmap) {
@@ -3951,7 +3956,10 @@ pmap_page_wired_mappings(vm_page_t m)
                return (count);
        vm_page_lock_queues();
        count = pmap_pvh_wired_mappings(&m->md, count);
-       count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+       if ((m->flags & PG_FICTITIOUS) == 0) {
+           count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+               count);
+       }
        vm_page_unlock_queues();
        return (count);
 }
@@ -3993,7 +4001,8 @@ pmap_page_is_mapped(vm_page_t m)
                return (FALSE);
        vm_page_lock_queues();
        rv = !TAILQ_EMPTY(&m->md.pv_list) ||
-           !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4066,9 +4075,10 @@ pmap_remove_pages(pmap_t pmap)
                                    m, (uintmax_t)m->phys_addr,
                                    (uintmax_t)tpte));
 
-                               KASSERT(m < &vm_page_array[vm_page_array_size],
-                                       ("pmap_remove_pages: bad tpte %#jx",
-                                       (uintmax_t)tpte));
+                               KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+                                   m < &vm_page_array[vm_page_array_size],
+                                   ("pmap_remove_pages: bad tpte %#jx",
+                                   (uintmax_t)tpte));
 
                                pte_clear(pte);
 
@@ -4110,7 +4120,8 @@ pmap_remove_pages(pmap_t pmap)
                                } else {
                                        pmap_resident_count_dec(pmap, 1);
                                        TAILQ_REMOVE(&m->md.pv_list, pv, 
pv_list);
-                                       if (TAILQ_EMPTY(&m->md.pv_list)) {
+                                       if (TAILQ_EMPTY(&m->md.pv_list) &&
+                                           (m->flags & PG_FICTITIOUS) == 0) {
                                                pvh = 
pa_to_pvh(VM_PAGE_TO_PHYS(m));
                                                if (TAILQ_EMPTY(&pvh->pv_list))
                                                        vm_page_aflag_clear(m, 
PGA_WRITEABLE);
@@ -4161,7 +4172,8 @@ pmap_is_modified(vm_page_t m)
                return (FALSE);
        vm_page_lock_queues();
        rv = pmap_is_modified_pvh(&m->md) ||
-           pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4232,7 +4244,8 @@ pmap_is_referenced(vm_page_t m)
            ("pmap_is_referenced: page %p is not managed", m));
        vm_page_lock_queues();
        rv = pmap_is_referenced_pvh(&m->md) ||
-           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4289,6 +4302,8 @@ pmap_remove_write(vm_page_t m)
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
        vm_page_lock_queues();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
@@ -4299,6 +4314,7 @@ pmap_remove_write(vm_page_t m)
                        (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -4347,8 +4363,10 @@ pmap_ts_referenced(vm_page_t m)
 
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_ts_referenced: page %p is not managed", m));
-       pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        vm_page_lock_queues();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
+       pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -4379,6 +4397,7 @@ pmap_ts_referenced(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pvf = pv;
                do {
@@ -4433,6 +4452,8 @@ pmap_clear_modify(vm_page_t m)
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
        vm_page_lock_queues();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
@@ -4465,6 +4486,7 @@ pmap_clear_modify(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -4499,6 +4521,8 @@ pmap_clear_reference(vm_page_t m)
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("pmap_clear_reference: page %p is not managed", m));
        vm_page_lock_queues();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                pmap = PV_PMAP(pv);
@@ -4522,6 +4546,7 @@ pmap_clear_reference(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);

Modified: stable/9/sys/i386/i386/pmap.c
==============================================================================
--- stable/9/sys/i386/i386/pmap.c       Mon Apr  2 10:50:42 2012        
(r233774)
+++ stable/9/sys/i386/i386/pmap.c       Mon Apr  2 11:27:20 2012        
(r233775)
@@ -2459,7 +2459,7 @@ pmap_remove_entry(pmap_t pmap, vm_page_t
 
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        pmap_pvh_free(&m->md, pmap, va);
-       if (TAILQ_EMPTY(&m->md.pv_list)) {
+       if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                if (TAILQ_EMPTY(&pvh->pv_list))
                        vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -2933,6 +2933,8 @@ pmap_remove_all(vm_page_t m)
        free = NULL;
        vm_page_lock_queues();
        sched_pin();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
                va = pv->pv_va;
@@ -2942,6 +2944,7 @@ pmap_remove_all(vm_page_t m)
                (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -3457,7 +3460,8 @@ validate:
                        }
                        if ((origpte & PG_MANAGED) != 0 &&
                            TAILQ_EMPTY(&om->md.pv_list) &&
-                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))
+                           ((om->flags & PG_FICTITIOUS) != 0 ||
+                           TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
                                vm_page_aflag_clear(om, PGA_WRITEABLE);
                        if (invlva)
                                pmap_invalidate_page(pmap, va);
@@ -3470,7 +3474,8 @@ validate:
         * populated, then attempt promotion.
         */
        if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
-           pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+           pg_ps_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
+           vm_reserv_level_iffullpop(m) == 0)
                pmap_promote_pde(pmap, pde, va);
 
        sched_unpin();
@@ -4110,7 +4115,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_p
                if (loops >= 16)
                        break;
        }
-       if (!rv && loops < 16) {
+       if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
                pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
                TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
                        if (PV_PMAP(pv) == pmap) {
@@ -4142,7 +4147,10 @@ pmap_page_wired_mappings(vm_page_t m)
                return (count);
        vm_page_lock_queues();
        count = pmap_pvh_wired_mappings(&m->md, count);
-       count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count);
+       if ((m->flags & PG_FICTITIOUS) == 0) {
+           count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)),
+               count);
+       }
        vm_page_unlock_queues();
        return (count);
 }
@@ -4186,7 +4194,8 @@ pmap_page_is_mapped(vm_page_t m)
                return (FALSE);
        vm_page_lock_queues();
        rv = !TAILQ_EMPTY(&m->md.pv_list) ||
-           !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list);
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4259,9 +4268,10 @@ pmap_remove_pages(pmap_t pmap)
                                    m, (uintmax_t)m->phys_addr,
                                    (uintmax_t)tpte));
 
-                               KASSERT(m < &vm_page_array[vm_page_array_size],
-                                       ("pmap_remove_pages: bad tpte %#jx",
-                                       (uintmax_t)tpte));
+                               KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+                                   m < &vm_page_array[vm_page_array_size],
+                                   ("pmap_remove_pages: bad tpte %#jx",
+                                   (uintmax_t)tpte));
 
                                pte_clear(pte);
 
@@ -4303,7 +4313,8 @@ pmap_remove_pages(pmap_t pmap)
                                } else {
                                        pmap->pm_stats.resident_count--;
                                        TAILQ_REMOVE(&m->md.pv_list, pv, 
pv_list);
-                                       if (TAILQ_EMPTY(&m->md.pv_list)) {
+                                       if (TAILQ_EMPTY(&m->md.pv_list) &&
+                                           (m->flags & PG_FICTITIOUS) == 0) {
                                                pvh = 
pa_to_pvh(VM_PAGE_TO_PHYS(m));
                                                if (TAILQ_EMPTY(&pvh->pv_list))
                                                        vm_page_aflag_clear(m, 
PGA_WRITEABLE);
@@ -4356,7 +4367,8 @@ pmap_is_modified(vm_page_t m)
                return (FALSE);
        vm_page_lock_queues();
        rv = pmap_is_modified_pvh(&m->md) ||
-           pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4429,7 +4441,8 @@ pmap_is_referenced(vm_page_t m)
            ("pmap_is_referenced: page %p is not managed", m));
        vm_page_lock_queues();
        rv = pmap_is_referenced_pvh(&m->md) ||
-           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)));
+           ((m->flags & PG_FICTITIOUS) == 0 &&
+           pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
        vm_page_unlock_queues();
        return (rv);
 }
@@ -4489,6 +4502,8 @@ pmap_remove_write(vm_page_t m)
                return;
        vm_page_lock_queues();
        sched_pin();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                va = pv->pv_va;
@@ -4499,6 +4514,7 @@ pmap_remove_write(vm_page_t m)
                        (void)pmap_demote_pde(pmap, pde, va);
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -4556,6 +4572,8 @@ pmap_ts_referenced(vm_page_t m)
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        vm_page_lock_queues();
        sched_pin();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
                va = pv->pv_va;
                pmap = PV_PMAP(pv);
@@ -4586,6 +4604,7 @@ pmap_ts_referenced(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
                pvf = pv;
                do {
@@ -4642,6 +4661,8 @@ pmap_clear_modify(vm_page_t m)
                return;
        vm_page_lock_queues();
        sched_pin();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                va = pv->pv_va;
@@ -4679,6 +4700,7 @@ pmap_clear_modify(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
@@ -4720,6 +4742,8 @@ pmap_clear_reference(vm_page_t m)
            ("pmap_clear_reference: page %p is not managed", m));
        vm_page_lock_queues();
        sched_pin();
+       if ((m->flags & PG_FICTITIOUS) != 0)
+               goto small_mappings;
        pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
                va = pv->pv_va;
@@ -4743,6 +4767,7 @@ pmap_clear_reference(vm_page_t m)
                }
                PMAP_UNLOCK(pmap);
        }
+small_mappings:
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
                pmap = PV_PMAP(pv);
                PMAP_LOCK(pmap);
_______________________________________________
svn-src-all@freebsd.org mailing list
http://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscribe@freebsd.org"

Reply via email to