Author: alc
Date: Sat Nov  3 23:22:49 2012
New Revision: 242535
URL: http://svn.freebsd.org/changeset/base/242535

Log:
  Replace all uses of the page queues lock with an R/W lock that is
  private to this pmap.
  
  Eliminate two redundant #include's.
  
  Tested by:    marcel
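
  For illustration, a minimal sketch of the locking pattern this change
  applies throughout pmap.c.  The example_*() function names are
  hypothetical; pvh_global_lock, the rw_*(9) calls, PMAP_LOCK(), and
  VM_WAIT are taken from the diff below, and this is kernel code that
  only compiles as part of the kernel:

  #include <sys/param.h>
  #include <sys/lock.h>
  #include <sys/rwlock.h>

  static struct rwlock_padalign pvh_global_lock;

  static void
  example_bootstrap(void)
  {
          /* Done once during bootstrap, in mmu_booke_bootstrap(). */
          rw_init(&pvh_global_lock, "pmap pv global");
  }

  static void
  example_pv_list_op(vm_page_t m)
  {
          /*
           * Every former vm_page_lock_queues() section now takes the
           * pmap-private pv list lock instead, so pv list traversal no
           * longer contends with the global page queues lock.
           */
          rw_wlock(&pvh_global_lock);
          /* ... walk or modify m->md.pv_list ... */
          rw_wunlock(&pvh_global_lock);
  }

  static void
  example_alloc_retry(pmap_t pmap)
  {
          /*
           * As in ptbl_alloc(): drop both locks before sleeping for a
           * free page, then reacquire them in the same order.
           */
          PMAP_UNLOCK(pmap);
          rw_wunlock(&pvh_global_lock);
          VM_WAIT;
          rw_wlock(&pvh_global_lock);
          PMAP_LOCK(pmap);
  }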

Modified:
  head/sys/powerpc/booke/pmap.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Sat Nov  3 23:03:14 2012        (r242534)
+++ head/sys/powerpc/booke/pmap.c       Sat Nov  3 23:22:49 2012        (r242535)
@@ -51,7 +51,6 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
-#include <sys/types.h>
 #include <sys/param.h>
 #include <sys/malloc.h>
 #include <sys/ktr.h>
@@ -64,6 +63,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/vmmeter.h>
@@ -85,7 +85,6 @@ __FBSDID("$FreeBSD$");
 
 #include <machine/tlb.h>
 #include <machine/spr.h>
-#include <machine/vmparam.h>
 #include <machine/md_var.h>
 #include <machine/mmuvar.h>
 #include <machine/pmap.h>
@@ -214,6 +213,8 @@ static inline unsigned int tlb0_tableidx
 /* Page table management */
 /**************************************************************************/
 
+static struct rwlock_padalign pvh_global_lock;
+
 /* Data for the pv entry allocation mechanism */
 static uma_zone_t pvzone;
 static struct vm_object pvzone_obj;
@@ -551,9 +552,9 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsig
                    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
 
                        PMAP_UNLOCK(pmap);
-                       vm_page_unlock_queues();
+                       rw_wunlock(&pvh_global_lock);
                        VM_WAIT;
-                       vm_page_lock_queues();
+                       rw_wlock(&pvh_global_lock);
                        PMAP_LOCK(pmap);
                }
                mtbl[i] = m;
@@ -743,7 +744,7 @@ pv_insert(pmap_t pmap, vm_offset_t va, v
 
        /* add to pv_list */
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
 
        TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
 
@@ -760,7 +761,7 @@ pv_remove(pmap_t pmap, vm_offset_t va, v
        //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-       mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+       rw_assert(&pvh_global_lock, RA_WLOCKED);
 
        /* find pv entry */
        TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
@@ -1239,6 +1240,11 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset
        /* Mark kernel_pmap active on all CPUs */
        CPU_FILL(&kernel_pmap->pm_active);
 
+       /*
+        * Initialize the global pv list lock.
+        */
+       rw_init(&pvh_global_lock, "pmap pv global");
+
        /*******************************************************/
        /* Final setup */
        /*******************************************************/
@@ -1522,10 +1528,10 @@ mmu_booke_enter(mmu_t mmu, pmap_t pmap, 
     vm_prot_t prot, boolean_t wired)
 {
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }
 
@@ -1711,14 +1717,14 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t
 
        psize = atop(end - start);
        m = m_start;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
                    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }
 
@@ -1727,11 +1733,11 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t 
     vm_prot_t prot)
 {
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        mmu_booke_enter_locked(mmu, pmap, va, m,
            prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }
 
@@ -1768,7 +1774,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap,
        hold_flag = PTBL_HOLD_FLAG(pmap);
        //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        for (; va < endva; va += PAGE_SIZE) {
                pte = pte_find(mmu, pmap, va);
@@ -1776,7 +1782,7 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap,
                        pte_remove(mmu, pmap, va, hold_flag);
        }
        PMAP_UNLOCK(pmap);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 
        //debugf("mmu_booke_remove: e\n");
 }
@@ -1790,7 +1796,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_
        pv_entry_t pv, pvn;
        uint8_t hold_flag;
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
                pvn = TAILQ_NEXT(pv, pv_link);
 
@@ -1800,7 +1806,7 @@ mmu_booke_remove_all(mmu_t mmu, vm_page_
                PMAP_UNLOCK(pv->pv_pmap);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1958,7 +1964,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
@@ -1982,7 +1988,7 @@ mmu_booke_remove_write(mmu_t mmu, vm_pag
                PMAP_UNLOCK(pv->pv_pmap);
        }
        vm_page_aflag_clear(m, PGA_WRITEABLE);
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 static void
@@ -1998,7 +2004,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t 
        va = trunc_page(va);
        sz = round_page(sz);
 
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        pmap = PCPU_GET(curpmap);
        active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
        while (sz > 0) {
@@ -2025,7 +2031,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t 
                va += PAGE_SIZE;
                sz -= PAGE_SIZE;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2173,7 +2179,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->aflags & PGA_WRITEABLE) == 0)
                return (rv);
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2185,7 +2191,7 @@ mmu_booke_is_modified(mmu_t mmu, vm_page
                if (rv)
                        break;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
 }
 
@@ -2214,7 +2220,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_pa
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_is_referenced: page %p is not managed", m));
        rv = FALSE;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2226,7 +2232,7 @@ mmu_booke_is_referenced(mmu_t mmu, vm_pa
                if (rv)
                        break;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
 }
 
@@ -2252,7 +2258,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_pag
         */
        if ((m->aflags & PGA_WRITEABLE) == 0)
                return;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2271,7 +2277,7 @@ mmu_booke_clear_modify(mmu_t mmu, vm_pag
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2294,7 +2300,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_pa
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_ts_referenced: page %p is not managed", m));
        count = 0;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2317,7 +2323,7 @@ mmu_booke_ts_referenced(mmu_t mmu, vm_pa
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (count);
 }
 
@@ -2332,7 +2338,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_
 
        KASSERT((m->oflags & VPO_UNMANAGED) == 0,
            ("mmu_booke_clear_reference: page %p is not managed", m));
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
@@ -2350,7 +2356,7 @@ mmu_booke_clear_reference(mmu_t mmu, vm_
                }
                PMAP_UNLOCK(pv->pv_pmap);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2395,7 +2401,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, p
            ("mmu_booke_page_exists_quick: page %p is not managed", m));
        loops = 0;
        rv = FALSE;
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                if (pv->pv_pmap == pmap) {
                        rv = TRUE;
@@ -2404,7 +2410,7 @@ mmu_booke_page_exists_quick(mmu_t mmu, p
                if (++loops >= 16)
                        break;
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (rv);
 }
 
@@ -2421,7 +2427,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu,
 
        if ((m->oflags & VPO_UNMANAGED) != 0)
                return (count);
-       vm_page_lock_queues();
+       rw_wlock(&pvh_global_lock);
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
@@ -2429,7 +2435,7 @@ mmu_booke_page_wired_mappings(mmu_t mmu,
                                count++;
                PMAP_UNLOCK(pv->pv_pmap);
        }
-       vm_page_unlock_queues();
+       rw_wunlock(&pvh_global_lock);
        return (count);
 }
 