Author: kib
Date: Wed Oct 16 07:09:15 2019
New Revision: 353622
URL: https://svnweb.freebsd.org/changeset/base/353622

Log:
  Fix assert in PowerPC pmaps after introduction of object busy.
  
  The VM_PAGE_OBJECT_BUSY_ASSERT() in pmap_enter() implementation should
  be only asserted when the code is executed as result of pmap_enter(),
  not when the same code is entered from e.g. pmap_enter_quick().  This
  is relevant for all PowerPC pmap variants, because mmu_*_enter() is
  used as the backend, and assert is located there.
  
  Add a PowerPC private pmap_enter() PMAP_ENTER_QUICK_LOCKED flag to
  indicate that the call is not from pmap_enter().  For quick-locked
  calls, assert that the page's object is locked instead of asserting
  that the page is busied.
  
  Reported and tested by:       bdragon
  Reviewed by:  alc, bdragon, markj
  Sponsored by: The FreeBSD Foundation
  Differential revision:        https://reviews.freebsd.org/D22041

Modified:
  head/sys/powerpc/aim/mmu_oea.c
  head/sys/powerpc/aim/mmu_oea64.c
  head/sys/powerpc/booke/pmap.c
  head/sys/powerpc/include/pmap.h

Modified: head/sys/powerpc/aim/mmu_oea.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea.c      Wed Oct 16 07:09:00 2019        (r353621)
+++ head/sys/powerpc/aim/mmu_oea.c      Wed Oct 16 07:09:15 2019        (r353622)
@@ -1149,8 +1149,12 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page
        if (pmap_bootstrapped)
                rw_assert(&pvh_global_lock, RA_WLOCKED);
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-       if ((m->oflags & VPO_UNMANAGED) == 0)
-               VM_PAGE_OBJECT_BUSY_ASSERT(m);
+       if ((m->oflags & VPO_UNMANAGED) == 0) {
+               if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+                       VM_PAGE_OBJECT_BUSY_ASSERT(m);
+               else
+                       VM_OBJECT_ASSERT_LOCKED(m->object);
+       }
 
        if ((m->oflags & VPO_UNMANAGED) != 0 || !moea_initialized) {
                pvo_head = &moea_pvo_kunmanaged;
@@ -1218,7 +1222,8 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t st
        PMAP_LOCK(pm);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea_enter_locked(pm, start + ptoa(diff), m, prot &
-                   (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
+                   (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_QUICK_LOCKED,
+                   0);
                m = TAILQ_NEXT(m, listq);
        }
        rw_wunlock(&pvh_global_lock);
@@ -1233,7 +1238,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va,
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pm);
        moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-           0, 0);
+           PMAP_ENTER_QUICK_LOCKED, 0);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pm);
 }

Modified: head/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- head/sys/powerpc/aim/mmu_oea64.c    Wed Oct 16 07:09:00 2019        (r353621)
+++ head/sys/powerpc/aim/mmu_oea64.c    Wed Oct 16 07:09:15 2019        (r353622)
@@ -1406,8 +1406,12 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, v
        uint64_t        pte_lo;
        int             error;
 
-       if ((m->oflags & VPO_UNMANAGED) == 0)
-               VM_PAGE_OBJECT_BUSY_ASSERT(m);
+       if ((m->oflags & VPO_UNMANAGED) == 0) {
+               if ((flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+                       VM_PAGE_OBJECT_BUSY_ASSERT(m);
+               else
+                       VM_OBJECT_ASSERT_LOCKED(m->object);
+       }
 
        pvo = alloc_pvo_entry(0);
        if (pvo == NULL)
@@ -1548,7 +1552,8 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t 
        m = m_start;
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
-                   (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
+                   (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
+                   PMAP_ENTER_QUICK_LOCKED, 0);
                m = TAILQ_NEXT(m, listq);
        }
 }
@@ -1559,7 +1564,7 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t v
 {
 
        moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-           PMAP_ENTER_NOSLEEP, 0);
+           PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
 }
 
 vm_paddr_t

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c       Wed Oct 16 07:09:00 2019        (r353621)
+++ head/sys/powerpc/booke/pmap.c       Wed Oct 16 07:09:15 2019        (r353622)
@@ -2278,8 +2278,12 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offs
                KASSERT((va <= VM_MAXUSER_ADDRESS),
                    ("mmu_booke_enter_locked: user pmap, non user va"));
        }
-       if ((m->oflags & VPO_UNMANAGED) == 0)
-               VM_PAGE_OBJECT_BUSY_ASSERT(m);
+       if ((m->oflags & VPO_UNMANAGED) == 0) {
+               if ((pmap_flags & PMAP_ENTER_QUICK_LOCKED) == 0)
+                       VM_PAGE_OBJECT_BUSY_ASSERT(m);
+               else
+                       VM_OBJECT_ASSERT_LOCKED(m->object);
+       }
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
@@ -2447,7 +2451,7 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offs
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
                    prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-                   PMAP_ENTER_NOSLEEP, 0);
+                   PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
                m = TAILQ_NEXT(m, listq);
        }
        rw_wunlock(&pvh_global_lock);
@@ -2462,8 +2466,8 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offse
        rw_wlock(&pvh_global_lock);
        PMAP_LOCK(pmap);
        mmu_booke_enter_locked(mmu, pmap, va, m,
-           prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
-           0);
+           prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP |
+           PMAP_ENTER_QUICK_LOCKED, 0);
        rw_wunlock(&pvh_global_lock);
        PMAP_UNLOCK(pmap);
 }

Modified: head/sys/powerpc/include/pmap.h
==============================================================================
--- head/sys/powerpc/include/pmap.h     Wed Oct 16 07:09:00 2019        (r353621)
+++ head/sys/powerpc/include/pmap.h     Wed Oct 16 07:09:15 2019        (r353622)
@@ -80,6 +80,8 @@
 struct pmap;
 typedef struct pmap *pmap_t;
 
+#define        PMAP_ENTER_QUICK_LOCKED 0x10000000
+
 #if !defined(NPMAPS)
 #define        NPMAPS          32768
 #endif /* !defined(NPMAPS) */
_______________________________________________
svn-src-head@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-head
To unsubscribe, send any mail to "svn-src-head-unsubscribe@freebsd.org"

Reply via email to