The branch main has been updated by alc:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=0d2f98c2f0928a8ee8446c3e5e0e4fb93f1dd9c5

commit 0d2f98c2f0928a8ee8446c3e5e0e4fb93f1dd9c5
Author:     Alan Cox <a...@freebsd.org>
AuthorDate: 2023-06-17 17:18:33 +0000
Commit:     Alan Cox <a...@freebsd.org>
CommitDate: 2023-06-24 18:09:04 +0000

    amd64 pmap: Tidy up pmap_promote_pde() calls
    
    Since pmap_ps_enabled() is true by default, check it inside of
    pmap_promote_pde() instead of at every call site.
    
    Modify pmap_promote_pde() to return true if the promotion succeeded and
    false otherwise.  Use this return value in a couple places.
    
    Reviewed by:    kib, markj
    Differential Revision:  https://reviews.freebsd.org/D40744
---
 sys/amd64/amd64/pmap.c | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3cb02a4f9daa..3215a7f8d559 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -436,7 +436,7 @@ pt_entry_t pg_nx;
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
     "VM/pmap parameters");
 
-static int pg_ps_enabled = 1;
+static int __read_frequently pg_ps_enabled = 1;
 SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
     &pg_ps_enabled, 0, "Are large page mappings enabled?");
 
@@ -1318,7 +1318,7 @@ static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_large_map_getptp_unlocked(void);
 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
 #if VM_NRESERVLEVEL > 0
-static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     vm_page_t mpte, struct rwlock **lockp);
 #endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
@@ -6856,7 +6856,7 @@ pmap_pde_ept_executable(pmap_t pmap, pd_entry_t pde)
  * aligned, contiguous physical memory and (2) the 4KB page mappings must have
  * identical characteristics. 
  */
-static void
+static bool
 pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
     struct rwlock **lockp)
 {
@@ -6865,6 +6865,10 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
        pt_entry_t allpte_PG_A, PG_A, PG_G, PG_M, PG_PKU_MASK, PG_RW, PG_V;
        int PG_PTE_CACHE;
 
+       PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+       if (!pmap_ps_enabled(pmap))
+               return (false);
+
        PG_A = pmap_accessed_bit(pmap);
        PG_G = pmap_global_bit(pmap);
        PG_M = pmap_modified_bit(pmap);
@@ -6873,8 +6877,6 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
        PG_PKU_MASK = pmap_pku_mask_bit(pmap);
        PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
 
-       PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-
        /*
         * Examine the first PTE in the specified PTP.  Abort if this PTE is
         * ineligible for promotion due to hardware errata, invalid, or does
@@ -6883,12 +6885,12 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
        firstpte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
        newpde = *firstpte;
        if (!pmap_allow_2m_x_page(pmap, pmap_pde_ept_executable(pmap, newpde)))
-               return;
+               return (false);
        if ((newpde & ((PG_FRAME & PDRMASK) | PG_V)) != PG_V) {
                counter_u64_add(pmap_pde_p_failures, 1);
                CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
                    " in pmap %p", va, pmap);
-               return;
+               return (false);
        }
 
        /*
@@ -6933,7 +6935,7 @@ setpde:
                        counter_u64_add(pmap_pde_p_failures, 1);
                        CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
                            " in pmap %p", va, pmap);
-                       return;
+                       return (false);
                }
 setpte:
                if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
@@ -6952,7 +6954,7 @@ setpte:
                        counter_u64_add(pmap_pde_p_failures, 1);
                        CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#lx"
                            " in pmap %p", va, pmap);
-                       return;
+                       return (false);
                }
                allpte_PG_A &= oldpte;
                pa -= PAGE_SIZE;
@@ -6993,7 +6995,7 @@ setpte:
                CTR2(KTR_PMAP,
                    "pmap_promote_pde: failure for va %#lx in pmap %p", va,
                    pmap);
-               return;
+               return (false);
        }
 
        /*
@@ -7018,6 +7020,7 @@ setpte:
        counter_u64_add(pmap_pde_promotions, 1);
        CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
            " in pmap %p", va, pmap);
+       return (true);
 }
 #endif /* VM_NRESERVLEVEL > 0 */
 
@@ -7391,10 +7394,9 @@ unchanged:
         * populated, then attempt promotion.
         */
        if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-           pmap_ps_enabled(pmap) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0)
-               pmap_promote_pde(pmap, pde, va, mpte, &lock);
+               (void)pmap_promote_pde(pmap, pde, va, mpte, &lock);
 #endif
 
        rv = KERN_SUCCESS;
@@ -7782,18 +7784,17 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
         * attempt promotion.
         */
        if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-           pmap_ps_enabled(pmap) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (pde == NULL)
                        pde = pmap_pde(pmap, va);
-               pmap_promote_pde(pmap, pde, va, mpte, lockp);
 
                /*
                 * If promotion succeeds, then the next call to this function
                 * should not be given the unmapped PTP as a hint.
                 */
-               mpte = NULL;
+               if (pmap_promote_pde(pmap, pde, va, mpte, lockp))
+                       mpte = NULL;
        }
 #endif
 
@@ -10359,10 +10360,9 @@ pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype)
        m = PHYS_TO_VM_PAGE(*pte & PG_FRAME);
 
        if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
-           pmap_ps_enabled(pmap) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
-           vm_reserv_level_iffullpop(m) == 0) {
-               pmap_promote_pde(pmap, pde, va, mpte, &lock);
+           vm_reserv_level_iffullpop(m) == 0 &&
+           pmap_promote_pde(pmap, pde, va, mpte, &lock)) {
 #ifdef INVARIANTS
                atomic_add_long(&ad_emulation_superpage_promotions, 1);
 #endif

Reply via email to the mailing list.