The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=808f5ac3c6dcbe38f505c0c843b0a10ae154e6ec

commit 808f5ac3c6dcbe38f505c0c843b0a10ae154e6ec
Author:     Bojan Novković <bojan.novko...@fer.hr>
AuthorDate: 2023-10-09 00:38:08 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2023-10-09 00:40:44 +0000

    arm64: Add a leaf PTP when pmap_enter(psind=1) creates a wired mapping
    
    Let pmap_enter_l2() create wired mappings.  In particular, allocate a
    leaf PTP for use during demotion.  This is a step towards reverting
    commit 64087fd7f372.
    
    Reviewed by:    alc, markj
    Sponsored by:   Google, Inc. (GSoC 2023)
    MFC after:      2 weeks
    Differential Revision:  https://reviews.freebsd.org/D41634
---
 sys/arm64/arm64/pmap.c | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index a30a037c8d4a..6f2afa0b98a3 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4797,6 +4797,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
        struct spglist free;
        pd_entry_t *l2, old_l2;
        vm_page_t l2pg, mt;
+       vm_page_t uwptpg;
 
        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        KASSERT(ADDR_IS_CANONICAL(va),
@@ -4864,6 +4865,24 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
                }
        }
 
+       /*
+        * Allocate leaf ptpage for wired userspace pages.
+        */
+       uwptpg = NULL;
+       if ((new_l2 & ATTR_SW_WIRED) != 0 && pmap != kernel_pmap) {
+               uwptpg = vm_page_alloc_noobj(VM_ALLOC_WIRED);
+               if (uwptpg == NULL) {
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
+               uwptpg->pindex = pmap_l2_pindex(va);
+               if (pmap_insert_pt_page(pmap, uwptpg, true, false)) {
+                       vm_page_unwire_noq(uwptpg);
+                       vm_page_free(uwptpg);
+                       return (KERN_RESOURCE_SHORTAGE);
+               }
+               pmap_resident_count_inc(pmap, 1);
+               uwptpg->ref_count = NL3PG;
+       }
        if ((new_l2 & ATTR_SW_MANAGED) != 0) {
                /*
                 * Abort this mapping if its PV entry could not be created.
@@ -4871,6 +4890,16 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
                if (!pmap_pv_insert_l2(pmap, va, new_l2, flags, lockp)) {
                        if (l2pg != NULL)
                                pmap_abort_ptp(pmap, va, l2pg);
+                       if (uwptpg != NULL) {
+                               mt = pmap_remove_pt_page(pmap, va);
+                               KASSERT(mt == uwptpg,
+                                   ("removed pt page %p, expected %p", mt,
+                                   uwptpg));
+                               pmap_resident_count_dec(pmap, 1);
+                               uwptpg->ref_count = 1;
+                               vm_page_unwire_noq(uwptpg);
+                               vm_page_free(uwptpg);
+                       }
                        CTR2(KTR_PMAP,
                            "pmap_enter_l2: failure for va %#lx in pmap %p",
                            va, pmap);

Reply via email to