The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=f1d73aacdc47529310e2302094685295c032e28f

commit f1d73aacdc47529310e2302094685295c032e28f
Author:     Alan Cox <a...@freebsd.org>
AuthorDate: 2024-06-02 08:56:47 +0000
Commit:     Alan Cox <a...@freebsd.org>
CommitDate: 2024-06-04 05:38:05 +0000

    pmap: Skip some superpage promotion attempts that will fail
    
    Implement a simple heuristic to skip pointless promotion attempts by
    pmap_enter_quick_locked() and moea64_enter().  Specifically, when
    vm_fault() calls pmap_enter_quick() to map neighboring pages at the end
    of a copy-on-write fault, there is no point in attempting promotion in
    pmap_enter_quick_locked() and moea64_enter().  Promotion will fail
    because the base pages have differing protection.
    
    Reviewed by:    kib
    Differential Revision:  https://reviews.freebsd.org/D45431
    MFC after:      1 week
---
 sys/amd64/amd64/pmap.c      |  3 ++-
 sys/arm64/arm64/pmap.c      |  3 ++-
 sys/i386/i386/pmap.c        |  3 ++-
 sys/powerpc/aim/mmu_oea64.c |  9 +++++++--
 sys/riscv/riscv/pmap.c      |  3 ++-
 sys/vm/vm.h                 |  1 +
 sys/vm/vm_fault.c           | 11 ++++++++++-
 7 files changed, 26 insertions(+), 7 deletions(-)
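
[Editorial note, not part of the commit: the log message's claim that promotion "will fail because the base pages have differing protection" can be illustrated with a standalone sketch. This is hypothetical and heavily simplified; the real uniformity check lives in each pmap's promotion routine, e.g. pmap_promote_pde() on amd64. After a copy-on-write fault the faulted page is mapped writable while its prefaulted neighbors are mapped read-only, so the base-page attributes within a superpage-sized region cannot all match.]

/*
 * Hypothetical, simplified model of the promotion uniformity check.
 * Real pmaps compare full PTE attributes, not just a write bit.
 */
#include <stdbool.h>
#include <stdio.h>

#define NPTEPG  512     /* base pages per 2MB superpage on amd64 */
#define PTE_RW  0x2     /* illustrative write bit; not the hardware layout */

static bool
promotion_possible(const unsigned *pte, int n)
{
        for (int i = 1; i < n; i++)
                if ((pte[i] & PTE_RW) != (pte[0] & PTE_RW))
                        return (false); /* differing protection: no promotion */
        return (true);
}

int
main(void)
{
        unsigned pte[NPTEPG] = { 0 };   /* prefaulted neighbors: read-only */

        pte[7] |= PTE_RW;               /* the COW-faulted page: writable */
        printf("promotion possible: %s\n",
            promotion_possible(pte, NPTEPG) ? "yes" : "no");
        return (0);
}

[Since the mismatch is guaranteed in this scenario, the patch below avoids even starting the scan by tagging the prefault protection with VM_PROT_NO_PROMOTE.]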

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 8105c9d92478..2f3119aede67 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -7818,7 +7818,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
         * If both the PTP and the reservation are fully populated, then
         * attempt promotion.
         */
-       if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+       if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+           (mpte == NULL || mpte->ref_count == NPTEPG) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (pde == NULL)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index aaba6ca189a1..b6bc113ba8a4 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -6052,7 +6052,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
         * If both the PTP and the reservation are fully populated, then
         * attempt promotion.
         */
-       if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+       if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+           (mpte == NULL || mpte->ref_count == NL3PG) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (l2 == NULL)
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 40d8ceaf42b9..5808c31a99af 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4250,7 +4250,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
         * If both the PTP and the reservation are fully populated, then
         * attempt promotion.
         */
-       if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+       if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+           (mpte == NULL || mpte->ref_count == NPTEPG) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (pde == NULL)
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index 391f90bb04eb..273dc38214e2 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -1755,10 +1755,14 @@ out:
         * If the VA of the entered page is not aligned with its PA,
         * don't try page promotion as it is not possible.
         * This reduces the number of promotion failures dramatically.
+        *
+        * Ignore VM_PROT_NO_PROMOTE unless PMAP_ENTER_QUICK_LOCKED.
         */
        if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
            (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
            (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
+           ((prot & VM_PROT_NO_PROMOTE) == 0 ||
+           (flags & PMAP_ENTER_QUICK_LOCKED) == 0) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0)
                moea64_sp_promote(pmap, va, m);
@@ -1850,8 +1854,9 @@ moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
-       moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-           PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
+       moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE |
+           VM_PROT_NO_PROMOTE), PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED,
+           0);
 }
 
 vm_paddr_t
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
index 1e507f62696e..e8504bcb0f59 100644
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -3519,7 +3519,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
         * If both the PTP and the reservation are fully populated, then attempt
         * promotion.
         */
-       if ((mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
+       if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+           (mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
            (m->flags & PG_FICTITIOUS) == 0 &&
            vm_reserv_level_iffullpop(m) == 0) {
                if (l2 == NULL)
diff --git a/sys/vm/vm.h b/sys/vm/vm.h
index 5fac6b3f2e78..b7d149a2fca2 100644
--- a/sys/vm/vm.h
+++ b/sys/vm/vm.h
@@ -76,6 +76,7 @@ typedef u_char vm_prot_t;     /* protection codes */
 #define        VM_PROT_COPY            ((vm_prot_t) 0x08)      /* copy-on-read */
 #define        VM_PROT_PRIV_FLAG       ((vm_prot_t) 0x10)
 #define        VM_PROT_FAULT_LOOKUP    VM_PROT_PRIV_FLAG
+#define        VM_PROT_NO_PROMOTE      VM_PROT_PRIV_FLAG
 #define        VM_PROT_QUICK_NOFAULT   VM_PROT_PRIV_FLAG       /* same to save bits */
 
 #define        VM_PROT_ALL             (VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index fbe0a117a388..3da411643f26 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1891,6 +1891,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
        vm_offset_t addr, starta;
        vm_pindex_t pindex;
        vm_page_t m;
+       vm_prot_t prot;
        int i;
 
        pmap = fs->map->pmap;
@@ -1906,6 +1907,14 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
                if (starta < entry->start)
                        starta = entry->start;
        }
+       prot = entry->protection;
+
+       /*
+        * If pmap_enter() has enabled write access on a nearby mapping, then
+        * don't attempt promotion, because it will fail.
+        */
+       if ((fs->prot & VM_PROT_WRITE) != 0)
+               prot |= VM_PROT_NO_PROMOTE;
 
        /*
         * Generate the sequence of virtual addresses that are candidates for
@@ -1949,7 +1958,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
                }
                if (vm_page_all_valid(m) &&
                    (m->flags & PG_FICTITIOUS) == 0)
-                       pmap_enter_quick(pmap, addr, m, entry->protection);
+                       pmap_enter_quick(pmap, addr, m, prot);
                if (!obj_locked || lobject != entry->object.vm_object)
                        VM_OBJECT_RUNLOCK(lobject);
        }
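
[Editorial note, not part of the commit: for reference, a minimal standalone sketch of the vm_fault_prefault() side of the change. The flag values mirror sys/vm/vm.h as patched above; the surrounding fault-handling logic is omitted and the variables are illustrative only. When the fault enabled write access, the protection handed to pmap_enter_quick() carries VM_PROT_NO_PROMOTE, which the pmap-level checks above then test.]

/*
 * Standalone sketch of the vm_fault_prefault() change: a COW fault that
 * enabled write access tags the prefault protection with
 * VM_PROT_NO_PROMOTE.  Flag values mirror sys/vm/vm.h; everything else
 * here is illustrative only.
 */
#include <stdio.h>

typedef unsigned char vm_prot_t;

#define VM_PROT_READ            ((vm_prot_t)0x01)
#define VM_PROT_WRITE           ((vm_prot_t)0x02)
#define VM_PROT_PRIV_FLAG       ((vm_prot_t)0x10)
#define VM_PROT_NO_PROMOTE      VM_PROT_PRIV_FLAG

int
main(void)
{
        vm_prot_t fault_prot = VM_PROT_READ | VM_PROT_WRITE;   /* fs->prot after COW */
        vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;         /* entry->protection */

        if ((fault_prot & VM_PROT_WRITE) != 0)
                prot |= VM_PROT_NO_PROMOTE;

        /* prot is what vm_fault_prefault() would pass to pmap_enter_quick(). */
        printf("prefault prot = 0x%02x (promotion %s)\n", (unsigned)prot,
            (prot & VM_PROT_NO_PROMOTE) != 0 ? "skipped" : "attempted");
        return (0);
}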
