The branch main has been updated by dougm:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=6b33d9dc46c2f1cbb91273771198b71d7cbdb328

commit 6b33d9dc46c2f1cbb91273771198b71d7cbdb328
Author:     Doug Moore <do...@freebsd.org>
AuthorDate: 2025-02-27 16:55:33 +0000
Commit:     Doug Moore <do...@freebsd.org>
CommitDate: 2025-02-27 16:55:33 +0000

    vm_page: expose page_alloc_after
    
    vm_page_alloc() just calls vm_page_alloc_after(), after it has found
    the predecessor of a page parameter. Many callers of vm_page_alloc()
    already know that predecessor. Letting them pass that to
    vm_page_alloc_after() directly could save a little redundant
    calculation.
    
    Reviewed by:    alc
    Tested by:      pho
    Differential Revision:  https://reviews.freebsd.org/D49103
---
 sys/vm/swap_pager.c  | 23 ++++++++++++++++-------
 sys/vm/vm_fault.c    | 10 ++++++----
 sys/vm/vm_page.c     |  8 +++-----
 sys/vm/vm_page.h     |  1 +
 sys/vm/vnode_pager.c | 22 ++++++++++++++--------
 5 files changed, 40 insertions(+), 24 deletions(-)

diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 3d02f365cad9..dbe0b6710367 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -1415,9 +1415,11 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
         * Allocate readahead and readbehind pages.
         */
        if (rbehind != NULL) {
+               pindex = ma[0]->pindex;
+               /* Stepping backward from pindex, mpred doesn't change. */
                for (i = 1; i <= *rbehind; i++) {
-                       p = vm_page_alloc(object, ma[0]->pindex - i,
-                           VM_ALLOC_NORMAL);
+                       p = vm_page_alloc_after(object, pindex - i,
+                           VM_ALLOC_NORMAL, mpred);
                        if (p == NULL)
                                break;
                        p->oflags |= VPO_SWAPINPROG;
@@ -1426,9 +1428,11 @@ swap_pager_getpages_locked(struct pctrie_iter *blks, vm_object_t object,
                *rbehind = i - 1;
        }
        if (rahead != NULL) {
+               p = ma[reqcount - 1];
+               pindex = p->pindex;
                for (i = 0; i < *rahead; i++) {
-                       p = vm_page_alloc(object,
-                           ma[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
+                       p = vm_page_alloc_after(object, pindex + i + 1,
+                           VM_ALLOC_NORMAL, p);
                        if (p == NULL)
                                break;
                        p->oflags |= VPO_SWAPINPROG;
@@ -1982,9 +1986,14 @@ swap_pager_swapoff_object(struct swdevt *sp, vm_object_t object)
                        if (m != NULL) {
                                if (!vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL))
                                        break;
-                       } else if ((m = vm_page_alloc(object, blks.index + i,
-                           VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL)) == NULL)
-                               break;
+                       } else {
+                               m = vm_radix_iter_lookup_le(&pages,
+                                   blks.index + i);
+                               m = vm_page_alloc_after(object, blks.index + i,
+                                   VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL, m);
+                               if (m == NULL)
+                                       break;
+                       }
 
                        /* Get the page from swap, and restart the scan. */
                        vm_object_pip_add(object, 1);
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 8c7fe9e37af1..c97a7cd998df 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -2085,7 +2085,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
        vm_pindex_t dst_pindex, pindex, src_pindex;
        vm_prot_t access, prot;
        vm_offset_t vaddr;
-       vm_page_t dst_m;
+       vm_page_t dst_m, mpred;
        vm_page_t src_m;
        bool upgrade;
 
@@ -2157,9 +2157,11 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map __unused,
         * with the source object, all of its pages must be dirtied,
         * regardless of whether they can be written.
         */
+       mpred = (src_object == dst_object) ?
+          vm_page_mpred(src_object, src_pindex) : NULL;
        for (vaddr = dst_entry->start, dst_pindex = 0;
            vaddr < dst_entry->end;
-           vaddr += PAGE_SIZE, dst_pindex++) {
+           vaddr += PAGE_SIZE, dst_pindex++, mpred = dst_m) {
 again:
                /*
                 * Find the page in the source object, and copy it in.
@@ -2197,9 +2199,9 @@ again:
                        /*
                         * Allocate a page in the destination object.
                         */
-                       dst_m = vm_page_alloc(dst_object, (src_object ==
+                       dst_m = vm_page_alloc_after(dst_object, (src_object ==
                            dst_object ? src_pindex : 0) + dst_pindex,
-                           VM_ALLOC_NORMAL);
+                           VM_ALLOC_NORMAL, mpred);
                        if (dst_m == NULL) {
                                VM_OBJECT_WUNLOCK(dst_object);
                                VM_OBJECT_RUNLOCK(object);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index e4c2aadf5d56..e9c371e62d70 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -162,8 +162,6 @@ SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
 
 static uma_zone_t fakepg_zone;
 
-static vm_page_t vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
-    int req, vm_page_t mpred);
 static void vm_page_alloc_check(vm_page_t m);
 static vm_page_t vm_page_alloc_nofree_domain(int domain, int req);
 static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
@@ -2173,7 +2171,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
  * the resident page in the object with largest index smaller than the given
  * page index, or NULL if no such page exists.
  */
-static vm_page_t
+vm_page_t
 vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
     int req, vm_page_t mpred)
 {
@@ -5045,8 +5043,8 @@ retrylookup:
                                    !vm_page_tryxbusy(ma[i]))
                                        break;
                        } else {
-                               ma[i] = vm_page_alloc(object, m->pindex + i,
-                                   VM_ALLOC_NORMAL);
+                               ma[i] = vm_page_alloc_after(object,
+                                   m->pindex + i, VM_ALLOC_NORMAL, ma[i - 1]);
                                if (ma[i] == NULL)
                                        break;
                        }
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 5a166d9ba44c..05c16212a995 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -608,6 +608,7 @@ void vm_page_activate (vm_page_t);
 void vm_page_advise(vm_page_t m, int advice);
 vm_page_t vm_page_mpred(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_alloc(vm_object_t, vm_pindex_t, int);
+vm_page_t vm_page_alloc_after(vm_object_t, vm_pindex_t, int, vm_page_t);
 vm_page_t vm_page_alloc_domain_after(vm_object_t, vm_pindex_t, int, int,
     vm_page_t);
 vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index 842d4ab89b90..d5d312b3cf71 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1042,19 +1042,23 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
        i = bp->b_npages = 0;
        if (rbehind) {
                vm_pindex_t startpindex, tpindex;
-               vm_page_t p;
+               vm_page_t mpred, p;
 
                VM_OBJECT_WLOCK(object);
                startpindex = m[0]->pindex - rbehind;
-               if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
-                   p->pindex >= startpindex)
-                       startpindex = p->pindex + 1;
+               if ((mpred = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
+                   mpred->pindex >= startpindex)
+                       startpindex = mpred->pindex + 1;
 
-               /* tpindex is unsigned; beware of numeric underflow. */
+               /*
+                * tpindex is unsigned; beware of numeric underflow.
+                * Stepping backward from pindex, mpred doesn't change.
+                */
                for (tpindex = m[0]->pindex - 1;
                    tpindex >= startpindex && tpindex < m[0]->pindex;
                    tpindex--, i++) {
-                       p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
+                       p = vm_page_alloc_after(object, tpindex,
+                           VM_ALLOC_NORMAL, mpred);
                        if (p == NULL) {
                                /* Shift the array. */
                                for (int j = 0; j < i; j++)
@@ -1089,9 +1093,11 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
                if (endpindex > object->size)
                        endpindex = object->size;
 
-               for (tpindex = m[count - 1]->pindex + 1;
+               p = m[count - 1];
+               for (tpindex = p->pindex + 1;
                    tpindex < endpindex; i++, tpindex++) {
-                       p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
+                       p = vm_page_alloc_after(object, tpindex,
+                           VM_ALLOC_NORMAL, p);
                        if (p == NULL)
                                break;
                        bp->b_pages[i] = p;

Reply via email to