The branch main has been updated by dougm:

URL: 
https://cgit.FreeBSD.org/src/commit/?id=5611a38d818587b307e1fb110f72d2996c170035

commit 5611a38d818587b307e1fb110f72d2996c170035
Author:     Doug Moore <do...@freebsd.org>
AuthorDate: 2025-02-21 21:11:13 +0000
Commit:     Doug Moore <do...@freebsd.org>
CommitDate: 2025-02-21 21:11:13 +0000

    vm_page: define partial page invalidate
    
    Two different functions in different files do the same thing: fill a
    partial page with zeroes. Add that functionality to vm_page.c and
    remove it elsewhere to avoid code duplication.
    
    Reviewed by:    kib
    Differential Revision:  https://reviews.freebsd.org/D49096
---
 sys/fs/tmpfs/tmpfs_subr.c | 47 ++++----------------------------------------
 sys/kern/uipc_shm.c       | 47 ++++----------------------------------------
 sys/vm/vm_page.c          | 50 +++++++++++++++++++++++++++++++++++++++++++++++
 sys/vm/vm_page.h          |  2 ++
 4 files changed, 60 insertions(+), 86 deletions(-)
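
For illustration, here is a minimal sketch of a hypothetical caller of the
new helper, in the style of the truncate paths below: after an object
shrinks to "newsize" bytes, the stale tail of the page straddling the new
end of file is zeroed. The function name zero_eof_tail and its signature
are invented for this sketch and are not part of the commit:

static int
zero_eof_tail(vm_object_t object, off_t newsize)
{
        int base, error;

        /* Nothing to do when the new size is page-aligned. */
        base = newsize & PAGE_MASK;
        if (base == 0)
                return (0);

        VM_OBJECT_WLOCK(object);
        /* Zero from the new EOF to the end of that page. */
        error = vm_page_partial_page_invalidate(object,
            OFF_TO_IDX(newsize), base, PAGE_SIZE);
        /* The helper returns with the object still write-locked. */
        VM_OBJECT_WUNLOCK(object);
        return (error);
}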

diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 41d1f27caf13..0cac19ed3780 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -493,50 +493,11 @@ static int
 tmpfs_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end, boolean_t ignerr)
 {
-       vm_page_t m;
-       int rv, error;
-
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       KASSERT(base >= 0, ("%s: base %d", __func__, base));
-       KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
-           end));
-       error = 0;
-
-retry:
-       m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
-       if (m != NULL) {
-               MPASS(vm_page_all_valid(m));
-       } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-               m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL |
-                   VM_ALLOC_WAITFAIL);
-               if (m == NULL)
-                       goto retry;
-               vm_object_pip_add(object, 1);
-               VM_OBJECT_WUNLOCK(object);
-               rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-               VM_OBJECT_WLOCK(object);
-               vm_object_pip_wakeup(object);
-               if (rv == VM_PAGER_OK) {
-                       /*
-                        * Since the page was not resident, and therefore not
-                        * recently accessed, immediately enqueue it for
-                        * asynchronous laundering.  The current operation is
-                        * not regarded as an access.
-                        */
-                       vm_page_launder(m);
-               } else {
-                       vm_page_free(m);
-                       m = NULL;
-                       if (!ignerr)
-                               error = EIO;
-               }
-       }
-       if (m != NULL) {
-               pmap_zero_page_area(m, base, end - base);
-               vm_page_set_dirty(m);
-               vm_page_xunbusy(m);
-       }
+       int error;
 
+       error = vm_page_partial_page_invalidate(object, idx, base, end);
+       if (ignerr)
+               error = 0;
        return (error);
 }
 
diff --git a/sys/kern/uipc_shm.c b/sys/kern/uipc_shm.c
index 026611a59593..083e95432208 100644
--- a/sys/kern/uipc_shm.c
+++ b/sys/kern/uipc_shm.c
@@ -697,51 +697,12 @@ static int
 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
     int end)
 {
-       vm_page_t m;
-       int rv;
+       int error;
 
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       KASSERT(base >= 0, ("%s: base %d", __func__, base));
-       KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
-           end));
-
-retry:
-       m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
-       if (m != NULL) {
-               MPASS(vm_page_all_valid(m));
-       } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-               m = vm_page_alloc(object, idx,
-                   VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
-               if (m == NULL)
-                       goto retry;
-               vm_object_pip_add(object, 1);
+       error = vm_page_partial_page_invalidate(object, idx, base, end);
+       if (error == EIO)
                VM_OBJECT_WUNLOCK(object);
-               rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
-               VM_OBJECT_WLOCK(object);
-               vm_object_pip_wakeup(object);
-               if (rv == VM_PAGER_OK) {
-                       /*
-                        * Since the page was not resident, and therefore not
-                        * recently accessed, immediately enqueue it for
-                        * asynchronous laundering.  The current operation is
-                        * not regarded as an access.
-                        */
-                       vm_page_launder(m);
-               } else {
-                       vm_page_free(m);
-                       VM_OBJECT_WUNLOCK(object);
-                       return (EIO);
-               }
-       }
-       if (m != NULL) {
-               pmap_zero_page_area(m, base, end - base);
-               KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
-                   __func__, m));
-               vm_page_set_dirty(m);
-               vm_page_xunbusy(m);
-       }
-
-       return (0);
+       return (error);
 }
 
 static int
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index c105aafca40f..f0f3c1e85564 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -5086,6 +5086,56 @@ out:
        return (VM_PAGER_OK);
 }
 
+/*
+ * Fill a partial page with zeroes.
+ */
+int
+vm_page_partial_page_invalidate(vm_object_t object, vm_pindex_t pindex,
+    int base, int end)
+{
+       vm_page_t m;
+       int rv;
+
+       VM_OBJECT_ASSERT_WLOCKED(object);
+       KASSERT(base >= 0, ("%s: base %d", __func__, base));
+       KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
+           end));
+
+retry:
+       m = vm_page_grab(object, pindex, VM_ALLOC_NOCREAT);
+       if (m != NULL) {
+               MPASS(vm_page_all_valid(m));
+       } else if (vm_pager_has_page(object, pindex, NULL, NULL)) {
+               m = vm_page_alloc(object, pindex,
+                   VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+               if (m == NULL)
+                       goto retry;
+               vm_object_pip_add(object, 1);
+               VM_OBJECT_WUNLOCK(object);
+               rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
+               VM_OBJECT_WLOCK(object);
+               vm_object_pip_wakeup(object);
+               if (rv != VM_PAGER_OK) {
+                       vm_page_free(m);
+                       return (EIO);
+               }
+
+               /*
+                * Since the page was not resident, and therefore not recently
+                * accessed, immediately enqueue it for asynchronous laundering.
+                * The current operation is not regarded as an access.
+                */
+               vm_page_launder(m);
+       } else
+               return (0);
+
+       pmap_zero_page_area(m, base, end - base);
+       KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid", __func__, m));
+       vm_page_set_dirty(m);
+       vm_page_xunbusy(m);
+       return (0);
+}
+
 /*
  * Locklessly grab a valid page.  If the page is not valid or not yet
  * allocated this will fall back to the object lock method.
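
Note that the consolidated helper returns EIO with the object still
write-locked; the uipc_shm.c wrapper above therefore re-adds the unlock
its callers expect, while the tmpfs wrapper maps the error to 0 when
ignerr is set. As a second illustration, here is a hedged sketch of
zeroing the ragged edges of a punched hole; zero_hole_edges and its
contract are invented here, and whole pages inside the hole are assumed
to be freed separately (e.g. by vm_object_page_remove()):

static int
zero_hole_edges(vm_object_t object, off_t off, off_t end_off)
{
        int error;

        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT(off <= end_off, ("hole ends before it starts"));

        if (OFF_TO_IDX(off) == OFF_TO_IDX(end_off)) {
                /* The whole hole lies within a single page. */
                if (off == end_off)
                        return (0);
                return (vm_page_partial_page_invalidate(object,
                    OFF_TO_IDX(off), off & PAGE_MASK,
                    end_off & PAGE_MASK));
        }
        /* Ragged front edge: zero to the end of its page. */
        if ((off & PAGE_MASK) != 0) {
                error = vm_page_partial_page_invalidate(object,
                    OFF_TO_IDX(off), off & PAGE_MASK, PAGE_SIZE);
                if (error != 0)
                        return (error);
        }
        /* Ragged back edge: zero from the start of its page. */
        if ((end_off & PAGE_MASK) != 0) {
                error = vm_page_partial_page_invalidate(object,
                    OFF_TO_IDX(end_off), 0, end_off & PAGE_MASK);
                if (error != 0)
                        return (error);
        }
        return (0);
}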
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 744688bf789b..b568fddab8d4 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -627,6 +627,8 @@ vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
     vm_memattr_t memattr);
 void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
 bool vm_page_blacklist_add(vm_paddr_t pa, bool verbose);
+int vm_page_partial_page_invalidate(vm_object_t object, vm_pindex_t pindex,
+    int base, int end);
 vm_page_t vm_page_grab(vm_object_t, vm_pindex_t, int);
 vm_page_t vm_page_grab_unlocked(vm_object_t, vm_pindex_t, int);
 int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
