The branch stable/13 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=61c3b6832d3bcd10430b8badac55e0d4fb16db20

commit 61c3b6832d3bcd10430b8badac55e0d4fb16db20
Author:     Mark Johnston <ma...@freebsd.org>
AuthorDate: 2021-11-24 18:42:32 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2021-12-08 13:39:47 +0000

    vm_fault: Move nera into faultstate
    
    This makes it easier to factor out pieces of vm_fault().  No functional
    change intended.
    
    Obtained from:  jeff (object_concurrency patches)
    Reviewed by:    kib
    
    (cherry picked from commit 45c09a74d681cfcad0a556b753cd56f6084ace46)
---
 sys/vm/vm_fault.c | 62 ++++++++++++++++++++++++++++++-------------------------
 1 file changed, 34 insertions(+), 28 deletions(-)

diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 646f9ddb9017..d85f4b524b72 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -125,9 +125,12 @@ struct faultstate {
        vm_prot_t       fault_type;
        vm_prot_t       prot;
        int             fault_flags;
+       boolean_t       wired;
+
+       /* Control state. */
        struct timeval  oom_start_time;
        bool            oom_started;
-       boolean_t       wired;
+       int             nera;
 
        /* Page reference for cow. */
        vm_page_t m_cow;
@@ -1180,7 +1183,7 @@ vm_fault_allocate(struct faultstate *fs)
  * pages at the same time.
  */
 static int
-vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
+vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
 {
        vm_offset_t e_end, e_start;
        int ahead, behind, cluster_offset, rv;
@@ -1198,6 +1201,20 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
        e_end = fs->entry->end;
        behavior = vm_map_entry_behavior(fs->entry);
 
+       /*
+        * If the pager for the current object might have
+        * the page, then determine the number of additional
+        * pages to read and potentially reprioritize
+        * previously read pages for earlier reclamation.
+        * These operations should only be performed once per
+        * page fault.  Even if the current pager doesn't
+        * have the page, the number of additional pages to
+        * read will apply to subsequent objects in the
+        * shadow chain.
+        */
+       if (fs->nera == -1 && !P_KILLED(curproc))
+               fs->nera = vm_fault_readahead(fs);
+
        /*
         * Release the map lock before locking the vnode or
         * sleeping in the pager.  (If the current object has
@@ -1217,15 +1234,15 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
         * Page in the requested page and hint the pager,
         * that it may bring up surrounding pages.
         */
-       if (nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
+       if (fs->nera == -1 || behavior == MAP_ENTRY_BEHAV_RANDOM ||
            P_KILLED(curproc)) {
                behind = 0;
                ahead = 0;
        } else {
                /* Is this a sequential fault? */
-               if (nera > 0) {
+               if (fs->nera > 0) {
                        behind = 0;
-                       ahead = nera;
+                       ahead = fs->nera;
                } else {
                        /*
                         * Request a cluster of pages that is
@@ -1257,8 +1274,14 @@ vm_fault_getpages(struct faultstate *fs, int nera, int *behindp, int *aheadp)
         * outside the range of the pager, clean up and return
         * an error.
         */
-       if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD)
+       if (rv == VM_PAGER_ERROR || rv == VM_PAGER_BAD) {
+               VM_OBJECT_WLOCK(fs->object);
+               fault_page_free(&fs->m);
+               unlock_and_deallocate(fs);
                return (KERN_OUT_OF_BOUNDS);
+       }
+       KASSERT(rv == VM_PAGER_FAIL,
+           ("%s: unepxected pager error %d", __func__, rv));
        return (KERN_NOT_RECEIVER);
 }
 
@@ -1303,7 +1326,7 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
 {
        struct faultstate fs;
        int ahead, behind, faultcount;
-       int nera, result, rv;
+       int result, rv;
        bool dead, hardfault;
 
        VM_CNT_INC(v_vm_faults);
@@ -1318,8 +1341,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
        fs.map = map;
        fs.lookup_still_valid = false;
        fs.oom_started = false;
+       fs.nera = -1;
        faultcount = 0;
-       nera = -1;
        hardfault = false;
 
 RetryFault:
@@ -1486,21 +1509,7 @@ RetryFault:
                         */
                        VM_OBJECT_WUNLOCK(fs.object);
 
-                       /*
-                        * If the pager for the current object might have
-                        * the page, then determine the number of additional
-                        * pages to read and potentially reprioritize
-                        * previously read pages for earlier reclamation.
-                        * These operations should only be performed once per
-                        * page fault.  Even if the current pager doesn't
-                        * have the page, the number of additional pages to
-                        * read will apply to subsequent objects in the
-                        * shadow chain.
-                        */
-                       if (nera == -1 && !P_KILLED(curproc))
-                               nera = vm_fault_readahead(&fs);
-
-                       rv = vm_fault_getpages(&fs, nera, &behind, &ahead);
+                       rv = vm_fault_getpages(&fs, &behind, &ahead);
                        if (rv == KERN_SUCCESS) {
                                faultcount = behind + 1 + ahead;
                                hardfault = true;
@@ -1508,12 +1517,9 @@ RetryFault:
                        }
                        if (rv == KERN_RESOURCE_SHORTAGE)
                                goto RetryFault;
-                       VM_OBJECT_WLOCK(fs.object);
-                       if (rv == KERN_OUT_OF_BOUNDS) {
-                               fault_page_free(&fs.m);
-                               unlock_and_deallocate(&fs);
+                       if (rv == KERN_OUT_OF_BOUNDS)
                                return (rv);
-                       }
+                       VM_OBJECT_WLOCK(fs.object);
                }
 
                /*

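A rough, standalone sketch of the refactoring pattern the commit applies (not FreeBSD code): a per-fault local such as nera moves into the shared state structure, initialized once by the caller to a sentinel value, so a helper like vm_fault_getpages() can compute and cache it itself instead of taking it as a parameter. The names fault_state, getpages(), and handle_fault() below are hypothetical and only illustrate the idea.

/*
 * Hypothetical sketch of the pattern; these names are illustrative and
 * are not part of sys/vm/vm_fault.c.
 */
#include <stdio.h>

struct fault_state {
	int	nera;		/* Read-ahead length; -1 = not yet computed. */
	int	faultcount;
};

/*
 * The helper consults and caches the read-ahead length through the state
 * struct rather than receiving it as an argument, mirroring the change to
 * vm_fault_getpages().
 */
static int
getpages(struct fault_state *fs)
{
	if (fs->nera == -1)
		fs->nera = 8;	/* Stand-in for vm_fault_readahead(). */
	return (fs->nera > 0 ? 1 + fs->nera : 1);
}

static void
handle_fault(struct fault_state *fs)
{
	fs->nera = -1;		/* Initialized once, alongside the other state. */
	fs->faultcount = getpages(fs);
}

int
main(void)
{
	struct fault_state fs;

	handle_fault(&fs);
	printf("paged in %d page(s)\n", fs.faultcount);
	return (0);
}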