The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=f1b642c255a2931fd793219d11562310a2ce49c8

commit f1b642c255a2931fd793219d11562310a2ce49c8
Author:     Mark Johnston <ma...@freebsd.org>
AuthorDate: 2021-11-24 18:43:26 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2021-11-24 19:02:55 +0000

    vm_fault: Introduce a fault_status enum for internal return types
    
    Rather than overloading the meanings of the Mach statuses, introduce a
    new set for use internally in the fault code.  This makes the control
    flow easier to follow and provides some extra error checking when a
    fault status variable is used in a switch statement.
    
    vm_fault_lookup() and vm_fault_relookup() continue to use Mach statuses
    for now, as there isn't much benefit to converting them and they
    effectively pass through a status from vm_map_lookup().
    
    Obtained from:  jeff (object_concurrency patches)
    Reviewed by:    kib
    MFC after:      2 weeks
    Differential Revision:  https://reviews.freebsd.org/D33017
---
 sys/vm/vm_fault.c | 202 ++++++++++++++++++++++++++++++------------------------
 1 file changed, 111 insertions(+), 91 deletions(-)
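
As background for the diff below: the "extra error checking" mentioned in the
commit message comes from the compiler's -Wswitch diagnostic, which warns when
a switch on an enum value has no default case and omits an enumerator. A shared
int status space such as the Mach KERN_* codes cannot get that check, because
the compiler has no way to know which subset of values a helper actually
returns. A minimal standalone sketch of the pattern (not FreeBSD code; all
demo_* names are hypothetical):

#include <stdio.h>

enum demo_status {
	DEMO_SUCCESS = 1,	/* Operation completed. */
	DEMO_FAILURE,		/* Unrecoverable error. */
	DEMO_RESTART,		/* Caller should retry from the top. */
	DEMO_CONTINUE,		/* Fall through to the slow path. */
};

static enum demo_status
demo_try_fast_path(int input)
{
	if (input < 0)
		return (DEMO_FAILURE);
	if (input == 0)
		return (DEMO_RESTART);
	if (input > 100)
		return (DEMO_CONTINUE);
	return (DEMO_SUCCESS);
}

int
main(void)
{
	/*
	 * Deleting any one of these four cases (with no default) draws
	 * a -Wswitch warning; an int return type would stay silent.
	 */
	switch (demo_try_fast_path(42)) {
	case DEMO_SUCCESS:
		printf("done\n");
		break;
	case DEMO_FAILURE:
		printf("error\n");
		break;
	case DEMO_RESTART:
		printf("retry\n");
		break;
	case DEMO_CONTINUE:
		printf("slow path\n");
		break;
	}
	return (0);
}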

diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 75868c17d32d..0e4d9fd33c6f 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -155,6 +155,19 @@ struct faultstate {
        struct vnode    *vp;
 };
 
+/*
+ * Return codes for internal fault routines.
+ */
+enum fault_status {
+       FAULT_SUCCESS = 1,      /* Return success to user. */
+       FAULT_FAILURE,          /* Return failure to user. */
+       FAULT_CONTINUE,         /* Continue faulting. */
+       FAULT_RESTART,          /* Restart fault. */
+       FAULT_OUT_OF_BOUNDS,    /* Invalid address for pager. */
+       FAULT_HARD,             /* Performed I/O. */
+       FAULT_SOFT,             /* Found valid page. */
+};
+
 static void vm_fault_dontneed(const struct faultstate *fs, vm_offset_t vaddr,
            int ahead);
 static void vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
@@ -298,7 +311,7 @@ vm_fault_dirty(struct faultstate *fs, vm_page_t m)
 /*
  * Unlocks fs.first_object and fs.map on success.
  */
-static int
+static enum fault_status
 vm_fault_soft_fast(struct faultstate *fs)
 {
        vm_page_t m, m_map;
@@ -306,17 +319,20 @@ vm_fault_soft_fast(struct faultstate *fs)
        vm_page_t m_super;
        int flags;
 #endif
-       int psind, rv;
+       int psind;
        vm_offset_t vaddr;
+       enum fault_status res;
 
        MPASS(fs->vp == NULL);
+
+       res = FAULT_SUCCESS;
        vaddr = fs->vaddr;
        vm_object_busy(fs->first_object);
        m = vm_page_lookup(fs->first_object, fs->first_pindex);
        /* A busy page can be mapped for read|execute access. */
        if (m == NULL || ((fs->prot & VM_PROT_WRITE) != 0 &&
            vm_page_busied(m)) || !vm_page_all_valid(m)) {
-               rv = KERN_FAILURE;
+               res = FAULT_FAILURE;
                goto out;
        }
        m_map = m;
@@ -351,10 +367,12 @@ vm_fault_soft_fast(struct faultstate *fs)
                }
        }
 #endif
-       rv = pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
-           PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind);
-       if (rv != KERN_SUCCESS)
+       if (pmap_enter(fs->map->pmap, vaddr, m_map, fs->prot, fs->fault_type |
+           PMAP_ENTER_NOSLEEP | (fs->wired ? PMAP_ENTER_WIRED : 0), psind) !=
+           KERN_SUCCESS) {
+               res = FAULT_FAILURE;
                goto out;
+       }
        if (fs->m_hold != NULL) {
                (*fs->m_hold) = m;
                vm_page_wire(m);
@@ -368,7 +386,7 @@ vm_fault_soft_fast(struct faultstate *fs)
 
 out:
        vm_object_unbusy(fs->first_object);
-       return (rv);
+       return (res);
 }
 
 static void
@@ -417,13 +435,14 @@ vm_fault_populate_cleanup(vm_object_t object, vm_pindex_t first,
        }
 }
 
-static int
+static enum fault_status
 vm_fault_populate(struct faultstate *fs)
 {
        vm_offset_t vaddr;
        vm_page_t m;
        vm_pindex_t map_first, map_last, pager_first, pager_last, pidx;
        int bdry_idx, i, npages, psind, rv;
+       enum fault_status res;
 
        MPASS(fs->object == fs->first_object);
        VM_OBJECT_ASSERT_WLOCKED(fs->first_object);
@@ -436,6 +455,8 @@ vm_fault_populate(struct faultstate *fs)
        unlock_map(fs);
        unlock_vp(fs);
 
+       res = FAULT_SUCCESS;
+
        /*
         * Call the pager (driver) populate() method.
         *
@@ -456,11 +477,11 @@ vm_fault_populate(struct faultstate *fs)
                 */
                vm_fault_restore_map_lock(fs);
                if (fs->map->timestamp != fs->map_generation)
-                       return (KERN_RESTART);
-               return (KERN_NOT_RECEIVER);
+                       return (FAULT_RESTART);
+               return (FAULT_CONTINUE);
        }
        if (rv != VM_PAGER_OK)
-               return (KERN_FAILURE); /* AKA SIGSEGV */
+               return (FAULT_FAILURE); /* AKA SIGSEGV */
 
        /* Ensure that the driver is obeying the interface. */
        MPASS(pager_first <= pager_last);
@@ -480,7 +501,7 @@ vm_fault_populate(struct faultstate *fs)
                        if (m != fs->m)
                                vm_page_xunbusy(m);
                }
-               return (KERN_RESTART);
+               return (FAULT_RESTART);
        }
 
        /*
@@ -510,8 +531,10 @@ vm_fault_populate(struct faultstate *fs)
                    PMAP_ENTER_LARGEPAGE, bdry_idx);
                VM_OBJECT_WLOCK(fs->first_object);
                vm_page_xunbusy(m);
-               if (rv != KERN_SUCCESS)
+               if (rv != KERN_SUCCESS) {
+                       res = FAULT_FAILURE;
                        goto out;
+               }
                if ((fs->fault_flags & VM_FAULT_WIRE) != 0) {
                        for (i = 0; i < atop(pagesizes[bdry_idx]); i++)
                                vm_page_wire(m + i);
@@ -596,7 +619,7 @@ vm_fault_populate(struct faultstate *fs)
        }
 out:
        curthread->td_ru.ru_majflt++;
-       return (rv);
+       return (res);
 }
 
 static int prot_fault_translation;
@@ -699,18 +722,18 @@ vm_fault_trap(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
        return (result);
 }
 
-static int
+static enum fault_status
 vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
 {
        struct vnode *vp;
        int error, locked;
 
        if (fs->object->type != OBJT_VNODE)
-               return (KERN_SUCCESS);
+               return (FAULT_CONTINUE);
        vp = fs->object->handle;
        if (vp == fs->vp) {
                ASSERT_VOP_LOCKED(vp, "saved vnode is not locked");
-               return (KERN_SUCCESS);
+               return (FAULT_CONTINUE);
        }
 
        /*
@@ -732,7 +755,7 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
        error = vget(vp, locked | LK_CANRECURSE | LK_NOWAIT);
        if (error == 0) {
                fs->vp = vp;
-               return (KERN_SUCCESS);
+               return (FAULT_CONTINUE);
        }
 
        vhold(vp);
@@ -744,7 +767,7 @@ vm_fault_lock_vnode(struct faultstate *fs, bool objlocked)
        vdrop(vp);
        fs->vp = vp;
        KASSERT(error == 0, ("vm_fault: vget failed %d", error));
-       return (KERN_RESOURCE_SHORTAGE);
+       return (FAULT_RESTART);
 }
 
 /*
@@ -1112,33 +1135,35 @@ vm_fault_allocate_oom(struct faultstate *fs)
 /*
  * Allocate a page directly or via the object populate method.
  */
-static int
+static enum fault_status
 vm_fault_allocate(struct faultstate *fs)
 {
        struct domainset *dset;
-       int rv;
+       enum fault_status res;
 
        if ((fs->object->flags & OBJ_SIZEVNLOCK) != 0) {
-               rv = vm_fault_lock_vnode(fs, true);
-               MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
-               if (rv == KERN_RESOURCE_SHORTAGE)
-                       return (rv);
+               res = vm_fault_lock_vnode(fs, true);
+               MPASS(res == FAULT_CONTINUE || res == FAULT_RESTART);
+               if (res == FAULT_RESTART)
+                       return (res);
        }
 
-       if (fs->pindex >= fs->object->size)
-               return (KERN_OUT_OF_BOUNDS);
+       if (fs->pindex >= fs->object->size) {
+               unlock_and_deallocate(fs);
+               return (FAULT_OUT_OF_BOUNDS);
+       }
 
        if (fs->object == fs->first_object &&
            (fs->first_object->flags & OBJ_POPULATE) != 0 &&
            fs->first_object->shadow_count == 0) {
-               rv = vm_fault_populate(fs);
-               switch (rv) {
-               case KERN_SUCCESS:
-               case KERN_FAILURE:
-               case KERN_PROTECTION_FAILURE:
-               case KERN_RESTART:
-                       return (rv);
-               case KERN_NOT_RECEIVER:
+               res = vm_fault_populate(fs);
+               switch (res) {
+               case FAULT_SUCCESS:
+               case FAULT_FAILURE:
+               case FAULT_RESTART:
+                       unlock_and_deallocate(fs);
+                       return (res);
+               case FAULT_CONTINUE:
                        /*
                         * Pager's populate() method
                         * returned VM_PAGER_BAD.
@@ -1174,11 +1199,11 @@ vm_fault_allocate(struct faultstate *fs)
        if (fs->m == NULL) {
                if (vm_fault_allocate_oom(fs))
                        vm_waitpfault(dset, vm_pfault_oom_wait * hz);
-               return (KERN_RESOURCE_SHORTAGE);
+               return (FAULT_RESTART);
        }
        fs->oom_started = false;
 
-       return (KERN_NOT_RECEIVER);
+       return (FAULT_CONTINUE);
 }
 
 /*
@@ -1186,11 +1211,12 @@ vm_fault_allocate(struct faultstate *fs)
  * that the pager has it, and potentially retrieve additional
  * pages at the same time.
  */
-static int
+static enum fault_status
 vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
 {
        vm_offset_t e_end, e_start;
        int ahead, behind, cluster_offset, rv;
+       enum fault_status status;
        u_char behavior;
 
        /*
@@ -1227,10 +1253,10 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
         */
        unlock_map(fs);
 
-       rv = vm_fault_lock_vnode(fs, false);
-       MPASS(rv == KERN_SUCCESS || rv == KERN_RESOURCE_SHORTAGE);
-       if (rv == KERN_RESOURCE_SHORTAGE)
-               return (rv);
+       status = vm_fault_lock_vnode(fs, false);
+       MPASS(status == FAULT_CONTINUE || status == FAULT_RESTART);
+       if (status == FAULT_RESTART)
+               return (status);
        KASSERT(fs->vp == NULL || !fs->map->system_map,
            ("vm_fault: vnode-backed object mapped by system map"));
 
@@ -1269,7 +1295,7 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
        *aheadp = ahead;
        rv = vm_pager_get_pages(fs->object, &fs->m, 1, behindp, aheadp);
        if (rv == VM_PAGER_OK)
-               return (KERN_SUCCESS);
+               return (FAULT_HARD);
        if (rv == VM_PAGER_ERROR)
                printf("vm_fault: pager read error, pid %d (%s)\n",
                    curproc->p_pid, curproc->p_comm);
@@ -1282,11 +1308,11 @@ vm_fault_getpages(struct faultstate *fs, int *behindp, int *aheadp)
                VM_OBJECT_WLOCK(fs->object);
                fault_page_free(&fs->m);
                unlock_and_deallocate(fs);
-               return (KERN_OUT_OF_BOUNDS);
+               return (FAULT_OUT_OF_BOUNDS);
        }
        KASSERT(rv == VM_PAGER_FAIL,
            ("%s: unepxected pager error %d", __func__, rv));
-       return (KERN_NOT_RECEIVER);
+       return (FAULT_CONTINUE);
 }
 
 /*
@@ -1329,8 +1355,8 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type,
     int fault_flags, vm_page_t *m_hold)
 {
        struct faultstate fs;
-       int ahead, behind, faultcount;
-       int result, rv;
+       int ahead, behind, faultcount, rv;
+       enum fault_status res;
        bool dead, hardfault;
 
        VM_CNT_INC(v_vm_faults);
@@ -1356,11 +1382,11 @@ RetryFault:
         * Find the backing store object and offset into it to begin the
         * search.
         */
-       result = vm_fault_lookup(&fs);
-       if (result != KERN_SUCCESS) {
-               if (result == KERN_RESOURCE_SHORTAGE)
+       rv = vm_fault_lookup(&fs);
+       if (rv != KERN_SUCCESS) {
+               if (rv == KERN_RESOURCE_SHORTAGE)
                        goto RetryFault;
-               return (result);
+               return (rv);
        }
 
        /*
@@ -1374,9 +1400,9 @@ RetryFault:
            (fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) == 0 &&
            (fs.fault_flags & (VM_FAULT_WIRE | VM_FAULT_DIRTY)) == 0) {
                VM_OBJECT_RLOCK(fs.first_object);
-               rv = vm_fault_soft_fast(&fs);
-               if (rv == KERN_SUCCESS)
-                       return (rv);
+               res = vm_fault_soft_fast(&fs);
+               if (res == FAULT_SUCCESS)
+                       return (KERN_SUCCESS);
                if (!VM_OBJECT_TRYUPGRADE(fs.first_object)) {
                        VM_OBJECT_RUNLOCK(fs.first_object);
                        VM_OBJECT_WLOCK(fs.first_object);
@@ -1406,23 +1432,20 @@ RetryFault:
        fs.pindex = fs.first_pindex;
 
        if ((fs.entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) != 0) {
-               rv = vm_fault_allocate(&fs);
-               switch (rv) {
-               case KERN_RESTART:
-                       unlock_and_deallocate(&fs);
-                       /* FALLTHROUGH */
-               case KERN_RESOURCE_SHORTAGE:
+               res = vm_fault_allocate(&fs);
+               switch (res) {
+               case FAULT_RESTART:
                        goto RetryFault;
-               case KERN_SUCCESS:
-               case KERN_FAILURE:
-               case KERN_PROTECTION_FAILURE:
-               case KERN_OUT_OF_BOUNDS:
-                       unlock_and_deallocate(&fs);
-                       return (rv);
-               case KERN_NOT_RECEIVER:
+               case FAULT_SUCCESS:
+                       return (KERN_SUCCESS);
+               case FAULT_FAILURE:
+                       return (KERN_FAILURE);
+               case FAULT_OUT_OF_BOUNDS:
+                       return (KERN_OUT_OF_BOUNDS);
+               case FAULT_CONTINUE:
                        break;
                default:
-                       panic("vm_fault: Unhandled rv %d", rv);
+                       panic("vm_fault: Unhandled status %d", res);
                }
        }
 
@@ -1474,23 +1497,20 @@ RetryFault:
                 */
                if (fs.m == NULL && (fs.object->type != OBJT_DEFAULT ||
                    fs.object == fs.first_object)) {
-                       rv = vm_fault_allocate(&fs);
-                       switch (rv) {
-                       case KERN_RESTART:
-                               unlock_and_deallocate(&fs);
-                               /* FALLTHROUGH */
-                       case KERN_RESOURCE_SHORTAGE:
+                       res = vm_fault_allocate(&fs);
+                       switch (res) {
+                       case FAULT_RESTART:
                                goto RetryFault;
-                       case KERN_SUCCESS:
-                       case KERN_FAILURE:
-                       case KERN_PROTECTION_FAILURE:
-                       case KERN_OUT_OF_BOUNDS:
-                               unlock_and_deallocate(&fs);
-                               return (rv);
-                       case KERN_NOT_RECEIVER:
+                       case FAULT_SUCCESS:
+                               return (KERN_SUCCESS);
+                       case FAULT_FAILURE:
+                               return (KERN_FAILURE);
+                       case FAULT_OUT_OF_BOUNDS:
+                               return (KERN_OUT_OF_BOUNDS);
+                       case FAULT_CONTINUE:
                                break;
                        default:
-                               panic("vm_fault: Unhandled rv %d", rv);
+                               panic("vm_fault: Unhandled status %d", res);
                        }
                }
 
@@ -1513,16 +1533,16 @@ RetryFault:
                         */
                        VM_OBJECT_WUNLOCK(fs.object);
 
-                       rv = vm_fault_getpages(&fs, &behind, &ahead);
-                       if (rv == KERN_SUCCESS) {
+                       res = vm_fault_getpages(&fs, &behind, &ahead);
+                       if (res == FAULT_SUCCESS) {
                                faultcount = behind + 1 + ahead;
                                hardfault = true;
                                break; /* break to PAGE HAS BEEN FOUND. */
                        }
-                       if (rv == KERN_RESOURCE_SHORTAGE)
+                       if (res == FAULT_RESTART)
                                goto RetryFault;
-                       if (rv == KERN_OUT_OF_BOUNDS)
-                               return (rv);
+                       if (res == FAULT_OUT_OF_BOUNDS)
+                               return (KERN_OUT_OF_BOUNDS);
                        VM_OBJECT_WLOCK(fs.object);
                }
 
@@ -1583,12 +1603,12 @@ RetryFault:
         * lookup.
         */
        if (!fs.lookup_still_valid) {
-               result = vm_fault_relookup(&fs);
-               if (result != KERN_SUCCESS) {
+               rv = vm_fault_relookup(&fs);
+               if (rv != KERN_SUCCESS) {
                        fault_deallocate(&fs);
-                       if (result == KERN_RESTART)
+                       if (rv == KERN_RESTART)
                                goto RetryFault;
-                       return (result);
+                       return (rv);
                }
        }
        VM_OBJECT_ASSERT_UNLOCKED(fs.object);
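
The net control-flow shape after this diff: vm_fault() is the single point
where internal fault_status values are translated back into the Mach KERN_*
codes that external callers expect, while flow-control values such as
FAULT_RESTART and FAULT_CONTINUE are consumed inside the fault code (via goto
RetryFault and the retry loop) and never escape. A standalone sketch of that
boundary translation (hypothetical demo_* names, not the committed code):

#include <stdio.h>

/* Stand-ins for the Mach-style codes visible to external callers. */
#define	DEMO_KERN_SUCCESS	0
#define	DEMO_KERN_FAILURE	5

enum demo_fault_status {
	DEMO_FAULT_SUCCESS = 1,	/* Return success to caller. */
	DEMO_FAULT_FAILURE,	/* Return failure to caller. */
	DEMO_FAULT_RESTART,	/* Internal only: retry the fault. */
};

static enum demo_fault_status
demo_fault_attempt(int *attempts)
{
	/* Pretend the first attempt must be retried. */
	return (--(*attempts) > 0 ? DEMO_FAULT_RESTART : DEMO_FAULT_SUCCESS);
}

static int
demo_fault(void)
{
	int attempts = 2;

	for (;;) {
		switch (demo_fault_attempt(&attempts)) {
		case DEMO_FAULT_SUCCESS:
			return (DEMO_KERN_SUCCESS);
		case DEMO_FAULT_FAILURE:
			return (DEMO_KERN_FAILURE);
		case DEMO_FAULT_RESTART:
			continue;	/* Retry; never leaks out. */
		}
	}
}

int
main(void)
{
	printf("demo_fault() -> %d\n", demo_fault());
	return (0);
}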
