Author: markj
Date: Wed Jan 10 20:39:26 2018
New Revision: 327785
URL: https://svnweb.freebsd.org/changeset/base/327785

Log:
  MFC r325530 (jeff), r325566 (kib), r325588 (kib):
  Replace many instances of VM_WAIT with blocking page allocation flags.
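
For orientation, the shape of the conversion (a minimal sketch modeled on the
pmap.c and uma_small_alloc() hunks below, not verbatim from any one file):

	/* Before: callers looped on VM_WAIT when vm_page_alloc() failed. */
	while ((m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL)
		VM_WAIT;

	/*
	 * After: the sleep policy is passed to the allocator itself.
	 * VM_ALLOC_WAITOK sleeps and retries inside vm_page_alloc()
	 * (only permitted without an object, per the new KASSERT),
	 * VM_ALLOC_WAITFAIL sleeps once and then returns NULL so the
	 * caller can revalidate its state and retry, and VM_ALLOC_NOWAIT
	 * never sleeps.
	 */
	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_WAITOK);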

Modified:
  stable/11/sys/amd64/amd64/pmap.c
  stable/11/sys/amd64/amd64/uma_machdep.c
  stable/11/sys/arm64/arm64/uma_machdep.c
  stable/11/sys/fs/tmpfs/tmpfs_subr.c
  stable/11/sys/kern/uipc_shm.c
  stable/11/sys/kern/vfs_bio.c
  stable/11/sys/mips/mips/uma_machdep.c
  stable/11/sys/powerpc/aim/mmu_oea64.c
  stable/11/sys/powerpc/aim/slb.c
  stable/11/sys/powerpc/powerpc/uma_machdep.c
  stable/11/sys/sparc64/sparc64/vm_machdep.c
  stable/11/sys/vm/phys_pager.c
  stable/11/sys/vm/swap_pager.c
  stable/11/sys/vm/uma.h
  stable/11/sys/vm/uma_core.c
  stable/11/sys/vm/vm_kern.c
  stable/11/sys/vm/vm_object.c
  stable/11/sys/vm/vm_page.c
  stable/11/sys/vm/vm_page.h
  stable/11/sys/vm/vm_radix.c
  stable/11/sys/vm/vm_radix.h
  stable/11/sys/x86/iommu/intel_utils.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/amd64/amd64/pmap.c
==============================================================================
--- stable/11/sys/amd64/amd64/pmap.c    Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/amd64/amd64/pmap.c    Wed Jan 10 20:39:26 2018        (r327785)
@@ -2416,9 +2416,8 @@ pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, i
        /*
         * allocate the page directory page
         */
-       while ((pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
-           VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
-               VM_WAIT;
+       pml4pg = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+           VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK);
 
        pml4phys = VM_PAGE_TO_PHYS(pml4pg);
        pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(pml4phys);

Modified: stable/11/sys/amd64/amd64/uma_machdep.c
==============================================================================
--- stable/11/sys/amd64/amd64/uma_machdep.c     Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/amd64/amd64/uma_machdep.c     Wed Jan 10 20:39:26 2018        (r327785)
@@ -46,20 +46,12 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_in
        vm_page_t m;
        vm_paddr_t pa;
        void *va;
-       int pflags;
 
        *flags = UMA_SLAB_PRIV;
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-       for (;;) {
-               m = vm_page_alloc(NULL, 0, pflags);
-               if (m == NULL) {
-                       if (wait & M_NOWAIT)
-                               return (NULL);
-                       else
-                               VM_WAIT;
-               } else
-                       break;
-       }
+       m = vm_page_alloc(NULL, 0,
+           malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+       if (m == NULL)
+               return (NULL);
        pa = m->phys_addr;
        if ((wait & M_NODUMP) == 0)
                dump_add_page(pa);

Modified: stable/11/sys/arm64/arm64/uma_machdep.c
==============================================================================
--- stable/11/sys/arm64/arm64/uma_machdep.c     Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/arm64/arm64/uma_machdep.c     Wed Jan 10 20:39:26 2018        (r327785)
@@ -46,20 +46,12 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_in
        vm_page_t m;
        vm_paddr_t pa;
        void *va;
-       int pflags;
 
        *flags = UMA_SLAB_PRIV;
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-       for (;;) {
-               m = vm_page_alloc(NULL, 0, pflags);
-               if (m == NULL) {
-                       if (wait & M_NOWAIT)
-                               return (NULL);
-                       else
-                               VM_WAIT;
-               } else
-                       break;
-       }
+       m = vm_page_alloc(NULL, 0,
+           malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+       if (m == NULL)
+               return (NULL);
        pa = m->phys_addr;
        if ((wait & M_NODUMP) == 0)
                dump_add_page(pa);

Modified: stable/11/sys/fs/tmpfs/tmpfs_subr.c
==============================================================================
--- stable/11/sys/fs/tmpfs/tmpfs_subr.c Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/fs/tmpfs/tmpfs_subr.c Wed Jan 10 20:39:26 2018        (r327785)
@@ -1401,13 +1401,10 @@ retry:
                                        goto retry;
                                MPASS(m->valid == VM_PAGE_BITS_ALL);
                        } else if (vm_pager_has_page(uobj, idx, NULL, NULL)) {
-                               m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL);
-                               if (m == NULL) {
-                                       VM_OBJECT_WUNLOCK(uobj);
-                                       VM_WAIT;
-                                       VM_OBJECT_WLOCK(uobj);
+                               m = vm_page_alloc(uobj, idx, VM_ALLOC_NORMAL |
+                                   VM_ALLOC_WAITFAIL);
+                               if (m == NULL)
                                        goto retry;
-                               }
                                rv = vm_pager_get_pages(uobj, &m, 1, NULL,
                                    NULL);
                                vm_page_lock(m);

Modified: stable/11/sys/kern/uipc_shm.c
==============================================================================
--- stable/11/sys/kern/uipc_shm.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/kern/uipc_shm.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -448,13 +448,10 @@ retry:
                                if (vm_page_sleep_if_busy(m, "shmtrc"))
                                        goto retry;
                        } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
-                               m = vm_page_alloc(object, idx, VM_ALLOC_NORMAL);
-                               if (m == NULL) {
-                                       VM_OBJECT_WUNLOCK(object);
-                                       VM_WAIT;
-                                       VM_OBJECT_WLOCK(object);
+                               m = vm_page_alloc(object, idx,
+                                   VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
+                               if (m == NULL)
                                        goto retry;
-                               }
                                rv = vm_pager_get_pages(object, &m, 1, NULL,
                                    NULL);
                                vm_page_lock(m);

Modified: stable/11/sys/kern/vfs_bio.c
==============================================================================
--- stable/11/sys/kern/vfs_bio.c        Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/kern/vfs_bio.c        Wed Jan 10 20:39:26 2018        (r327785)
@@ -4470,18 +4470,14 @@ vm_hold_load_pages(struct buf *bp, vm_offset_t from, v
        index = (from - trunc_page((vm_offset_t)bp->b_data)) >> PAGE_SHIFT;
 
        for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
-tryagain:
                /*
                 * note: must allocate system pages since blocking here
                 * could interfere with paging I/O, no matter which
                 * process we are.
                 */
                p = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
-                   VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT));
-               if (p == NULL) {
-                       VM_WAIT;
-                       goto tryagain;
-               }
+                   VM_ALLOC_WIRED | VM_ALLOC_COUNT((to - pg) >> PAGE_SHIFT) |
+                   VM_ALLOC_WAITOK);
                pmap_qenter(pg, &p, 1);
                bp->b_pages[index] = p;
        }

Modified: stable/11/sys/mips/mips/uma_machdep.c
==============================================================================
--- stable/11/sys/mips/mips/uma_machdep.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/mips/mips/uma_machdep.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -50,6 +50,10 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_in
 
        *flags = UMA_SLAB_PRIV;
        pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
+#ifndef __mips_n64
+       pflags &= ~(VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+       pflags |= VM_ALLOC_NOWAIT;
+#endif
 
        for (;;) {
                m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);

Modified: stable/11/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/11/sys/powerpc/aim/mmu_oea64.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/powerpc/aim/mmu_oea64.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -1515,7 +1515,7 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes
        struct pvo_entry *pvo;
         vm_offset_t va;
         vm_page_t m;
-        int pflags, needed_lock;
+        int needed_lock;
 
        /*
         * This entire routine is a horrible hack to avoid bothering kmem
@@ -1526,17 +1526,11 @@ moea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes
 
        *flags = UMA_SLAB_PRIV;
        needed_lock = !PMAP_LOCKED(kernel_pmap);
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-        for (;;) {
-                m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-                if (m == NULL) {
-                        if (wait & M_NOWAIT)
-                                return (NULL);
-                        VM_WAIT;
-                } else
-                        break;
-        }
+       m = vm_page_alloc(NULL, 0,
+           malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+       if (m == NULL)
+               return (NULL);
 
        va = VM_PAGE_TO_PHYS(m);
 

Modified: stable/11/sys/powerpc/aim/slb.c
==============================================================================
--- stable/11/sys/powerpc/aim/slb.c     Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/powerpc/aim/slb.c     Wed Jan 10 20:39:26 2018        (r327785)
@@ -483,24 +483,16 @@ slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u
        static vm_offset_t realmax = 0;
        void *va;
        vm_page_t m;
-       int pflags;
 
        if (realmax == 0)
                realmax = platform_real_maxaddr();
 
        *flags = UMA_SLAB_PRIV;
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-
-       for (;;) {
-               m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax,
-                   PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
-               if (m == NULL) {
-                       if (wait & M_NOWAIT)
-                               return (NULL);
-                       VM_WAIT;
-               } else
-                        break;
-        }
+       m = vm_page_alloc_contig(NULL, 0,
+           malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
+           1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
+       if (m == NULL)
+               return (NULL);
 
        va = (void *) VM_PAGE_TO_PHYS(m);
 

Modified: stable/11/sys/powerpc/powerpc/uma_machdep.c
==============================================================================
--- stable/11/sys/powerpc/powerpc/uma_machdep.c Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/powerpc/powerpc/uma_machdep.c Wed Jan 10 20:39:26 2018        (r327785)
@@ -55,20 +55,13 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_in
        void *va;
        vm_paddr_t pa;
        vm_page_t m;
-       int pflags;
        
        *flags = UMA_SLAB_PRIV;
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-       for (;;) {
-               m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-               if (m == NULL) {
-                       if (wait & M_NOWAIT)
-                               return (NULL);
-                       VM_WAIT;
-               } else
-                       break;
-       }
+       m = vm_page_alloc(NULL, 0,
+           malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+       if (m == NULL) 
+               return (NULL);
 
        pa = VM_PAGE_TO_PHYS(m);
 

Modified: stable/11/sys/sparc64/sparc64/vm_machdep.c
==============================================================================
--- stable/11/sys/sparc64/sparc64/vm_machdep.c  Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/sparc64/sparc64/vm_machdep.c  Wed Jan 10 20:39:26 2018        (r327785)
@@ -394,24 +394,16 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_in
 {
        vm_paddr_t pa;
        vm_page_t m;
-       int pflags;
        void *va;
 
        PMAP_STATS_INC(uma_nsmall_alloc);
 
        *flags = UMA_SLAB_PRIV;
-       pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
 
-       for (;;) {
-               m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
-               if (m == NULL) {
-                       if (wait & M_NOWAIT)
-                               return (NULL);
-                       else
-                               VM_WAIT;
-               } else
-                       break;
-       }
+       m = vm_page_alloc(NULL, 0, 
+           malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+       if (m == NULL)
+               return (NULL);
 
        pa = VM_PAGE_TO_PHYS(m);
        if (dcache_color_ignore == 0 && m->md.color != DCACHE_COLOR(pa)) {

Modified: stable/11/sys/vm/phys_pager.c
==============================================================================
--- stable/11/sys/vm/phys_pager.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/phys_pager.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -209,13 +209,10 @@ retry:
                if (m == NULL) {
                        ahead = MIN(end - i, PHYSALLOC);
                        m = vm_page_alloc(object, i, VM_ALLOC_NORMAL |
-                           VM_ALLOC_ZERO | VM_ALLOC_COUNT(ahead));
-                       if (m == NULL) {
-                               VM_OBJECT_WUNLOCK(object);
-                               VM_WAIT;
-                               VM_OBJECT_WLOCK(object);
+                           VM_ALLOC_ZERO | VM_ALLOC_WAITFAIL |
+                           VM_ALLOC_COUNT(ahead));
+                       if (m == NULL)
                                goto retry;
-                       }
                        if ((m->flags & PG_ZERO) == 0)
                                pmap_zero_page(m);
                        m->valid = VM_PAGE_BITS_ALL;

Modified: stable/11/sys/vm/swap_pager.c
==============================================================================
--- stable/11/sys/vm/swap_pager.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/swap_pager.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -1808,7 +1808,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t p
                                vm_pageout_oom(VM_OOM_SWAPZ);
                                pause("swzonxb", 10);
                        } else
-                               VM_WAIT;
+                               uma_zwait(swblk_zone);
                        VM_OBJECT_WLOCK(object);
                        sb = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
                            rdpi);
@@ -1838,7 +1838,7 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t p
                                vm_pageout_oom(VM_OOM_SWAPZ);
                                pause("swzonxp", 10);
                        } else
-                               VM_WAIT;
+                               uma_zwait(swpctrie_zone);
                        VM_OBJECT_WLOCK(object);
                        sb1 = SWAP_PCTRIE_LOOKUP(&object->un_pager.swp.swp_blks,
                            rdpi);

Modified: stable/11/sys/vm/uma.h
==============================================================================
--- stable/11/sys/vm/uma.h      Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/uma.h      Wed Jan 10 20:39:26 2018        (r327785)
@@ -366,6 +366,11 @@ uma_zfree(uma_zone_t zone, void *item)
 }
 
 /*
+ * Wait until the specified zone can allocate an item.
+ */
+void uma_zwait(uma_zone_t zone);
+
+/*
  * XXX The rest of the prototypes in this header are h0h0 magic for the VM.
  * If you think you need to use it for a normal zone you're probably incorrect.
  */

Modified: stable/11/sys/vm/uma_core.c
==============================================================================
--- stable/11/sys/vm/uma_core.c Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/uma_core.c Wed Jan 10 20:39:26 2018        (r327785)
@@ -1127,7 +1127,9 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t 
        npages = howmany(bytes, PAGE_SIZE);
        while (npages > 0) {
                p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
-                   VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+                   VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
+                   ((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
+                   VM_ALLOC_NOWAIT));
                if (p != NULL) {
                        /*
                         * Since the page does not belong to an object, its
@@ -1137,11 +1139,6 @@ noobj_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t 
                        npages--;
                        continue;
                }
-               if (wait & M_WAITOK) {
-                       VM_WAIT;
-                       continue;
-               }
-
                /*
                 * Page allocation failed, free intermediate pages and
                 * exit.
@@ -2080,6 +2077,15 @@ uma_zdestroy(uma_zone_t zone)
        sx_slock(&uma_drain_lock);
        zone_free_item(zones, zone, NULL, SKIP_NONE);
        sx_sunlock(&uma_drain_lock);
+}
+
+void
+uma_zwait(uma_zone_t zone)
+{
+       void *item;
+
+       item = uma_zalloc_arg(zone, NULL, M_WAITOK);
+       uma_zfree(zone, item);
 }
 
 /* See uma.h */

Modified: stable/11/sys/vm/vm_kern.c
==============================================================================
--- stable/11/sys/vm/vm_kern.c  Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_kern.c  Wed Jan 10 20:39:26 2018        (r327785)
@@ -172,6 +172,8 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flag
                return (0);
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+       pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+       pflags |= VM_ALLOC_NOWAIT;
        VM_OBJECT_WLOCK(object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                tries = 0;
@@ -227,6 +229,8 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, i
                return (0);
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+       pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+       pflags |= VM_ALLOC_NOWAIT;
        npages = atop(size);
        VM_OBJECT_WLOCK(object);
        tries = 0;
@@ -338,10 +342,13 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_siz
 
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+       pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+       if (flags & M_WAITOK)
+               pflags |= VM_ALLOC_WAITFAIL;
 
        i = 0;
-retry:
        VM_OBJECT_WLOCK(object);
+retry:
        mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
        for (; i < size; i += PAGE_SIZE, mpred = m) {
                m = vm_page_alloc_after(object, atop(offset + i), pflags,
@@ -353,11 +360,9 @@ retry:
                 * aren't on any queues.
                 */
                if (m == NULL) {
-                       VM_OBJECT_WUNLOCK(object);
-                       if ((flags & M_NOWAIT) == 0) {
-                               VM_WAIT;
+                       if ((flags & M_NOWAIT) == 0)
                                goto retry;
-                       }
+                       VM_OBJECT_WUNLOCK(object);
                        kmem_unback(object, addr, i);
                        return (KERN_NO_SPACE);
                }

Modified: stable/11/sys/vm/vm_object.c
==============================================================================
--- stable/11/sys/vm/vm_object.c        Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_object.c        Wed Jan 10 20:39:26 2018        (r327785)
@@ -1461,7 +1461,7 @@ retry:
                if (vm_page_rename(m, new_object, idx)) {
                        VM_OBJECT_WUNLOCK(new_object);
                        VM_OBJECT_WUNLOCK(orig_object);
-                       VM_WAIT;
+                       vm_radix_wait();
                        VM_OBJECT_WLOCK(orig_object);
                        VM_OBJECT_WLOCK(new_object);
                        goto retry;
@@ -1523,8 +1523,9 @@ vm_object_collapse_scan_wait(vm_object_t object, vm_pa
                vm_page_lock(p);
        VM_OBJECT_WUNLOCK(object);
        VM_OBJECT_WUNLOCK(backing_object);
+       /* The page is only NULL when rename fails. */
        if (p == NULL)
-               VM_WAIT;
+               vm_radix_wait();
        else
                vm_page_busy_sleep(p, "vmocol", false);
        VM_OBJECT_WLOCK(object);

Modified: stable/11/sys/vm/vm_page.c
==============================================================================
--- stable/11/sys/vm/vm_page.c  Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_page.c  Wed Jan 10 20:39:26 2018        (r327785)
@@ -167,6 +167,7 @@ static void vm_page_insert_radixdone(vm_page_t m, vm_o
     vm_page_t mpred);
 static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
     vm_paddr_t high);
+static int vm_page_alloc_fail(vm_object_t object, int req);
 
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
 
@@ -1606,6 +1607,8 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
            ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
            (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
            ("inconsistent object(%p)/req(%x)", object, req));
+       KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+           ("Can't sleep and retry object insertion."));
        KASSERT(mpred == NULL || mpred->pindex < pindex,
            ("mpred %p doesn't precede pindex 0x%jx", mpred,
            (uintmax_t)pindex));
@@ -1627,6 +1630,7 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
         * Allocate a page if the number of free pages exceeds the minimum
         * for the request class.
         */
+again:
        mtx_lock(&vm_page_queue_free_mtx);
        if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
            (req_class == VM_ALLOC_SYSTEM &&
@@ -1659,10 +1663,8 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
                /*
                 * Not allocatable, give up.
                 */
-               mtx_unlock(&vm_page_queue_free_mtx);
-               atomic_add_int(&vm_pageout_deficit,
-                   max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
-               pagedaemon_wakeup();
+               if (vm_page_alloc_fail(object, req))
+                       goto again;
                return (NULL);
        }
 
@@ -1716,6 +1718,11 @@ vm_page_alloc_after(vm_object_t object, vm_pindex_t pi
                        m->busy_lock = VPB_UNBUSIED;
                        /* Don't change PG_ZERO. */
                        vm_page_free_toq(m);
+                       if (req & VM_ALLOC_WAITFAIL) {
+                               VM_OBJECT_WUNLOCK(object);
+                               vm_radix_wait();
+                               VM_OBJECT_WLOCK(object);
+                       }
                        return (NULL);
                }
 
@@ -1793,6 +1800,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t p
            (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
            ("vm_page_alloc_contig: inconsistent object(%p)/req(%x)", object,
            req));
+       KASSERT(object == NULL || (req & VM_ALLOC_WAITOK) == 0,
+           ("Can't sleep and retry object insertion."));
        if (object != NULL) {
                VM_OBJECT_ASSERT_WLOCKED(object);
                KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
@@ -1818,6 +1827,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t p
         * Can we allocate the pages without the number of free pages falling
         * below the lower bound for the allocation class?
         */
+again:
        mtx_lock(&vm_page_queue_free_mtx);
        if (vm_cnt.v_free_count >= npages + vm_cnt.v_free_reserved ||
            (req_class == VM_ALLOC_SYSTEM &&
@@ -1839,9 +1849,8 @@ retry:
                        m_ret = vm_phys_alloc_contig(npages, low, high,
                            alignment, boundary);
        } else {
-               mtx_unlock(&vm_page_queue_free_mtx);
-               atomic_add_int(&vm_pageout_deficit, npages);
-               pagedaemon_wakeup();
+               if (vm_page_alloc_fail(object, req))
+                       goto again;
                return (NULL);
        }
        if (m_ret != NULL) {
@@ -1910,6 +1919,11 @@ retry:
                                        /* Don't change PG_ZERO. */
                                        vm_page_free_toq(m);
                                }
+                               if (req & VM_ALLOC_WAITFAIL) {
+                                       VM_OBJECT_WUNLOCK(object);
+                                       vm_radix_wait();
+                                       VM_OBJECT_WLOCK(object);
+                               }
                                return (NULL);
                        }
                        mpred = m;
@@ -1982,18 +1996,17 @@ vm_page_alloc_freelist(int flind, int req)
        /*
         * Do not allocate reserved pages unless the req has asked for it.
         */
+again:
        mtx_lock(&vm_page_queue_free_mtx);
        if (vm_cnt.v_free_count > vm_cnt.v_free_reserved ||
            (req_class == VM_ALLOC_SYSTEM &&
            vm_cnt.v_free_count > vm_cnt.v_interrupt_free_min) ||
            (req_class == VM_ALLOC_INTERRUPT &&
-           vm_cnt.v_free_count > 0))
+           vm_cnt.v_free_count > 0)) {
                m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
-       else {
-               mtx_unlock(&vm_page_queue_free_mtx);
-               atomic_add_int(&vm_pageout_deficit,
-                   max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
-               pagedaemon_wakeup();
+       } else {
+               if (vm_page_alloc_fail(NULL, req))
+                       goto again;
                return (NULL);
        }
        if (m == NULL) {
@@ -2557,11 +2570,11 @@ vm_page_reclaim_contig(int req, u_long npages, vm_padd
  *     Sleep until free pages are available for allocation.
  *     - Called in various places before memory allocations.
  */
-void
-vm_wait(void)
+static void
+_vm_wait(void)
 {
 
-       mtx_lock(&vm_page_queue_free_mtx);
+       mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
        if (curproc == pageproc) {
                vm_pageout_pages_needed = 1;
                msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
@@ -2579,7 +2592,47 @@ vm_wait(void)
        }
 }
 
+void
+vm_wait(void)
+{
+
+       mtx_lock(&vm_page_queue_free_mtx);
+       _vm_wait();
+}
+
 /*
+ *     vm_page_alloc_fail:
+ *
+ *     Called when a page allocation function fails.  Informs the
+ *     pagedaemon and performs the requested wait.  Requires the
+ *     page_queue_free and object lock on entry.  Returns with the
+ *     object lock held and free lock released.  Returns an error when
+ *     retry is necessary.
+ *
+ */
+static int
+vm_page_alloc_fail(vm_object_t object, int req)
+{
+
+       mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+       atomic_add_int(&vm_pageout_deficit,
+           max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
+       pagedaemon_wakeup();
+       if (req & (VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL)) {
+               if (object != NULL) 
+                       VM_OBJECT_WUNLOCK(object);
+               _vm_wait();
+               if (object != NULL) 
+                       VM_OBJECT_WLOCK(object);
+               if (req & VM_ALLOC_WAITOK)
+                       return (EAGAIN);
+       } else
+               mtx_unlock(&vm_page_queue_free_mtx);
+       return (0);
+}
+
+/*
  *     vm_waitpfault:  (also see VM_WAITPFAULT macro)
  *
  *     Sleep until free pages are available for allocation.
@@ -3190,11 +3243,16 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, i
 {
        vm_page_t m;
        int sleep;
+       int pflags;
 
        VM_OBJECT_ASSERT_WLOCKED(object);
        KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
            (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
            ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
+       pflags = allocflags &
+           ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
+       if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+               pflags |= VM_ALLOC_WAITFAIL;
 retrylookup:
        if ((m = vm_page_lookup(object, pindex)) != NULL) {
                sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
@@ -3228,13 +3286,10 @@ retrylookup:
                        return (m);
                }
        }
-       m = vm_page_alloc(object, pindex, allocflags);
+       m = vm_page_alloc(object, pindex, pflags);
        if (m == NULL) {
                if ((allocflags & VM_ALLOC_NOWAIT) != 0)
                        return (NULL);
-               VM_OBJECT_WUNLOCK(object);
-               VM_WAIT;
-               VM_OBJECT_WLOCK(object);
                goto retrylookup;
        }
        if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
@@ -3273,6 +3328,7 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pin
     vm_page_t *ma, int count)
 {
        vm_page_t m, mpred;
+       int pflags;
        int i;
        bool sleep;
 
@@ -3287,6 +3343,10 @@ vm_page_grab_pages(vm_object_t object, vm_pindex_t pin
            ("vm_page_grab_pages: VM_ALLOC_SBUSY/IGN_SBUSY mismatch"));
        if (count == 0)
                return (0);
+       pflags = allocflags & ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |
+           VM_ALLOC_WAITFAIL | VM_ALLOC_IGN_SBUSY);
+       if ((allocflags & VM_ALLOC_NOWAIT) == 0)
+               pflags |= VM_ALLOC_WAITFAIL;
        i = 0;
 retrylookup:
        m = vm_radix_lookup_le(&object->rtree, pindex + i);
@@ -3327,14 +3387,10 @@ retrylookup:
                                vm_page_sbusy(m);
                } else {
                        m = vm_page_alloc_after(object, pindex + i,
-                           (allocflags & ~VM_ALLOC_IGN_SBUSY) |
-                           VM_ALLOC_COUNT(count - i), mpred);
+                           pflags | VM_ALLOC_COUNT(count - i), mpred);
                        if (m == NULL) {
                                if ((allocflags & VM_ALLOC_NOWAIT) != 0)
                                        break;
-                               VM_OBJECT_WUNLOCK(object);
-                               VM_WAIT;
-                               VM_OBJECT_WLOCK(object);
                                goto retrylookup;
                        }
                }

Modified: stable/11/sys/vm/vm_page.h
==============================================================================
--- stable/11/sys/vm/vm_page.h  Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_page.h  Wed Jan 10 20:39:26 2018        (r327785)
@@ -408,6 +408,8 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define VM_ALLOC_INTERRUPT     1
 #define VM_ALLOC_SYSTEM                2
 #define        VM_ALLOC_CLASS_MASK     3
+#define        VM_ALLOC_WAITOK         0x0008  /* (acf) Sleep and retry */
+#define        VM_ALLOC_WAITFAIL       0x0010  /* (acf) Sleep and return error */
 #define        VM_ALLOC_WIRED          0x0020  /* (acfgp) Allocate a wired page */
 #define        VM_ALLOC_ZERO           0x0040  /* (acfgp) Allocate a prezeroed page */
 #define        VM_ALLOC_NOOBJ          0x0100  /* (acg) No associated object */
@@ -417,7 +419,7 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define        VM_ALLOC_IGN_SBUSY      0x1000  /* (gp) Ignore shared busy flag */
 #define        VM_ALLOC_NODUMP         0x2000  /* (ag) don't include in dump */
 #define        VM_ALLOC_SBUSY          0x4000  /* (acgp) Shared busy the page */
-#define        VM_ALLOC_NOWAIT         0x8000  /* (gp) Do not sleep */
+#define        VM_ALLOC_NOWAIT         0x8000  /* (acfgp) Do not sleep */
 #define        VM_ALLOC_COUNT_SHIFT    16
 #define        VM_ALLOC_COUNT(count)   ((count) << VM_ALLOC_COUNT_SHIFT)
 
@@ -436,6 +438,10 @@ malloc2vm_flags(int malloc_flags)
                pflags |= VM_ALLOC_ZERO;
        if ((malloc_flags & M_NODUMP) != 0)
                pflags |= VM_ALLOC_NODUMP;
+       if ((malloc_flags & M_NOWAIT))
+               pflags |= VM_ALLOC_NOWAIT;
+       if ((malloc_flags & M_WAITOK))
+               pflags |= VM_ALLOC_WAITOK;
        return (pflags);
 }
 #endif

Modified: stable/11/sys/vm/vm_radix.c
==============================================================================
--- stable/11/sys/vm/vm_radix.c Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_radix.c Wed Jan 10 20:39:26 2018        (r327785)
@@ -775,6 +775,12 @@ vm_radix_replace(struct vm_radix *rtree, vm_page_t new
        panic("%s: original replacing page not found", __func__);
 }
 
+void
+vm_radix_wait(void)
+{
+       uma_zwait(vm_radix_node_zone);
+}
+
 #ifdef DDB
 /*
  * Show details about the given radix node.

Modified: stable/11/sys/vm/vm_radix.h
==============================================================================
--- stable/11/sys/vm/vm_radix.h Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/vm/vm_radix.h Wed Jan 10 20:39:26 2018        (r327785)
@@ -36,6 +36,7 @@
 #ifdef _KERNEL
 
 int            vm_radix_insert(struct vm_radix *rtree, vm_page_t page);
+void           vm_radix_wait(void);
 boolean_t      vm_radix_is_singleton(struct vm_radix *rtree);
 vm_page_t      vm_radix_lookup(struct vm_radix *rtree, vm_pindex_t index);
 vm_page_t      vm_radix_lookup_ge(struct vm_radix *rtree, vm_pindex_t index);

Modified: stable/11/sys/x86/iommu/intel_utils.c
==============================================================================
--- stable/11/sys/x86/iommu/intel_utils.c       Wed Jan 10 20:36:16 2018        (r327784)
+++ stable/11/sys/x86/iommu/intel_utils.c       Wed Jan 10 20:39:26 2018        (r327785)
@@ -257,9 +257,12 @@ vm_page_t
 dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int flags)
 {
        vm_page_t m;
-       int zeroed;
+       int zeroed, aflags;
 
        zeroed = (flags & DMAR_PGF_ZERO) != 0 ? VM_ALLOC_ZERO : 0;
+       aflags = zeroed | VM_ALLOC_NOBUSY | VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP |
+           ((flags & DMAR_PGF_WAITOK) != 0 ? VM_ALLOC_WAITFAIL :
+           VM_ALLOC_NOWAIT);
        for (;;) {
                if ((flags & DMAR_PGF_OBJL) == 0)
                        VM_OBJECT_WLOCK(obj);
@@ -269,8 +272,7 @@ dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int fla
                                VM_OBJECT_WUNLOCK(obj);
                        break;
                }
-               m = vm_page_alloc_contig(obj, idx, VM_ALLOC_NOBUSY |
-                   VM_ALLOC_SYSTEM | VM_ALLOC_NODUMP | zeroed, 1, 0,
+               m = vm_page_alloc_contig(obj, idx, aflags, 1, 0,
                    dmar_high, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
                if ((flags & DMAR_PGF_OBJL) == 0)
                        VM_OBJECT_WUNLOCK(obj);
@@ -282,11 +284,6 @@ dmar_pgalloc(vm_object_t obj, vm_pindex_t idx, int fla
                }
                if ((flags & DMAR_PGF_WAITOK) == 0)
                        break;
-               if ((flags & DMAR_PGF_OBJL) != 0)
-                       VM_OBJECT_WUNLOCK(obj);
-               VM_WAIT;
-               if ((flags & DMAR_PGF_OBJL) != 0)
-                       VM_OBJECT_WLOCK(obj);
        }
        return (m);
 }