Author: glebius
Date: Wed Feb  7 18:32:51 2018
New Revision: 328982
URL: https://svnweb.freebsd.org/changeset/base/328982

Log:
  Fix three miscalculations in the number of boot pages:
  
  o Most startup zones have struct uma_slab embedded into the slab,
    so provide the macro UMA_SLAB_SPACE and use it instead of
    UMA_SLAB_SIZE when calculating how many pages a certain kind of
    allocation requires.  Some zones are offpage, so we might have a
    positive inaccuracy.
  o The keg for the zone of zones is allocated "dynamically", so we
    need +1 when calculating the number of pages for kegs. [1]
  o The zone of zones and the zone of kegs have an arbitrary alignment
    of 32, and this also needs to be accounted for. [2]
  
  While here, add more comments and improve diagnostic messages.
  
  Reported by:  pho [1], jtl [2]

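To illustrate the first fix, here is a minimal userland sketch (not
the kernel code itself): the 80-byte slab header, the 64-byte item
size, and the item count are hypothetical stand-ins, and howmany()
is copied from sys/param.h.

#include <stdio.h>

#define howmany(x, y)   (((x) + ((y) - 1)) / (y))

#define UMA_SLAB_SIZE   4096    /* assumed: one 4 KB page per slab */
#define SLAB_HDR        80      /* assumed sizeof(struct uma_slab) */
#define UMA_SLAB_SPACE  (UMA_SLAB_SIZE - SLAB_HDR)

int
main(void)
{
        int item = 64;          /* stand-in item size */
        int need = 256;         /* stand-in number of boot-time items */

        /* Old formula: pretends all 4096 bytes of a slab hold items. */
        printf("pages by UMA_SLAB_SIZE:  %d\n",
            howmany(need, UMA_SLAB_SIZE / item));
        /* New formula: only the space after the embedded header counts. */
        printf("pages by UMA_SLAB_SPACE: %d\n",
            howmany(need, UMA_SLAB_SPACE / item));
        return (0);
}

With these stand-in numbers the old formula reports 4 pages where 5
are actually needed, which is exactly the kind of early shortfall the
improved panic message below diagnoses.
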
Modified:
  head/sys/kern/subr_vmem.c
  head/sys/vm/uma_core.c
  head/sys/vm/uma_int.h
  head/sys/vm/vm_page.c

Modified: head/sys/kern/subr_vmem.c
==============================================================================
--- head/sys/kern/subr_vmem.c   Wed Feb  7 18:18:33 2018        (r328981)
+++ head/sys/kern/subr_vmem.c   Wed Feb  7 18:32:51 2018        (r328982)
@@ -667,7 +667,8 @@ int
 vmem_startup_count(void)
 {
 
-       return (howmany(BT_MAXALLOC, UMA_SLAB_SIZE / sizeof(struct vmem_btag)));
+       return (howmany(BT_MAXALLOC,
+           UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
 }
 #endif
 

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c      Wed Feb  7 18:18:33 2018        (r328981)
+++ head/sys/vm/uma_core.c      Wed Feb  7 18:32:51 2018        (r328982)
@@ -1102,7 +1102,7 @@ startup_alloc(uma_zone_t zone, vm_size_t bytes, int do
        }
        mtx_unlock(&uma_boot_pages_mtx);
        if (booted < BOOT_PAGEALLOC)
-               panic("UMA: Increase vm.boot_pages");
+               panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
        /*
         * Now that we've booted reset these users to their real allocator.
         */
@@ -1785,6 +1785,8 @@ zone_foreach(void (*zfunc)(uma_zone_t))
  * zone of zones and zone of kegs are accounted separately.
  */
 #define        UMA_BOOT_ZONES  11
+/* Zone of zones and zone of kegs have arbitrary alignment. */
+#define        UMA_BOOT_ALIGN  32
 static int zsize, ksize;
 int
 uma_startup_count(int zones)
@@ -1797,26 +1799,36 @@ uma_startup_count(int zones)
            (sizeof(struct uma_cache) * (mp_maxid + 1)) +
            (sizeof(struct uma_zone_domain) * vm_ndomains);
 
-       /* Memory for the zone of zones and zone of kegs. */
+       /*
+        * Memory for the zone of kegs and its keg,
+        * and for the zone of zones.
+        */
        pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
            roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
 
        zones += UMA_BOOT_ZONES;
 
-       /* Memory for startup zones, UMA and VM, ... */
+       /* Memory for the rest of the startup zones, UMA and VM, ... */
        if (zsize > UMA_SLAB_SIZE)
-               pages += zones * howmany(zsize, UMA_SLAB_SIZE);
+               pages += zones * howmany(roundup2(zsize, UMA_BOOT_ALIGN),
+                   UMA_SLAB_SIZE);
        else
-               pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+               pages += howmany(zones,
+                   UMA_SLAB_SPACE / roundup2(zsize, UMA_BOOT_ALIGN));
 
-       /* ... and their kegs. */
-       pages += howmany(zones, UMA_SLAB_SIZE / ksize);
+       /* ... and their kegs. Note that the zone of zones allocates a keg! */
+       pages += howmany(zones + 1,
+           UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN));
 
        /*
-        * Take conservative approach that every zone
-        * is going to allocate hash.
+        * Most of the startup zones are not offpage; that's why we
+        * use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all of the
+        * calculations.  Some large bucket zones will be offpage and
+        * thus will allocate hashes.  We take the conservative
+        * approach and assume that all zones may allocate a hash.
+        * This may give a positive inaccuracy, usually one extra page.
         */
-       pages += howmany(zones, UMA_SLAB_SIZE /
+       pages += howmany(zones, UMA_SLAB_SPACE /
            (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
 
        return (pages);
@@ -1856,7 +1868,7 @@ uma_startup(void *mem, int npages)
        args.uminit = zero_init;
        args.fini = NULL;
        args.keg = masterkeg;
-       args.align = 32 - 1;
+       args.align = UMA_BOOT_ALIGN - 1;
        args.flags = UMA_ZFLAG_INTERNAL;
        zone_ctor(kegs, zsize, &args, M_WAITOK);
 
@@ -1871,7 +1883,7 @@ uma_startup(void *mem, int npages)
        args.uminit = zero_init;
        args.fini = NULL;
        args.keg = NULL;
-       args.align = 32 - 1;
+       args.align = UMA_BOOT_ALIGN - 1;
        args.flags = UMA_ZFLAG_INTERNAL;
        zone_ctor(zones, zsize, &args, M_WAITOK);
 

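A similar sketch for the second and third fixes, mirroring the keg
part of uma_startup_count() above.  Again the sizes are hypothetical
stand-ins; roundup2() and howmany() are copied from sys/param.h.

#include <stdio.h>

#define howmany(x, y)   (((x) + ((y) - 1)) / (y))
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))  /* y: power of 2 */

#define UMA_SLAB_SIZE   4096    /* assumed: one 4 KB page per slab */
#define SLAB_HDR        80      /* assumed sizeof(struct uma_slab) */
#define UMA_SLAB_SPACE  (UMA_SLAB_SIZE - SLAB_HDR)
#define UMA_BOOT_ALIGN  32      /* mirrors the new macro above */

int
main(void)
{
        int zones = 72;         /* stand-in count of startup zones */
        int ksize = 110;        /* stand-in keg structure size */

        /* Old: no alignment, no keg for the zone of zones. */
        printf("keg pages, old: %d\n",
            howmany(zones, UMA_SLAB_SIZE / ksize));
        /*
         * New: each keg is padded to UMA_BOOT_ALIGN, only
         * UMA_SLAB_SPACE bytes per slab hold kegs, and the zone of
         * zones allocates a keg of its own, hence the +1.
         */
        printf("keg pages, new: %d\n",
            howmany(zones + 1,
            UMA_SLAB_SPACE / roundup2(ksize, UMA_BOOT_ALIGN)));
        return (0);
}

Here the old formula yields 2 pages and the corrected one 3, showing
how the missing alignment and the extra keg compound into a real
undercount.
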
Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h       Wed Feb  7 18:18:33 2018        (r328981)
+++ head/sys/vm/uma_int.h       Wed Feb  7 18:32:51 2018        (r328982)
@@ -138,6 +138,11 @@
 #define UMA_MAX_WASTE  10
 
 /*
+ * Size of memory in a non-offpage slab that is available for actual items.
+ */
+#define        UMA_SLAB_SPACE  (UMA_SLAB_SIZE - sizeof(struct uma_slab))
+
+/*
  * I doubt there will be many cases where this is exceeded. This is the initial
  * size of the hash table for uma_slabs that are managed off page. This hash
  * does expand by powers of two.  Currently it doesn't get smaller.

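For intuition on the new macro: assuming the same stand-in values, a
4 KB slab and an 80-byte struct uma_slab, the embedded header costs
roughly two 64-byte items per slab.

#include <stdio.h>

#define UMA_SLAB_SIZE   4096    /* assumed: one 4 KB page per slab */
#define SLAB_HDR        80      /* assumed sizeof(struct uma_slab) */
#define UMA_SLAB_SPACE  (UMA_SLAB_SIZE - SLAB_HDR)

int
main(void)
{
        /* A 64-byte item appears to fit 64 times in a raw page... */
        printf("items by UMA_SLAB_SIZE:  %d\n", UMA_SLAB_SIZE / 64);
        /* ...but fits only 62 times once the header is paid for. */
        printf("items by UMA_SLAB_SPACE: %d\n", UMA_SLAB_SPACE / 64);
        return (0);
}
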
Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Wed Feb  7 18:18:33 2018        (r328981)
+++ head/sys/vm/vm_page.c       Wed Feb  7 18:32:51 2018        (r328982)
@@ -519,7 +519,8 @@ vm_page_startup(vm_offset_t vaddr)
        /* vmem_startup() calls uma_prealloc(). */
        boot_pages += vmem_startup_count();
        /* vm_map_startup() calls uma_prealloc(). */
-       boot_pages += howmany(MAX_KMAP, UMA_SLAB_SIZE / sizeof(struct vm_map));
+       boot_pages += howmany(MAX_KMAP,
+           UMA_SLAB_SPACE / sizeof(struct vm_map));
 
        /*
         * Before going fully functional kmem_init() does allocation