On Wed, Feb 07, 2018 at 07:46:17AM +0100, Peter Holm wrote:
P> On Tue, Feb 06, 2018 at 04:45:49PM -0800, Gleb Smirnoff wrote:
P> > On Tue, Feb 06, 2018 at 04:42:13PM -0800, Gleb Smirnoff wrote:
P> > T>   Hi Peter,
P> > T> 
P> > T>   can you please try this patch? In either case, success
P> > T> or not, please provide me with the dmesg. Thanks a lot!
P> > 
P> > Sorry, the patch was missing one file. 99.9% this is a no-op,
P> > but better to use the full patch.

Let's make one more attempt. I found where I was missing one keg.
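
To show what the missing keg costs in the estimate, here is a minimal userland sketch (illustration only, not part of the patch; all sizes below are made up, the real ones come from uma_int.h). The zone of zones allocates a keg as well, so the keg pages have to be estimated for zones + 1 kegs, not zones:

/*
 * Illustration only, not part of the patch.  All sizes below are made up;
 * the real values are sizeof(struct uma_keg) and the slab geometry from
 * uma_int.h.
 */
#include <stdio.h>
#include <stddef.h>

#define	HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))	/* same as howmany() from sys/param.h */

int
main(void)
{
	size_t slab_space = 4096 - 96;	/* hypothetical UMA_SLAB_SPACE */
	size_t ksize = 512;		/* hypothetical keg allocation size */
	size_t zones = 14;		/* hypothetical number of startup zones */

	/* Old estimate: one keg per startup zone. */
	printf("kegs old: %zu pages\n", HOWMANY(zones, slab_space / ksize));
	/* New estimate: the zone of zones allocates a keg too, hence "+ 1". */
	printf("kegs new: %zu pages\n", HOWMANY(zones + 1, slab_space / ksize));
	return (0);
}

With these made-up numbers the extra keg tips the estimate from 2 to 3 boot pages.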

-- 
Gleb Smirnoff
Index: sys/kern/subr_vmem.c
===================================================================
--- sys/kern/subr_vmem.c	(revision 328955)
+++ sys/kern/subr_vmem.c	(working copy)
@@ -667,7 +667,8 @@ int
 vmem_startup_count(void)
 {
 
-	return (howmany(BT_MAXALLOC, UMA_SLAB_SIZE / sizeof(struct vmem_btag)));
+	return (howmany(BT_MAXALLOC,
+	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
 }
 #endif
 
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c	(revision 328955)
+++ sys/vm/uma_core.c	(working copy)
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
 #ifdef DEBUG_MEMGUARD
 #include <vm/memguard.h>
 #endif
+#define	DIAGNOSTIC
 
 /*
  * This is the zone and keg from which all zones are spawned.
@@ -1796,28 +1797,40 @@ uma_startup_count(int zones)
 	zsize = sizeof(struct uma_zone) +
 	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
 	    (sizeof(struct uma_zone_domain) * vm_ndomains);
+	printf("ksize %d zsize %d slab %lu\n", ksize, zsize, sizeof(struct uma_slab));
 
-	/* Memory for the zone of zones and zone of kegs. */
+	/*
+	 * Memory for the zone of kegs and its keg,
+	 * and for zone of zones.
+	 */
 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
+	printf("boot_pages master %d\n", pages);
 
 	zones += UMA_BOOT_ZONES;
 
-	/* Memory for startup zones, UMA and VM, ... */
+	/* Memory for the rest of startup zones, UMA and VM, ... */
 	if (zsize > UMA_SLAB_SIZE)
 		pages += zones * howmany(zsize, UMA_SLAB_SIZE);
 	else
-		pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+		pages += howmany(zones, UMA_SLAB_SPACE / zsize);
+	printf("boot_pages zones %d\n", pages);
 
-	/* ... and their kegs. */
-	pages += howmany(zones, UMA_SLAB_SIZE / ksize);
+	/* ... and their kegs. Note that zone of zones allocates a keg! */
+	pages += howmany(zones + 1, UMA_SLAB_SPACE / ksize);
+	printf("boot_pages kegs %d\n", pages);
 
 	/*
-	 * Take conservative approach that every zone
-	 * is going to allocate hash.
+	 * Most of the startup zones are not going to be offpage, which
+	 * is why we use UMA_SLAB_SPACE instead of UMA_SLAB_SIZE in all
+	 * of the calculations.  Some large bucket zones will be offpage,
+	 * and thus will allocate hashes.  We take a conservative approach
+	 * and assume that all zones may allocate a hash.  This may give
+	 * us some positive imprecision, usually an extra single page.
 	 */
-	pages += howmany(zones, UMA_SLAB_SIZE /
+	pages += howmany(zones, UMA_SLAB_SPACE /
 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
+	printf("boot_pages hash %d\n", pages);
 
 	return (pages);
 }
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h	(revision 328955)
+++ sys/vm/uma_int.h	(working copy)
@@ -138,6 +138,11 @@
 #define UMA_MAX_WASTE	10
 
 /*
+ * Size of memory in a non-offpage slab that is available for actual items.
+ */
+#define	UMA_SLAB_SPACE	(UMA_SLAB_SIZE - sizeof(struct uma_slab))
+
+/*
  * I doubt there will be many cases where this is exceeded. This is the initial
  * size of the hash table for uma_slabs that are managed off page. This hash
  * does expand by powers of two.  Currently it doesn't get smaller.
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 328955)
+++ sys/vm/vm_page.c	(working copy)
@@ -518,8 +518,11 @@ vm_page_startup(vm_offset_t vaddr)
 
 	/* vmem_startup() calls uma_prealloc(). */
 	boot_pages += vmem_startup_count();
+	printf("boot_pages vmem %d\n", boot_pages);
 	/* vm_map_startup() calls uma_prealloc(). */
-	boot_pages += howmany(MAX_KMAP, UMA_SLAB_SIZE / sizeof(struct vm_map));
+	boot_pages += howmany(MAX_KMAP,
+	    UMA_SLAB_SPACE / sizeof(struct vm_map));
+	printf("boot_pages kmap %d\n", boot_pages);
 
 	/*
 	 * Before going fully functional kmem_init() does allocation
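
For the UMA_SLAB_SPACE part of the patch, a similar sketch with made-up sizes: a non-offpage slab loses sizeof(struct uma_slab) bytes to its in-slab header, so dividing by UMA_SLAB_SIZE overstates how many items fit per slab and can understate the boot page count:

/*
 * Illustration only, not part of the patch.  Hypothetical sizes; the real
 * ones are UMA_SLAB_SIZE, sizeof(struct uma_slab) and sizeof(struct vmem_btag).
 */
#include <stdio.h>
#include <stddef.h>

#define	HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))	/* same as howmany() from sys/param.h */

int
main(void)
{
	size_t slab_size = 4096;	/* hypothetical UMA_SLAB_SIZE */
	size_t slab_hdr = 96;		/* hypothetical sizeof(struct uma_slab) */
	size_t item = 128;		/* hypothetical sizeof(struct vmem_btag) */
	size_t nitems = 320;		/* hypothetical BT_MAXALLOC */

	/* Old: assumes the whole slab is available for items. */
	printf("old: %zu pages\n", HOWMANY(nitems, slab_size / item));
	/* New: only UMA_SLAB_SPACE = UMA_SLAB_SIZE - sizeof(struct uma_slab) is. */
	printf("new: %zu pages\n", HOWMANY(nitems, (slab_size - slab_hdr) / item));
	return (0);
}

Here the old divisor gives 32 items per slab and 10 pages, while the header-aware one gives 31 items and 11 pages, which is the kind of underestimate the patch is meant to close.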