On Tue, Feb 06, 2018 at 04:42:13PM -0800, Gleb Smirnoff wrote:
T> Hi Peter,
T> 
T> can you please try this patch? In either case success
T> or not, please provide me with dmesg. Thanks a lot!
Sorry, the patch was missing one file. 99.9% this is a no-op, but better to use the full patch.

-- 
Gleb Smirnoff
Index: sys/kern/subr_vmem.c
===================================================================
--- sys/kern/subr_vmem.c	(revision 328955)
+++ sys/kern/subr_vmem.c	(working copy)
@@ -667,7 +667,8 @@ int
 vmem_startup_count(void)
 {
 
-	return (howmany(BT_MAXALLOC, UMA_SLAB_SIZE / sizeof(struct vmem_btag)));
+	return (howmany(BT_MAXALLOC,
+	    UMA_SLAB_SPACE / sizeof(struct vmem_btag)));
 }
 #endif
 
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c	(revision 328955)
+++ sys/vm/uma_core.c	(working copy)
@@ -96,6 +96,7 @@ __FBSDID("$FreeBSD$");
 #ifdef DEBUG_MEMGUARD
 #include <vm/memguard.h>
 #endif
+#define	DIAGNOSTIC
 
 /*
  * This is the zone and keg from which all zones are spawned.
@@ -1800,6 +1801,7 @@ uma_startup_count(int zones)
 	/* Memory for the zone of zones and zone of kegs. */
 	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
 	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);
+	printf("boot_pages master %d\n", pages);
 
 	zones += UMA_BOOT_ZONES;
 
@@ -1807,17 +1809,20 @@ uma_startup_count(int zones)
 	if (zsize > UMA_SLAB_SIZE)
 		pages += zones * howmany(zsize, UMA_SLAB_SIZE);
 	else
-		pages += howmany(zones, UMA_SLAB_SIZE / zsize);
+		pages += howmany(zones, UMA_SLAB_SPACE / zsize);
+	printf("boot_pages zones %d\n", pages);
 
 	/* ... and their kegs. */
-	pages += howmany(zones, UMA_SLAB_SIZE / ksize);
+	pages += howmany(zones, UMA_SLAB_SPACE / ksize);
+	printf("boot_pages kegs %d\n", pages);
 
 	/*
 	 * Take conservative approach that every zone
 	 * is going to allocate hash.
 	 */
-	pages += howmany(zones, UMA_SLAB_SIZE /
+	pages += howmany(zones, UMA_SLAB_SPACE /
 	    (sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT));
+	printf("boot_pages hash %d\n", pages);
 
 	return (pages);
 }
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h	(revision 328955)
+++ sys/vm/uma_int.h	(working copy)
@@ -138,6 +138,11 @@
 #define UMA_MAX_WASTE	10
 
 /*
+ * Size of memory in a not offpage slab available for actual items.
+ */
+#define	UMA_SLAB_SPACE	(UMA_SLAB_SIZE - sizeof(struct uma_slab))
+
+/*
  * I doubt there will be many cases where this is exceeded. This is the initial
  * size of the hash table for uma_slabs that are managed off page. This hash
  * does expand by powers of two. Currently it doesn't get smaller.
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 328955)
+++ sys/vm/vm_page.c	(working copy)
@@ -518,8 +518,11 @@ vm_page_startup(vm_offset_t vaddr)
 
 	/* vmem_startup() calls uma_prealloc(). */
 	boot_pages += vmem_startup_count();
+	printf("boot_pages vmem %d\n", boot_pages);
 	/* vm_map_startup() calls uma_prealloc(). */
-	boot_pages += howmany(MAX_KMAP, UMA_SLAB_SIZE / sizeof(struct vm_map));
+	boot_pages += howmany(MAX_KMAP,
+	    UMA_SLAB_SPACE / sizeof(struct vm_map));
+	printf("boot_pages kmap %d\n", boot_pages);
 
 	/*
 	 * Before going fully functional kmem_init() does allocation
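For readers following along: the arithmetic the patch corrects is that a non-offpage slab keeps its struct uma_slab header inline, so only UMA_SLAB_SIZE - sizeof(struct uma_slab) bytes actually hold items. Dividing by UMA_SLAB_SIZE therefore overestimates items-per-slab and can undercount boot_pages. Below is a small standalone sketch (not part of the patch, and not kernel code) showing the effect; the slab size, header size, item size, and item count are made-up illustrative values, not the real kernel ones.

/* sketch: why dividing by the full slab size undercounts pages */
#include <stdio.h>

#define howmany(x, y)	(((x) + ((y) - 1)) / (y))

#define SLAB_SIZE	4096	/* assumed slab size (one page) */
#define SLAB_HDR	32	/* assumed inline slab header size */
#define SLAB_SPACE	(SLAB_SIZE - SLAB_HDR)	/* what UMA_SLAB_SPACE models */

int
main(void)
{
	int item = 128;		/* hypothetical item size in bytes */
	int nitems = 1024;	/* hypothetical preallocation count */

	/* Old estimate: pretends the whole slab is available for items. */
	int old_pages = howmany(nitems, SLAB_SIZE / item);
	/* New estimate: only the space left after the slab header counts. */
	int new_pages = howmany(nitems, SLAB_SPACE / item);

	printf("items per slab: old %d, new %d\n",
	    SLAB_SIZE / item, SLAB_SPACE / item);
	printf("pages needed:   old %d, new %d\n", old_pages, new_pages);
	return (0);
}

With these made-up numbers the old formula fits 32 items per slab and asks for 32 pages, while accounting for the header fits only 31 items and needs 34 pages; that gap is the kind of shortfall the switch to UMA_SLAB_SPACE in vmem_startup_count(), uma_startup_count() and vm_page_startup() is meant to close.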