Author: markj
Date: Sat Jul 27 16:33:11 2019
New Revision: 350374
URL: https://svnweb.freebsd.org/changeset/base/350374

Log:
  MFC r349840:
  Add a per-CPU page cache per VM free pool.

Modified:
  stable/12/sys/vm/vm_page.c
  stable/12/sys/vm/vm_page.h
  stable/12/sys/vm/vm_pagequeue.h
Directory Properties:
  stable/12/   (props changed)

Modified: stable/12/sys/vm/vm_page.c
==============================================================================
--- stable/12/sys/vm/vm_page.c  Sat Jul 27 16:11:04 2019        (r350373)
+++ stable/12/sys/vm/vm_page.c  Sat Jul 27 16:33:11 2019        (r350374)
@@ -192,20 +192,28 @@ static void
 vm_page_init_cache_zones(void *dummy __unused)
 {
        struct vm_domain *vmd;
-       int i;
+       struct vm_pgcache *pgcache;
+       int domain, pool;
 
-       for (i = 0; i < vm_ndomains; i++) {
-               vmd = VM_DOMAIN(i);
+       for (domain = 0; domain < vm_ndomains; domain++) {
+               vmd = VM_DOMAIN(domain);
+
                /*
-                * Don't allow the page cache to take up more than .25% of
+                * Don't allow the page caches to take up more than .25% of
                 * memory.
                 */
-               if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
+               if (vmd->vmd_page_count / 400 < 256 * mp_ncpus * VM_NFREEPOOL)
                        continue;
-               vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
-                   sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-                   vm_page_import, vm_page_release, vmd,
-                   UMA_ZONE_NOBUCKETCACHE | UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
+               for (pool = 0; pool < VM_NFREEPOOL; pool++) {
+                       pgcache = &vmd->vmd_pgcache[pool];
+                       pgcache->domain = domain;
+                       pgcache->pool = pool;
+                       pgcache->zone = uma_zcache_create("vm pgcache",
+                           sizeof(struct vm_page), NULL, NULL, NULL, NULL,
+                           vm_page_import, vm_page_release, pgcache,
+                           UMA_ZONE_NOBUCKETCACHE | UMA_ZONE_MAXBUCKET |
+                           UMA_ZONE_VM);
+               }
        }
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
@@ -1793,7 +1801,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
 {
        struct vm_domain *vmd;
        vm_page_t m;
-       int flags;
+       int flags, pool;
 
        KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
            (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1810,6 +1818,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pind
 
        flags = 0;
        m = NULL;
+       pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
 again:
 #if VM_NRESERVLEVEL > 0
        /*
@@ -1824,8 +1833,8 @@ again:
        }
 #endif
        vmd = VM_DOMAIN(domain);
-       if (object != NULL && vmd->vmd_pgcache != NULL) {
-               m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
+       if (vmd->vmd_pgcache[pool].zone != NULL) {
+               m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT);
                if (m != NULL) {
                        flags |= PG_PCPU_CACHE;
                        goto found;
@@ -1836,8 +1845,7 @@ again:
                 * If not, allocate it from the free page queues.
                 */
                vm_domain_free_lock(vmd);
-               m = vm_phys_alloc_pages(domain, object != NULL ?
-                   VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
+               m = vm_phys_alloc_pages(domain, pool, 0);
                vm_domain_free_unlock(vmd);
                if (m == NULL) {
                        vm_domain_freecnt_inc(vmd, 1);
@@ -2229,15 +2237,17 @@ static int
 vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
 {
        struct vm_domain *vmd;
+       struct vm_pgcache *pgcache;
        int i;
 
-       vmd = arg;
+       pgcache = arg;
+       vmd = VM_DOMAIN(pgcache->domain);
        /* Only import if we can bring in a full bucket. */
        if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
                return (0);
        domain = vmd->vmd_domain;
        vm_domain_free_lock(vmd);
-       i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
+       i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
            (vm_page_t *)store);
        vm_domain_free_unlock(vmd);
        if (cnt != i)
@@ -2250,10 +2260,12 @@ static void
 vm_page_release(void *arg, void **store, int cnt)
 {
        struct vm_domain *vmd;
+       struct vm_pgcache *pgcache;
        vm_page_t m;
        int i;
 
-       vmd = arg;
+       pgcache = arg;
+       vmd = VM_DOMAIN(pgcache->domain);
        vm_domain_free_lock(vmd);
        for (i = 0; i < cnt; i++) {
                m = (vm_page_t)store[i];
@@ -3469,13 +3481,15 @@ void
 vm_page_free_toq(vm_page_t m)
 {
        struct vm_domain *vmd;
+       uma_zone_t zone;
 
        if (!vm_page_free_prep(m))
                return;
 
        vmd = vm_pagequeue_domain(m);
-       if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache != NULL) {
-               uma_zfree(vmd->vmd_pgcache, m);
+       zone = vmd->vmd_pgcache[m->pool].zone;
+       if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
+               uma_zfree(zone, m);
                return;
        }
        vm_domain_free_lock(vmd);

Modified: stable/12/sys/vm/vm_page.h
==============================================================================
--- stable/12/sys/vm/vm_page.h  Sat Jul 27 16:11:04 2019        (r350373)
+++ stable/12/sys/vm/vm_page.h  Sat Jul 27 16:33:11 2019        (r350374)
@@ -376,6 +376,10 @@ extern struct mtx_padalign pa_lock[];
 /*
  * Page flags.  If changed at any other time than page allocation or
  * freeing, the modification must be protected by the vm_page lock.
+ *
+ * The PG_PCPU_CACHE flag is set at allocation time if the page was
+ * allocated from a per-CPU cache.  It is cleared the next time that the
+ * page is allocated from the physical memory allocator.
  */
 #define        PG_PCPU_CACHE   0x0001          /* was allocated from per-CPU caches */
 #define        PG_FICTITIOUS   0x0004          /* physical page doesn't exist */

Modified: stable/12/sys/vm/vm_pagequeue.h
==============================================================================
--- stable/12/sys/vm/vm_pagequeue.h     Sat Jul 27 16:11:04 2019        (r350373)
+++ stable/12/sys/vm/vm_pagequeue.h     Sat Jul 27 16:33:11 2019        (r350374)
@@ -103,7 +103,11 @@ struct vm_domain {
        struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
        struct mtx_padalign vmd_free_mtx;
        struct mtx_padalign vmd_pageout_mtx;
-       uma_zone_t vmd_pgcache;         /* (c) page free cache. */
+       struct vm_pgcache {
+               int domain;
+               int pool;
+               uma_zone_t zone;
+       } vmd_pgcache[VM_NFREEPOOL];
        struct vmem *vmd_kernel_arena;  /* (c) per-domain kva R/W arena. */
        struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
        u_int vmd_domain;               /* (c) Domain number. */
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscr...@freebsd.org"

Reply via email to