Author: mav
Date: Tue Nov 19 10:51:46 2013
New Revision: 258340
URL: http://svnweb.freebsd.org/changeset/base/258340

Log:
  Implement a mechanism to safely but slowly purge the UMA per-CPU caches.
  
  This is a last resort for very low memory conditions, when other measures
  to free memory have been ineffective.  Sequentially cycle through all CPUs
  and extract their per-CPU cache buckets into the zone cache, from where
  they can be freed.
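
For readers unfamiliar with the per-CPU access pattern the new cache_drain_safe()
relies on, the sketch below shows its general shape: bind the current thread to
each CPU in turn, then enter a critical section so that CPU's private data can be
touched without preemption.  This is a simplified illustration only; the
visit_pcpu_state() callback is a hypothetical stand-in for the real per-zone work
done in the patch, not code from this commit.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/proc.h>
    #include <sys/sched.h>
    #include <sys/smp.h>

    /*
     * Visit per-CPU state on every CPU by migrating the current thread
     * there.  Expensive, but safe: the critical section keeps the thread
     * on that CPU while its private data is inspected.
     */
    static void
    visit_all_cpus(void (*visit_pcpu_state)(int))
    {
            int cpu;

            CPU_FOREACH(cpu) {
                    thread_lock(curthread);
                    sched_bind(curthread, cpu);     /* migrate to 'cpu' */
                    thread_unlock(curthread);

                    critical_enter();               /* no preemption while ...   */
                    visit_pcpu_state(curcpu);       /* ... touching per-CPU data */
                    critical_exit();
            }
            thread_lock(curthread);
            sched_unbind(curthread);                /* restore normal scheduling */
            thread_unlock(curthread);
    }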

Modified:
  head/sys/vm/uma_core.c

Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c      Tue Nov 19 10:39:48 2013        (r258339)
+++ head/sys/vm/uma_core.c      Tue Nov 19 10:51:46 2013        (r258340)
@@ -75,6 +75,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/rwlock.h>
 #include <sys/sbuf.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/vmmeter.h>
 
@@ -684,6 +685,78 @@ cache_drain(uma_zone_t zone)
        ZONE_UNLOCK(zone);
 }
 
+static void
+cache_shrink(uma_zone_t zone)
+{
+
+       if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
+               return;
+
+       ZONE_LOCK(zone);
+       zone->uz_count = (zone->uz_count_min + zone->uz_count) / 2;
+       ZONE_UNLOCK(zone);
+}
+
+static void
+cache_drain_safe_cpu(uma_zone_t zone)
+{
+       uma_cache_t cache;
+
+       if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
+               return;
+
+       ZONE_LOCK(zone);
+       critical_enter();
+       cache = &zone->uz_cpu[curcpu];
+       if (cache->uc_allocbucket) {
+               LIST_INSERT_HEAD(&zone->uz_buckets, cache->uc_allocbucket,
+                   ub_link);
+               cache->uc_allocbucket = NULL;
+       }
+       if (cache->uc_freebucket) {
+               LIST_INSERT_HEAD(&zone->uz_buckets, cache->uc_freebucket,
+                   ub_link);
+               cache->uc_freebucket = NULL;
+       }
+       critical_exit();
+       ZONE_UNLOCK(zone);
+}
+
+/*
+ * Safely drain the per-CPU caches of a zone (or of all zones) into the
+ * zone cache.  This is an expensive call because it needs to bind to
+ * each CPU one by one and enter a critical section on each of them in
+ * order to safely access their cache buckets.
+ * The zone lock must not be held when calling this function.
+ */
+static void
+cache_drain_safe(uma_zone_t zone)
+{
+       int cpu;
+
+       /*
+        * Polite bucket size shrinking was not enough, shrink aggressively.
+        */
+       if (zone)
+               cache_shrink(zone);
+       else
+               zone_foreach(cache_shrink);
+
+       CPU_FOREACH(cpu) {
+               thread_lock(curthread);
+               sched_bind(curthread, cpu);
+               thread_unlock(curthread);
+
+               if (zone)
+                       cache_drain_safe_cpu(zone);
+               else
+                       zone_foreach(cache_drain_safe_cpu);
+       }
+       thread_lock(curthread);
+       sched_unbind(curthread);
+       thread_unlock(curthread);
+}
+
 /*
  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  */
@@ -3068,6 +3141,10 @@ uma_reclaim(void)
 #endif
        bucket_enable();
        zone_foreach(zone_drain);
+       if (vm_page_count_min()) {
+               cache_drain_safe(NULL);
+               zone_foreach(zone_drain);
+       }
        /*
         * Some slabs may have been freed but this zone will be visited early
         * we visit again so that we can free pages that are empty once other