This is approximately what I am thinking.  Note that this gives us the
    flexibility to build a larger infrastructure around the bucket cache,
    such as implementing per-cpu caches and so forth.  What I have here is
    the minimal implementation.
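
For illustration only (this is not part of the patch), a consumer of the
    bucket API might end up looking roughly like this; the "foo" names,
    M_FOO, and the malloc() fallback are just placeholders:

    /*
     * Hypothetical consumer: "struct foo", M_FOO, and the foo_*() names
     * are placeholders, not part of the patch.
     */
    MALLOC_DEFINE(M_FOO, "foo", "bucket cache demo objects");

    struct foo {
            void    *f_data[4];     /* stand-in; at least pointer-sized */
    };

    static struct malloc_bucket foo_bucket;

    static void
    foo_init(void)
    {
            init_malloc_bucket(&foo_bucket, M_FOO, sizeof(struct foo));
    }

    static struct foo *
    foo_alloc(void)
    {
            struct foo *fp;

            /* Try the low-latency cache first, fall back to malloc(). */
            if ((fp = malloc_bucket(&foo_bucket)) == NULL)
                    fp = malloc(sizeof(struct foo), M_FOO, M_WAITOK);
            return (fp);
    }

    static void
    foo_free(struct foo *fp)
    {
            /* Return the object to the cache instead of free()ing it. */
            free_bucket(&foo_bucket, fp);
    }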

                                                -Matt

Index: kern/kern_malloc.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_malloc.c,v
retrieving revision 1.93
diff -u -r1.93 kern_malloc.c
--- kern/kern_malloc.c  12 Sep 2001 08:37:44 -0000      1.93
+++ kern/kern_malloc.c  23 Feb 2002 22:41:56 -0000
@@ -116,6 +116,63 @@
 #endif /* INVARIANTS */
 
 /*
+ *     init_malloc_bucket:
+ *
+ *     Initialize a malloc bucket
+ */
+void
+init_malloc_bucket(struct malloc_bucket *bucket, struct malloc_type *type, int size)
+{
+       bzero(bucket, sizeof(struct malloc_bucket));
+       bucket->b_size = size;
+       bucket->b_type = type;
+       bucket->b_mtx = mtx_pool_find(bucket);
+}
+
+/*
+ *     malloc_bucket:
+ *
+ *     Allocate a structure from the supplied low-latency cache.  NULL is
+ *     returned if the cache is empty.
+ */
+void *
+malloc_bucket(struct malloc_bucket *bucket)
+{
+       void *ptr = NULL;
+
+       if (bucket->b_next) {
+               mtx_lock(bucket->b_mtx);
+               if ((ptr = bucket->b_next) != NULL) {
+                       bucket->b_next = *(caddr_t *)ptr;
+                       KASSERT(bucket->b_count > 0, ("bucket count mismatch"));
+                       --bucket->b_count;
+                       *(caddr_t *)ptr = (caddr_t)WEIRD_ADDR;
+               } else {
+                       KASSERT(bucket->b_count == 0, ("bucket count mismatch"));
+               }
+               mtx_unlock(bucket->b_mtx);
+       }
+       return(ptr);
+}
+
+/*
+ *     free_bucket:
+ *
+ *     Free a structure to the low-latency cache.
+ *
+ */
+void
+free_bucket(struct malloc_bucket *bucket, void *ptr)
+{
+       mtx_lock(bucket->b_mtx);
+       *(caddr_t *)ptr = bucket->b_next;
+       bucket->b_next = (caddr_t)ptr;
+       ++bucket->b_count;
+       mtx_unlock(bucket->b_mtx);
+       /* XXX if b_count > something, wakeup our cleaner? */
+}
+
+/*
  *     malloc:
  *
  *     Allocate a block of memory.
Index: sys/malloc.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/malloc.h,v
retrieving revision 1.54
diff -u -r1.54 malloc.h
--- sys/malloc.h        10 Aug 2001 06:37:04 -0000      1.54
+++ sys/malloc.h        23 Feb 2002 22:36:09 -0000
@@ -109,6 +109,18 @@
        long    kb_couldfree;   /* over high water mark and could free */
 };
 
+/*
+ * malloc_bucket: holder for a low-latency cache of fixed-size objects,
+ * served by the fast malloc_bucket() and free_bucket() calls.
+ */
+struct malloc_bucket {
+       caddr_t b_next;                 /* head of the free object list */
+       int     b_count;                /* number of objects in the bucket */
+       int     b_size;                 /* size of each cached object */
+       struct mtx *b_mtx;              /* pool mutex protecting the bucket */
+       struct malloc_type *b_type;     /* malloc type the objects belong to */
+};
+
 #ifdef _KERNEL
 #define        MINALLOCSIZE    (1 << MINBUCKET)
 #define BUCKETINDX(size) \

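As a rough sketch of the per-cpu layering mentioned at the top (again, not
    part of the patch; the foo names and the PCPU_GET()/MAXCPU usage are
    only illustrative), a subsystem could simply keep one bucket per cpu
    and fall back to malloc() when its cpu's bucket is empty:

    /* One bucket per cpu, all caching the same object size and type. */
    static struct malloc_bucket foo_pcpu_bucket[MAXCPU];

    static void
    foo_pcpu_init(void)
    {
            int i;

            for (i = 0; i < MAXCPU; i++)
                    init_malloc_bucket(&foo_pcpu_bucket[i], M_FOO,
                        sizeof(struct foo));
    }

    static struct foo *
    foo_pcpu_alloc(void)
    {
            struct foo *fp;

            /* Try the current cpu's bucket, then fall back to malloc(). */
            fp = malloc_bucket(&foo_pcpu_bucket[PCPU_GET(cpuid)]);
            if (fp == NULL)
                    fp = malloc(sizeof(struct foo), M_FOO, M_WAITOK);
            return (fp);
    }

Migrating cpus between the PCPU_GET() and the bucket access would only
    cost locality, not correctness, since each bucket still has its own
    pool mutex.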