Module Name:    src
Committed By:   maxv
Date:           Thu Aug 15 12:06:42 UTC 2019

Modified Files:
        src/sys/kern: subr_kmem.c

Log Message:
Retire KMEM_GUARD. It has been superseded by kASan, which is much more
powerful, has far broader coverage (well beyond just kmem(9)), and also
consumes less memory.
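
For illustration only (this snippet is not part of the commit), the class
of bug both tools try to catch looks like the sketch below; kASan reports
the out-of-bounds write at byte granularity thanks to its redzones,
whereas KMEM_GUARD could only fault when the access happened to spill
onto the unmapped guard page:

	/*
	 * Hypothetical example, not from the tree: a one-byte heap
	 * overflow on a kmem(9) allocation.
	 */
	#include <sys/kmem.h>

	static void
	example_overflow(void)
	{
		char *buf = kmem_alloc(32, KM_SLEEP);

		/*
		 * Write one byte past the end of the buffer.  On a
		 * kASan kernel this is reported immediately, because
		 * offset 32 falls inside the poisoned redzone added
		 * after the allocation.
		 */
		buf[32] = '\0';

		kmem_free(buf, 32);
	}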

KMEM_GUARD was a debug-only option that required special DDB tweaking, and
had no use in releases or even diagnostic kernels.

As a general rule, the policy now is to harden the pool layer by default
in GENERIC, and to use kASan as a diagnostic/debug/fuzzing feature to
verify each memory allocation and access in the system.
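
As a rough pointer (not part of this diff), enabling kASan on
NetBSD/amd64 is done through a dedicated kernel configuration; the
fragment below follows the shape of GENERIC_KASAN, but the file in the
tree is authoritative and may differ in detail:

	# Sketch of a kASan-enabled amd64 kernel configuration.
	include "arch/amd64/conf/GENERIC"

	makeoptions	KASAN=1		# build the kernel with kASan instrumentation
	options 	KASAN		# Kernel Address Sanitizer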


To generate a diff of this commit:
cvs rdiff -u -r1.75 -r1.76 src/sys/kern/subr_kmem.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/kern/subr_kmem.c
diff -u src/sys/kern/subr_kmem.c:1.75 src/sys/kern/subr_kmem.c:1.76
--- src/sys/kern/subr_kmem.c:1.75	Sun Apr  7 09:20:04 2019
+++ src/sys/kern/subr_kmem.c	Thu Aug 15 12:06:42 2019
@@ -1,6 +1,6 @@
-/*	$NetBSD: subr_kmem.c,v 1.75 2019/04/07 09:20:04 maxv Exp $	*/
+/*	$NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $	*/
 
-/*-
+/*
  * Copyright (c) 2009-2015 The NetBSD Foundation, Inc.
  * All rights reserved.
  *
@@ -29,7 +29,7 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*-
+/*
  * Copyright (c)2006 YAMAMOTO Takashi,
  * All rights reserved.
  *
@@ -66,7 +66,7 @@
  *	the exact user-requested allocation size in it. When freeing, compare
  *	it with kmem_free's "size" argument.
  *
- * This option enabled on DIAGNOSTIC.
+ * This option is enabled on DIAGNOSTIC.
  *
  *  |CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|CHUNK|
  *  +-----+-----+-----+-----+-----+-----+-----+-----+-----+---+-+
@@ -77,22 +77,8 @@
  *  |Size |    Buffer usable by the caller (requested size)   |Unused\
  */
 
-/*
- * KMEM_GUARD
- *	A kernel with "option DEBUG" has "kmem_guard" debugging feature compiled
- *	in. See the comment below for what kind of bugs it tries to detect. Even
- *	if compiled in, it's disabled by default because it's very expensive.
- *	You can enable it on boot by:
- *		boot -d
- *		db> w kmem_guard_depth 0t30000
- *		db> c
- *
- *	The default value of kmem_guard_depth is 0, which means disabled.
- *	It can be changed by KMEM_GUARD_DEPTH kernel config option.
- */
-
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.75 2019/04/07 09:20:04 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: subr_kmem.c,v 1.76 2019/08/15 12:06:42 maxv Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_kmem.h"
@@ -177,8 +163,6 @@ static size_t kmem_cache_big_maxidx __re
 #endif
 
 #if defined(DEBUG) && defined(_HARDKERNEL)
-#define	KMEM_SIZE
-#define	KMEM_GUARD
 static void *kmem_freecheck;
 #endif
 
@@ -195,31 +179,12 @@ static void kmem_size_check(void *, size
 #define	kmem_size_check(p, sz)	/* nothing */
 #endif
 
-#if defined(KMEM_GUARD)
-#ifndef KMEM_GUARD_DEPTH
-#define KMEM_GUARD_DEPTH 0
-#endif
-struct kmem_guard {
-	u_int		kg_depth;
-	intptr_t *	kg_fifo;
-	u_int		kg_rotor;
-	vmem_t *	kg_vmem;
-};
-static bool kmem_guard_init(struct kmem_guard *, u_int, vmem_t *);
-static void *kmem_guard_alloc(struct kmem_guard *, size_t, bool);
-static void kmem_guard_free(struct kmem_guard *, size_t, void *);
-int kmem_guard_depth = KMEM_GUARD_DEPTH;
-static bool kmem_guard_enabled;
-static struct kmem_guard kmem_guard;
-#endif /* defined(KMEM_GUARD) */
-
 CTASSERT(KM_SLEEP == PR_WAITOK);
 CTASSERT(KM_NOSLEEP == PR_NOWAIT);
 
 /*
  * kmem_intr_alloc: allocate wired memory.
  */
-
 void *
 kmem_intr_alloc(size_t requested_size, km_flag_t kmflags)
 {
@@ -236,13 +201,6 @@ kmem_intr_alloc(size_t requested_size, k
 	KASSERT((kmflags & KM_SLEEP) || (kmflags & KM_NOSLEEP));
 	KASSERT(!(kmflags & KM_SLEEP) || !(kmflags & KM_NOSLEEP));
 
-#ifdef KMEM_GUARD
-	if (kmem_guard_enabled) {
-		return kmem_guard_alloc(&kmem_guard, requested_size,
-		    (kmflags & KM_SLEEP) != 0);
-	}
-#endif
-
 	kasan_add_redzone(&requested_size);
 	size = kmem_roundup_size(requested_size);
 	allocsz = size + SIZE_SIZE;
@@ -280,7 +238,6 @@ kmem_intr_alloc(size_t requested_size, k
 /*
  * kmem_intr_zalloc: allocate zeroed wired memory.
  */
-
 void *
 kmem_intr_zalloc(size_t size, km_flag_t kmflags)
 {
@@ -296,7 +253,6 @@ kmem_intr_zalloc(size_t size, km_flag_t 
 /*
  * kmem_intr_free: free wired memory allocated by kmem_alloc.
  */
-
 void
 kmem_intr_free(void *p, size_t requested_size)
 {
@@ -307,13 +263,6 @@ kmem_intr_free(void *p, size_t requested
 	KASSERT(p != NULL);
 	KASSERT(requested_size > 0);
 
-#ifdef KMEM_GUARD
-	if (kmem_guard_enabled) {
-		kmem_guard_free(&kmem_guard, requested_size, p);
-		return;
-	}
-#endif
-
 	kasan_add_redzone(&requested_size);
 	size = kmem_roundup_size(requested_size);
 	allocsz = size + SIZE_SIZE;
@@ -341,13 +290,12 @@ kmem_intr_free(void *p, size_t requested
 	pool_cache_put(pc, p);
 }
 
-/* ---- kmem API */
+/* -------------------------------- Kmem API -------------------------------- */
 
 /*
  * kmem_alloc: allocate wired memory.
  * => must not be called from interrupt context.
  */
-
 void *
 kmem_alloc(size_t size, km_flag_t kmflags)
 {
@@ -364,7 +312,6 @@ kmem_alloc(size_t size, km_flag_t kmflag
  * kmem_zalloc: allocate zeroed wired memory.
  * => must not be called from interrupt context.
  */
-
 void *
 kmem_zalloc(size_t size, km_flag_t kmflags)
 {
@@ -381,7 +328,6 @@ kmem_zalloc(size_t size, km_flag_t kmfla
  * kmem_free: free wired memory allocated by kmem_alloc.
  * => must not be called from interrupt context.
  */
-
 void
 kmem_free(void *p, size_t size)
 {
@@ -444,10 +390,6 @@ kmem_create_caches(const struct kmem_cac
 void
 kmem_init(void)
 {
-#ifdef KMEM_GUARD
-	kmem_guard_enabled = kmem_guard_init(&kmem_guard, kmem_guard_depth,
-	    kmem_va_arena);
-#endif
 	kmem_cache_maxidx = kmem_create_caches(kmem_cache_sizes,
 	    kmem_cache, KMEM_MAXSIZE, KMEM_SHIFT, IPL_VM);
 	kmem_cache_big_maxidx = kmem_create_caches(kmem_cache_big_sizes,
@@ -525,7 +467,7 @@ kmem_strfree(char *str)
 	kmem_free(str, strlen(str) + 1);
 }
 
-/* ------------------ DEBUG / DIAGNOSTIC ------------------ */
+/* --------------------------- DEBUG / DIAGNOSTIC --------------------------- */
 
 #if defined(KMEM_SIZE)
 static void
@@ -553,163 +495,3 @@ kmem_size_check(void *p, size_t sz)
 	hd->size = -1;
 }
 #endif /* defined(KMEM_SIZE) */
-
-#if defined(KMEM_GUARD)
-/*
- * The ultimate memory allocator for debugging, baby.  It tries to catch:
- *
- * 1. Overflow, in realtime. A guard page sits immediately after the
- *    requested area; a read/write overflow therefore triggers a page
- *    fault.
- * 2. Invalid pointer/size passed, at free. A kmem_header structure sits
- *    just before the requested area, and holds the allocated size. Any
- *    difference with what is given at free triggers a panic.
- * 3. Underflow, at free. If an underflow occurs, the kmem header will be
- *    modified, and 2. will trigger a panic.
- * 4. Use-after-free. When freeing, the memory is unmapped, and depending
- *    on the value of kmem_guard_depth, the kernel will more or less delay
- *    the recycling of that memory. Which means that any ulterior read/write
- *    access to the memory will trigger a page fault, given it hasn't been
- *    recycled yet.
- */
-
-#include <sys/atomic.h>
-#include <uvm/uvm.h>
-
-static bool
-kmem_guard_init(struct kmem_guard *kg, u_int depth, vmem_t *vm)
-{
-	vaddr_t va;
-
-	/* If not enabled, we have nothing to do. */
-	if (depth == 0) {
-		return false;
-	}
-	depth = roundup(depth, PAGE_SIZE / sizeof(void *));
-	KASSERT(depth != 0);
-
-	/*
-	 * Allocate fifo.
-	 */
-	va = uvm_km_alloc(kernel_map, depth * sizeof(void *), PAGE_SIZE,
-	    UVM_KMF_WIRED | UVM_KMF_ZERO);
-	if (va == 0) {
-		return false;
-	}
-
-	/*
-	 * Init object.
-	 */
-	kg->kg_vmem = vm;
-	kg->kg_fifo = (void *)va;
-	kg->kg_depth = depth;
-	kg->kg_rotor = 0;
-
-	printf("kmem_guard(%p): depth %d\n", kg, depth);
-	return true;
-}
-
-static void *
-kmem_guard_alloc(struct kmem_guard *kg, size_t requested_size, bool waitok)
-{
-	struct vm_page *pg;
-	vm_flag_t flags;
-	vmem_addr_t va;
-	vaddr_t loopva;
-	vsize_t loopsize;
-	size_t size;
-	void **p;
-
-	/*
-	 * Compute the size: take the kmem header into account, and add a guard
-	 * page at the end.
-	 */
-	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
-
-	/* Allocate pages of kernel VA, but do not map anything in yet. */
-	flags = VM_BESTFIT | (waitok ? VM_SLEEP : VM_NOSLEEP);
-	if (vmem_alloc(kg->kg_vmem, size, flags, &va) != 0) {
-		return NULL;
-	}
-
-	loopva = va;
-	loopsize = size - PAGE_SIZE;
-
-	while (loopsize) {
-		pg = uvm_pagealloc(NULL, loopva, NULL, 0);
-		if (__predict_false(pg == NULL)) {
-			if (waitok) {
-				uvm_wait("kmem_guard");
-				continue;
-			} else {
-				uvm_km_pgremove_intrsafe(kernel_map, va,
-				    va + size);
-				vmem_free(kg->kg_vmem, va, size);
-				return NULL;
-			}
-		}
-
-		pg->flags &= ~PG_BUSY;	/* new page */
-		UVM_PAGE_OWN(pg, NULL);
-		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
-		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);
-
-		loopva += PAGE_SIZE;
-		loopsize -= PAGE_SIZE;
-	}
-
-	pmap_update(pmap_kernel());
-
-	/*
-	 * Offset the returned pointer so that the unmapped guard page sits
-	 * immediately after the returned object.
-	 */
-	p = (void **)((va + (size - PAGE_SIZE) - requested_size) & ~(uintptr_t)ALIGNBYTES);
-	kmem_size_set((uint8_t *)p - SIZE_SIZE, requested_size);
-	return (void *)p;
-}
-
-static void
-kmem_guard_free(struct kmem_guard *kg, size_t requested_size, void *p)
-{
-	vaddr_t va;
-	u_int rotor;
-	size_t size;
-	uint8_t *ptr;
-
-	ptr = (uint8_t *)p - SIZE_SIZE;
-	kmem_size_check(ptr, requested_size);
-	va = trunc_page((vaddr_t)ptr);
-	size = round_page(requested_size + SIZE_SIZE) + PAGE_SIZE;
-
-	KASSERT(pmap_extract(pmap_kernel(), va, NULL));
-	KASSERT(!pmap_extract(pmap_kernel(), va + (size - PAGE_SIZE), NULL));
-
-	/*
-	 * Unmap and free the pages. The last one is never allocated.
-	 */
-	uvm_km_pgremove_intrsafe(kernel_map, va, va + size);
-	pmap_update(pmap_kernel());
-
-#if 0
-	/*
-	 * XXX: Here, we need to atomically register the va and its size in the
-	 * fifo.
-	 */
-
-	/*
-	 * Put the VA allocation into the list and swap an old one out to free.
-	 * This behaves mostly like a fifo.
-	 */
-	rotor = atomic_inc_uint_nv(&kg->kg_rotor) % kg->kg_depth;
-	va = (vaddr_t)atomic_swap_ptr(&kg->kg_fifo[rotor], (void *)va);
-	if (va != 0) {
-		vmem_free(kg->kg_vmem, va, size);
-	}
-#else
-	(void)rotor;
-	vmem_free(kg->kg_vmem, va, size);
-#endif
-}
-
-#endif /* defined(KMEM_GUARD) */
