With all "silently resizing" callers of ksize() refactored, remove the
logic in ksize() that would allow it to be used to effectively change
the size of an allocation (bypassing __alloc_size hints, etc.). Callers
wanting this behavior need to either use kmalloc_size_roundup() before
an allocation, or call krealloc() directly.

For kfree_sensitive(), move the unpoisoning logic inline. Replace the
open-coded ksize() in __do_krealloc() with a call to ksize() now that
ksize() no longer performs unpoisoning.

Cc: Christoph Lameter <c...@linux.com>
Cc: Pekka Enberg <penb...@kernel.org>
Cc: David Rientjes <rient...@google.com>
Cc: Joonsoo Kim <iamjoonsoo....@lge.com>
Cc: Andrew Morton <a...@linux-foundation.org>
Cc: Vlastimil Babka <vba...@suse.cz>
Cc: Roman Gushchin <roman.gushc...@linux.dev>
Cc: Hyeonggon Yoo <42.hye...@gmail.com>
Cc: Andrey Ryabinin <ryabinin....@gmail.com>
Cc: Alexander Potapenko <gli...@google.com>
Cc: Andrey Konovalov <andreyk...@gmail.com>
Cc: Dmitry Vyukov <dvyu...@google.com>
Cc: Vincenzo Frascino <vincenzo.frasc...@arm.com>
Cc: linux...@kvack.org
Cc: kasan-...@googlegroups.com
Signed-off-by: Kees Cook <keesc...@chromium.org>
---
 mm/slab_common.c | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

diff --git a/mm/slab_common.c b/mm/slab_common.c
index d7420cf649f8..60b77bcdc2e3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1160,13 +1160,8 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
        void *ret;
        size_t ks;
 
-       /* Don't use instrumented ksize to allow precise KASAN poisoning. */
-       if (likely(!ZERO_OR_NULL_PTR(p))) {
-               if (!kasan_check_byte(p))
-                       return NULL;
-               ks = kfence_ksize(p) ?: __ksize(p);
-       } else
-               ks = 0;
+       /* How large is the allocation actually? */
+       ks = ksize(p);
 
        /* If the object still fits, repoison it precisely. */
        if (ks >= new_size) {
@@ -1232,8 +1227,10 @@ void kfree_sensitive(const void *p)
        void *mem = (void *)p;
 
        ks = ksize(mem);
-       if (ks)
+       if (ks) {
+               kasan_unpoison_range(mem, ks);
                memzero_explicit(mem, ks);
+       }
        kfree(mem);
 }
 EXPORT_SYMBOL(kfree_sensitive);
@@ -1242,10 +1239,11 @@ EXPORT_SYMBOL(kfree_sensitive);
  * ksize - get the actual amount of memory allocated for a given object
  * @objp: Pointer to the object
  *
- * kmalloc may internally round up allocations and return more memory
+ * kmalloc() may internally round up allocations and return more memory
  * than requested. ksize() can be used to determine the actual amount of
- * memory allocated. The caller may use this additional memory, even though
- * a smaller amount of memory was initially specified with the kmalloc call.
+ * allocated memory. The caller may NOT use this additional memory, unless
+ * it calls krealloc(). To avoid an alloc/realloc cycle, callers can use
+ * kmalloc_size_roundup() to find the size of the associated kmalloc bucket.
  * The caller must guarantee that objp points to a valid object previously
  * allocated with either kmalloc() or kmem_cache_alloc(). The object
  * must not be freed during the duration of the call.
@@ -1254,13 +1252,11 @@ EXPORT_SYMBOL(kfree_sensitive);
  */
 size_t ksize(const void *objp)
 {
-       size_t size;
-
        /*
-        * We need to first check that the pointer to the object is valid, and
-        * only then unpoison the memory. The report printed from ksize() is
-        * more useful, then when it's printed later when the behaviour could
-        * be undefined due to a potential use-after-free or double-free.
+        * We need to first check that the pointer to the object is valid.
+        * The KASAN report printed from ksize() is more useful than when
+        * it's printed later, when the behaviour could be undefined due to
+        * a potential use-after-free or double-free.
         *
         * We use kasan_check_byte(), which is supported for the hardware
         * tag-based KASAN mode, unlike kasan_check_read/write().
@@ -1274,13 +1270,7 @@ size_t ksize(const void *objp)
        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
                return 0;
 
-       size = kfence_ksize(objp) ?: __ksize(objp);
-       /*
-        * We assume that ksize callers could use whole allocated area,
-        * so we need to unpoison this area.
-        */
-       kasan_unpoison_range(objp, size);
-       return size;
+       return kfence_ksize(objp) ?: __ksize(objp);
 }
 EXPORT_SYMBOL(ksize);
 
-- 
2.34.1
