From: Alexander Potapenko <gli...@google.com>

Make KFENCE compatible with KASAN. Currently this helps test KFENCE
itself, where KASAN can catch potential corruptions to KFENCE state, or
other corruptions that may be a result of freepointer corruptions in the
main allocators.

Reviewed-by: Dmitry Vyukov <dvyu...@google.com>
Reviewed-by: Jann Horn <ja...@google.com>
Co-developed-by: Marco Elver <el...@google.com>
Signed-off-by: Marco Elver <el...@google.com>
Signed-off-by: Alexander Potapenko <gli...@google.com>
---
v7:
* Remove EXPERT restriction for enabling KASAN+KFENCE. In future,
  MTE-based KASAN
  without stack traces will benefit from having KFENCE (which has stack
  traces). Removing EXPERT restriction allows this for production
  builds. The Kconfig help-text should still make it clear that in most
  cases KFENCE+KASAN does not make sense.
* Also skip kasan_poison_shadow() if KFENCE object. It turns out that
  kernel/scs.c is a user of kasan_{poison,unpoison}_object_data().
* Add Jann's Reviewed-by.

v5:
* Also guard kasan_unpoison_shadow with is_kfence_address(), as it may
  be called from SL*B internals, currently ksize().
* Make kasan_record_aux_stack() compatible with KFENCE, which may be
  called from outside KASAN runtime.
---
 lib/Kconfig.kfence |  2 +-
 mm/kasan/common.c  | 19 +++++++++++++++++++
 mm/kasan/generic.c |  3 ++-
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index b209cd02042b..d2e3c6724226 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
 
 menuconfig KFENCE
        bool "KFENCE: low-overhead sampling-based memory safety error detector"
-       depends on HAVE_ARCH_KFENCE && !KASAN && (SLAB || SLUB)
+       depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
        depends on JUMP_LABEL # To ensure performance, require jump labels
        select STACKTRACE
        help
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 950fd372a07e..de92da1b637a 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/linkage.h>
 #include <linux/memblock.h>
@@ -124,6 +125,10 @@ void kasan_poison_shadow(const void *address, size_t size, u8 value)
         */
        address = reset_tag(address);
 
+       /* Skip KFENCE memory if called explicitly outside of sl*b. */
+       if (is_kfence_address(address))
+               return;
+
        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);
 
@@ -141,6 +146,14 @@ void kasan_unpoison_shadow(const void *address, size_t size)
         */
        address = reset_tag(address);
 
+       /*
+        * Skip KFENCE memory if called explicitly outside of sl*b. Also note
+        * that calls to ksize(), where size is not a multiple of machine-word
+        * size, would otherwise poison the invalid portion of the word.
+        */
+       if (is_kfence_address(address))
+               return;
+
        kasan_poison_shadow(address, size, tag);
 
        if (size & KASAN_SHADOW_MASK) {
@@ -396,6 +409,9 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
        tagged_object = object;
        object = reset_tag(object);
 
+       if (is_kfence_address(object))
+               return false;
+
        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
@@ -444,6 +460,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
        if (unlikely(object == NULL))
                return NULL;
 
+       if (is_kfence_address(object))
+               return (void *)object;
+
        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 248264b9cb76..1069ecd1cd55 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/kasan.h>
 #include <linux/kernel.h>
+#include <linux/kfence.h>
 #include <linux/kmemleak.h>
 #include <linux/linkage.h>
 #include <linux/memblock.h>
@@ -332,7 +333,7 @@ void kasan_record_aux_stack(void *addr)
        struct kasan_alloc_meta *alloc_info;
        void *object;
 
-       if (!(page && PageSlab(page)))
+       if (is_kfence_address(addr) || !(page && PageSlab(page)))
                return;
 
        cache = page->slab_cache;
-- 
2.29.1.341.ge80a0c044ae-goog

Reply via email to