Replace the existing kasan_arch_is_ready() calls with kasan_enabled().
Drop the checks entirely where the caller already runs under a
kasan_enabled() condition.

Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218315
Signed-off-by: Sabyrzhan Tasbolatov <snovit...@gmail.com>
---
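Note for reviewers: this relies on kasan_enabled() also covering the
"has the architecture finished shadow init" question that
kasan_arch_is_ready() used to answer. A minimal sketch of the assumed
shape (simplified; the real definition lives in
include/linux/kasan-enabled.h and depends on the static-key conversion
from earlier in this series):

	/* Sketch only, not the exact upstream definition. */
	DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

	static __always_inline bool kasan_enabled(void)
	{
		/* Flipped by the arch once shadow memory is usable. */
		return static_branch_likely(&kasan_flag_enabled);
	}

The checks dropped below (kasan_poison_last_granule() and the vmalloc
poison/unpoison helpers) are redundant because their callers are
already gated, following the usual wrapper pattern in
include/linux/kasan.h, roughly:

	static __always_inline void kasan_poison_vmalloc(const void *start,
							 unsigned long size)
	{
		if (kasan_enabled())
			__kasan_poison_vmalloc(start, size);
	}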
 mm/kasan/common.c  |  8 ++++----
 mm/kasan/generic.c |  6 +++---
 mm/kasan/kasan.h   |  6 ------
 mm/kasan/shadow.c  | 15 +++------------
 4 files changed, 10 insertions(+), 25 deletions(-)

diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 525194da25f..0f3648335a6 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -257,7 +257,7 @@ static inline void poison_slab_object(struct kmem_cache *cache, void *object,
 bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
                                unsigned long ip)
 {
-       if (!kasan_arch_is_ready() || is_kfence_address(object))
+       if (!kasan_enabled() || is_kfence_address(object))
                return false;
        return check_slab_allocation(cache, object, ip);
 }
@@ -265,7 +265,7 @@ bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
 bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
                       bool still_accessible)
 {
-       if (!kasan_arch_is_ready() || is_kfence_address(object))
+       if (!kasan_enabled() || is_kfence_address(object))
                return false;
 
        poison_slab_object(cache, object, init, still_accessible);
@@ -289,7 +289,7 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return false;
 
        if (ptr != page_address(virt_to_head_page(ptr))) {
@@ -518,7 +518,7 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
                return true;
        }
 
-       if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+       if (is_kfence_address(ptr) || !kasan_enabled())
                return true;
 
        slab = folio_slab(folio);
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index ab9ab30caf4..af2f2077a45 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -176,7 +176,7 @@ static __always_inline bool check_region_inline(const void *addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
 {
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return true;
 
        if (unlikely(size == 0))
@@ -204,7 +204,7 @@ bool kasan_byte_accessible(const void *addr)
 {
        s8 shadow_byte;
 
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return true;
 
        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));
@@ -506,7 +506,7 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return;
 
        /* Check if free meta is valid. */
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 129178be5e6..e0ffc16495d 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -544,12 +544,6 @@ static inline void kasan_poison_last_granule(const void *address, size_t size) {
 
 #endif /* CONFIG_KASAN_GENERIC */
 
-#ifndef kasan_arch_is_ready
-static inline bool kasan_arch_is_ready(void)   { return true; }
-#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
-#error kasan_arch_is_ready only works in KASAN generic outline mode!
-#endif
-
 #if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
 
 void kasan_kunit_test_suite_start(void);
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d2c70cd2afb..9db8548ccb4 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -125,7 +125,7 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
 {
        void *shadow_start, *shadow_end;
 
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return;
 
        /*
@@ -150,9 +150,6 @@ EXPORT_SYMBOL_GPL(kasan_poison);
 #ifdef CONFIG_KASAN_GENERIC
 void kasan_poison_last_granule(const void *addr, size_t size)
 {
-       if (!kasan_arch_is_ready())
-               return;
-
        if (size & KASAN_GRANULE_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(addr + size);
                *shadow = size & KASAN_GRANULE_MASK;
@@ -390,7 +387,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
        unsigned long shadow_start, shadow_end;
        int ret;
 
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return 0;
 
        if (!is_vmalloc_or_module_addr((void *)addr))
@@ -560,7 +557,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
        unsigned long region_start, region_end;
        unsigned long size;
 
-       if (!kasan_arch_is_ready())
+       if (!kasan_enabled())
                return;
 
        region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
@@ -611,9 +608,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
         * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
         */
 
-       if (!kasan_arch_is_ready())
-               return (void *)start;
-
        if (!is_vmalloc_or_module_addr(start))
                return (void *)start;
 
@@ -636,9 +630,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-       if (!kasan_arch_is_ready())
-               return;
-
        if (!is_vmalloc_or_module_addr(start))
                return;
 
-- 
2.34.1

