On Wed, Jun 25, 2025 at 02:52:20PM +0500, Sabyrzhan Tasbolatov wrote:
> Call kasan_init_generic() which enables the static flag
> to mark generic KASAN initialized, otherwise it's an inline stub.
> 
> Replace `kasan_arch_is_ready` with `kasan_enabled`.
> Delete the flag `kasan_early_stage` in favor of the global static key
> enabled via kasan_enabled().
> 
> printk banner is printed earlier right where `kasan_early_stage`
> was flipped, just to keep the same flow.
> 
> Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218315
> Signed-off-by: Sabyrzhan Tasbolatov <snovit...@gmail.com>
> ---
>  arch/loongarch/include/asm/kasan.h | 7 -------
>  arch/loongarch/mm/kasan_init.c     | 7 ++-----
>  2 files changed, 2 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
> index 7f52bd31b9d..b0b74871257 100644
> --- a/arch/loongarch/include/asm/kasan.h
> +++ b/arch/loongarch/include/asm/kasan.h
> @@ -66,7 +66,6 @@
>  #define XKPRANGE_WC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
>  #define XKVRANGE_VC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
>  
> -extern bool kasan_early_stage;
>  extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
>  
>  #define kasan_mem_to_shadow kasan_mem_to_shadow
> @@ -75,12 +74,6 @@ void *kasan_mem_to_shadow(const void *addr);
>  #define kasan_shadow_to_mem kasan_shadow_to_mem
>  const void *kasan_shadow_to_mem(const void *shadow_addr);
>  
> -#define kasan_arch_is_ready kasan_arch_is_ready
> -static __always_inline bool kasan_arch_is_ready(void)
> -{
> -	return !kasan_early_stage;
> -}
> -
>  #define addr_has_metadata addr_has_metadata
>  static __always_inline bool addr_has_metadata(const void *addr)
>  {
> diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
> index d2681272d8f..cf8315f9119 100644
> --- a/arch/loongarch/mm/kasan_init.c
> +++ b/arch/loongarch/mm/kasan_init.c
> @@ -40,11 +40,9 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
>  #define __pte_none(early, pte) (early ? pte_none(pte) : \
>  	((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
>  
> -bool kasan_early_stage = true;
> -
>  void *kasan_mem_to_shadow(const void *addr)
>  {
> -	if (!kasan_arch_is_ready()) {
> +	if (!kasan_enabled()) {
>  		return (void *)(kasan_early_shadow_page);
>  	} else {
>  		unsigned long maddr = (unsigned long)addr;
> @@ -298,7 +296,7 @@ void __init kasan_init(void)
>  	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
>  				    kasan_mem_to_shadow((void *)KFENCE_AREA_END));
>  
> -	kasan_early_stage = false;
> +	kasan_init_generic();
>  
>  	/* Populate the linear mapping */
>  	for_each_mem_range(i, &pa_start, &pa_end) {
> @@ -329,5 +327,4 @@ void __init kasan_init(void)
>  
>  	/* At this point kasan is fully initialized. Enable error messages */
>  	init_task.kasan_depth = 0;
> -	pr_info("KernelAddressSanitizer initialized.\n");
>  }
This one is weird because it's the only arch that keeps doing work after marking kasan_early_stage false. Is that really correct, or should kasan_init_generic() be the last call, like on all the other architectures?

Also, please move init_task.kasan_depth = 0 into the generic helper. ARM64 might have fooled you with its wrapper function, but every arch sets it right before the pr_info() you're taking out.
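
Roughly what I have in mind, as a sketch only (the static key name below is my assumption about how this series wires up kasan_enabled(); use whatever the common code actually defines):

/* mm/kasan/generic.c -- sketch, not the exact code from this series */
void __init kasan_init_generic(void)
{
	/*
	 * Hoisted from the architectures: allow error reports from now on.
	 * Every arch currently does this right before its pr_info().
	 */
	init_task.kasan_depth = 0;

	/* Assumed: enable the static key that kasan_enabled() tests. */
	static_branch_enable(&kasan_flag_enabled);

	pr_info("KernelAddressSanitizer initialized (generic)\n");
}

and each arch then calls kasan_init_generic() as the very last step of its kasan_init(), once all of the shadow is populated.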