Thanks :) > > Changes since v1: > https://lore.kernel.org/linux-um/20241017231007.1500497-2-david...@google.com/ > - Use force_align_arg_pointer on real_init() instead of naked on > _start, which works with clang.
I already applied it, so need to fix on top of it now, not replace it. However I was just playing with the below - was just looking at the size though, but what do you think? johannes >From 57c5a80a4db2de33a11a5a20fcbea8f3643844f5 Mon Sep 17 00:00:00 2001 From: Johannes Berg <johannes.b...@intel.com> Date: Tue, 22 Oct 2024 11:48:21 +0200 Subject: [PATCH] um: make stub_exe _start() pure inline asm Since __attribute__((naked)) cannot be used with functions containing C statements, just generate the few instructions it needs in assembly directly. Fixes: 8508a5e0e9db ("um: Fix misaligned stack in stub_exe") Link: https://lore.kernel.org/linux-um/cabvgosnth-uoofmp5hwmxjx_f1osmnvdhgkrkm4uz6dfm2l...@mail.gmail.com/ Signed-off-by: Johannes Berg <johannes.b...@intel.com> --- arch/um/kernel/skas/stub_exe.c | 8 +------- arch/x86/um/shared/sysdep/stub_32.h | 8 ++++++++ arch/x86/um/shared/sysdep/stub_64.h | 8 ++++++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/arch/um/kernel/skas/stub_exe.c b/arch/um/kernel/skas/stub_exe.c index 722ce6267476..a61f9c008233 100644 --- a/arch/um/kernel/skas/stub_exe.c +++ b/arch/um/kernel/skas/stub_exe.c @@ -81,11 +81,5 @@ noinline static void real_init(void) __attribute__((naked)) void _start(void) { - char *alloc; - - /* Make enough space for the stub (including space for alignment) */ - alloc = __builtin_alloca((1 + 2 * STUB_DATA_PAGES - 1) * UM_KERN_PAGE_SIZE); - asm volatile("" : "+r,m"(alloc) : : "memory"); - - real_init(); + stub_start(real_init); } diff --git a/arch/x86/um/shared/sysdep/stub_32.h b/arch/x86/um/shared/sysdep/stub_32.h index 631a18d0ff44..760e8ce8093f 100644 --- a/arch/x86/um/shared/sysdep/stub_32.h +++ b/arch/x86/um/shared/sysdep/stub_32.h @@ -123,4 +123,12 @@ static __always_inline void *get_stub_data(void) return (void *)ret; } + +#define stub_start(fn) \ + asm volatile ( \ + "subl %0,%%esp ;" \ + "movl %1, %%eax ; " \ + "call *%%eax ;" \ + :: "i" ((STUB_DATA_PAGES + 1) * UM_KERN_PAGE_SIZE), \ + "i" 
(&fn)) #endif diff --git a/arch/x86/um/shared/sysdep/stub_64.h b/arch/x86/um/shared/sysdep/stub_64.h index 17153dfd780a..148bf423289e 100644 --- a/arch/x86/um/shared/sysdep/stub_64.h +++ b/arch/x86/um/shared/sysdep/stub_64.h @@ -126,4 +126,12 @@ static __always_inline void *get_stub_data(void) return (void *)ret; } + +#define stub_start(fn) \ + asm volatile ( \ + "subq %0,%%rsp ;" \ + "movq %1,%%rax ;" \ + "call *%%rax ;" \ + :: "i" ((STUB_DATA_PAGES + 1) * UM_KERN_PAGE_SIZE), \ + "i" (&fn)) #endif -- 2.47.0