On Fri, Apr 04, 2025 at 05:05:19PM +0200, Johannes Berg wrote: > From: Johannes Berg <johannes.b...@intel.com> > > Nathan reported [1] that when built with clang, the um kernel > crashes pretty much immediately. This turned out to be an issue > with the inline assembly I had added, when clang used %rax/%eax > for both operands. Reorder it so current->thread.segv_continue > is written first, and then the lifetime of _faulted won't have > overlap with the lifetime of segv_continue. > > In the email thread Benjamin also pointed out that current->mm > is only NULL for true kernel tasks, but we could do this for a > userspace task, so the current->thread.segv_continue logic must > be lifted out of the mm==NULL check. > > Finally, while looking at this, put a barrier() so the NULL > assignment to thread.segv_continue cannot be reordered before > the possibly faulting operation. > > Reported-by: Nathan Chancellor <nat...@kernel.org> > Closes: https://lore.kernel.org/r/20250402221254.GA384@ax162 [1] > Fixes: d1d7f01f7cd3 ("um: mark rodata read-only and implement _nofault > accesses") > Signed-off-by: Johannes Berg <johannes.b...@intel.com>
Thanks, this makes my test case happy. Tested-by: Nathan Chancellor <nat...@kernel.org> > --- > arch/um/include/asm/uaccess.h | 2 ++ > arch/um/kernel/trap.c | 26 ++++++++++++------------ > arch/x86/um/shared/sysdep/faultinfo_32.h | 2 +- > arch/x86/um/shared/sysdep/faultinfo_64.h | 2 +- > 4 files changed, 17 insertions(+), 15 deletions(-) > > diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h > index 3a08f9029a3f..1c6e0ae41b0c 100644 > --- a/arch/um/include/asm/uaccess.h > +++ b/arch/um/include/asm/uaccess.h > @@ -55,6 +55,7 @@ do { > \ > goto err_label; \ > } \ > *((type *)dst) = get_unaligned((type *)(src)); \ > + barrier(); \ > current->thread.segv_continue = NULL; \ > } while (0) > > @@ -66,6 +67,7 @@ do { > \ > if (__faulted) \ > goto err_label; \ > put_unaligned(*((type *)src), (type *)(dst)); \ > + barrier(); \ > current->thread.segv_continue = NULL; \ > } while (0) > > diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c > index ce073150dc20..ef2272e92a43 100644 > --- a/arch/um/kernel/trap.c > +++ b/arch/um/kernel/trap.c > @@ -225,20 +225,20 @@ unsigned long segv(struct faultinfo fi, unsigned long > ip, int is_user, > panic("Failed to sync kernel TLBs: %d", err); > goto out; > } > - else if (current->mm == NULL) { > - if (current->pagefault_disabled) { > - if (!mc) { > - show_regs(container_of(regs, struct pt_regs, > regs)); > - panic("Segfault with pagefaults disabled but no > mcontext"); > - } > - if (!current->thread.segv_continue) { > - show_regs(container_of(regs, struct pt_regs, > regs)); > - panic("Segfault without recovery target"); > - } > - mc_set_rip(mc, current->thread.segv_continue); > - current->thread.segv_continue = NULL; > - goto out; > + else if (current->pagefault_disabled) { > + if (!mc) { > + show_regs(container_of(regs, struct pt_regs, regs)); > + panic("Segfault with pagefaults disabled but no > mcontext"); > } > + if (!current->thread.segv_continue) { > + show_regs(container_of(regs, struct pt_regs, 
regs)); > + panic("Segfault without recovery target"); > + } > + mc_set_rip(mc, current->thread.segv_continue); > + current->thread.segv_continue = NULL; > + goto out; > + } > + else if (current->mm == NULL) { > show_regs(container_of(regs, struct pt_regs, regs)); > panic("Segfault with no mm"); > } > diff --git a/arch/x86/um/shared/sysdep/faultinfo_32.h > b/arch/x86/um/shared/sysdep/faultinfo_32.h > index ab5c8e47049c..9193a7790a71 100644 > --- a/arch/x86/um/shared/sysdep/faultinfo_32.h > +++ b/arch/x86/um/shared/sysdep/faultinfo_32.h > @@ -31,8 +31,8 @@ struct faultinfo { > > #define ___backtrack_faulted(_faulted) > \ > asm volatile ( \ > - "mov $0, %0\n" \ > "movl $__get_kernel_nofault_faulted_%=,%1\n" \ > + "mov $0, %0\n" \ > "jmp _end_%=\n" \ > "__get_kernel_nofault_faulted_%=:\n" \ > "mov $1, %0;" \ > diff --git a/arch/x86/um/shared/sysdep/faultinfo_64.h > b/arch/x86/um/shared/sysdep/faultinfo_64.h > index 26fb4835d3e9..61e4ca1e0ab5 100644 > --- a/arch/x86/um/shared/sysdep/faultinfo_64.h > +++ b/arch/x86/um/shared/sysdep/faultinfo_64.h > @@ -31,8 +31,8 @@ struct faultinfo { > > #define ___backtrack_faulted(_faulted) > \ > asm volatile ( \ > - "mov $0, %0\n" \ > "movq $__get_kernel_nofault_faulted_%=,%1\n" \ > + "mov $0, %0\n" \ > "jmp _end_%=\n" \ > "__get_kernel_nofault_faulted_%=:\n" \ > "mov $1, %0;" \ > -- > 2.49.0 >