From: Tiwei Bie <tiwei....@antgroup.com>

The PID of the stub process can be obtained from current_mm_id(). There
is no need to track it via userspace_pid[]. Stop doing that to simplify
the code.
Signed-off-by: Tiwei Bie <tiwei....@antgroup.com>
---
 arch/um/include/asm/mmu_context.h   |  9 +--------
 arch/um/include/shared/skas/mm_id.h |  2 --
 arch/um/include/shared/skas/skas.h  |  1 -
 arch/um/kernel/exec.c               |  2 --
 arch/um/kernel/skas/process.c       |  2 --
 arch/um/os-Linux/skas/process.c     | 16 ++++++----------
 arch/x86/um/tls_32.c                |  2 +-
 7 files changed, 8 insertions(+), 26 deletions(-)

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index 23dcc914d44e..0bbb24868557 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -16,11 +16,6 @@
 #define activate_mm activate_mm
 static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 {
-	/*
-	 * This is called by fs/exec.c and sys_unshare()
-	 * when the new ->mm is used for the first time.
-	 */
-	__switch_mm(&new->context.id);
 }
 
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
@@ -28,11 +23,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 {
 	unsigned cpu = smp_processor_id();
 
-	if(prev != next){
+	if (prev != next) {
 		cpumask_clear_cpu(cpu, mm_cpumask(prev));
 		cpumask_set_cpu(cpu, mm_cpumask(next));
-		if(next != &init_mm)
-			__switch_mm(&next->context.id);
 	}
 }
 
diff --git a/arch/um/include/shared/skas/mm_id.h b/arch/um/include/shared/skas/mm_id.h
index 89df9a55fbea..4f977ef5dda5 100644
--- a/arch/um/include/shared/skas/mm_id.h
+++ b/arch/um/include/shared/skas/mm_id.h
@@ -19,8 +19,6 @@ struct mm_id {
 	int syscall_fd_map[STUB_MAX_FDS];
 };
 
-void __switch_mm(struct mm_id *mm_idp);
-
 void notify_mm_kill(int pid);
 
 #endif
diff --git a/arch/um/include/shared/skas/skas.h b/arch/um/include/shared/skas/skas.h
index 7d1de4cab551..807514e10538 100644
--- a/arch/um/include/shared/skas/skas.h
+++ b/arch/um/include/shared/skas/skas.h
@@ -9,7 +9,6 @@
 #include <sysdep/ptrace.h>
 
 extern int using_seccomp;
-extern int userspace_pid[];
 
 extern void new_thread_handler(void);
 extern void handle_syscall(struct uml_pt_regs *regs);
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index cb8b5cd9285c..13812fa97eee 100644
--- a/arch/um/kernel/exec.c
+++ b/arch/um/kernel/exec.c
@@ -26,8 +26,6 @@ void flush_thread(void)
 
 	get_safe_registers(current_pt_regs()->regs.gp,
 			   current_pt_regs()->regs.fp);
-
-	__switch_mm(&current->mm->context.id);
 }
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 05dcdc057af9..5881b17eb987 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -26,8 +26,6 @@ static int __init start_kernel_proc(void *unused)
 	return 0;
 }
 
-extern int userspace_pid[];
-
 static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
 
 int __init start_uml(void)
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index e90a0658bba8..702ff2aeb32a 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -434,7 +434,6 @@ static int __init init_stub_exe_fd(void)
 __initcall(init_stub_exe_fd);
 
 int using_seccomp;
-int userspace_pid[NR_CPUS];
 
 /**
  * start_userspace() - prepare a new userspace process
@@ -553,7 +552,7 @@ extern unsigned long tt_extra_sched_jiffies;
 
 void userspace(struct uml_pt_regs *regs)
 {
-	int err, status, op, pid = userspace_pid[0];
+	int err, status, op;
 	siginfo_t si_ptrace;
 	siginfo_t *si;
 	int sig;
@@ -562,6 +561,8 @@ void userspace(struct uml_pt_regs *regs)
 	interrupt_end();
 
 	while (1) {
+		struct mm_id *mm_id = current_mm_id();
+
 		/*
 		 * When we are in time-travel mode, userspace can theoretically
 		 * do a *lot* of work without being scheduled. The problem with
@@ -590,7 +591,6 @@ void userspace(struct uml_pt_regs *regs)
 		current_mm_sync();
 
 		if (using_seccomp) {
-			struct mm_id *mm_id = current_mm_id();
 			struct stub_data *proc_data = (void *) mm_id->stack;
 
 			err = set_stub_state(regs, proc_data, singlestepping());
@@ -644,8 +644,10 @@ void userspace(struct uml_pt_regs *regs)
 				GET_FAULTINFO_FROM_MC(regs->faultinfo, mcontext);
 			}
 		} else {
+			int pid = mm_id->pid;
+
 			/* Flush out any pending syscalls */
-			err = syscall_stub_flush(current_mm_id());
+			err = syscall_stub_flush(mm_id);
 			if (err) {
 				if (err == -ENOMEM)
 					report_enomem();
@@ -776,7 +778,6 @@ void userspace(struct uml_pt_regs *regs)
 				       __func__, sig);
 				fatal_sigsegv();
 			}
-			pid = userspace_pid[0];
 			interrupt_end();
 
 			/* Avoid -ERESTARTSYS handling in host */
@@ -901,8 +902,3 @@ void reboot_skas(void)
 	block_signals_trace();
 	UML_LONGJMP(&initial_jmpbuf, noreboot ? INIT_JMP_HALT : INIT_JMP_REBOOT);
 }
-
-void __switch_mm(struct mm_id *mm_idp)
-{
-	userspace_pid[0] = mm_idp->pid;
-}
diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
index cb3f17627d16..1909c2e640b2 100644
--- a/arch/x86/um/tls_32.c
+++ b/arch/x86/um/tls_32.c
@@ -186,7 +186,7 @@ int arch_switch_tls(struct task_struct *to)
 	/*
 	 * We have no need whatsoever to switch TLS for kernel threads; beyond
 	 * that, that would also result in us calling os_set_thread_area with
-	 * userspace_pid[cpu] == 0, which gives an error.
+	 * task->mm == NULL, which would cause a crash.
 	 */
 	if (likely(to->mm))
 		return load_TLS(O_FORCE, to);
-- 
2.34.1
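
Editorial note (not part of the patch): the core of the change is that the
stub PID no longer lives in a global cache that __switch_mm() had to refresh
on every address-space switch; it is read from the mm_id whenever it is
needed. Below is a minimal, self-contained C sketch of that bookkeeping
change. It is illustration only, not UML kernel code: struct mm_id is reduced
to its pid field, and current_mm_id(), fake_mm and old_switch_mm() are
made-up scaffolding so the example compiles and runs on its own.

#include <stdio.h>

/* Reduced stand-in for the UML mm_id; only the field used here. */
struct mm_id {
	int pid;	/* PID of the stub process backing this mm */
};

/* Hypothetical "current" address space; in UML this comes from current->mm. */
static struct mm_id fake_mm = { .pid = 4242 };

static struct mm_id *current_mm_id(void)
{
	return &fake_mm;
}

/* Old scheme: a global cache that had to be refreshed on every mm switch. */
static int userspace_pid[1];

static void old_switch_mm(struct mm_id *mm_idp)
{
	userspace_pid[0] = mm_idp->pid;	/* duplicated state that can go stale */
}

int main(void)
{
	/* Before: callers read the cached copy kept by old_switch_mm(). */
	old_switch_mm(current_mm_id());
	printf("old: stub pid = %d\n", userspace_pid[0]);

	/* After: look the PID up where it already lives. */
	printf("new: stub pid = %d\n", current_mm_id()->pid);
	return 0;
}

The gain is purely structural: one less piece of duplicated state to keep in
sync, which is why activate_mm(), flush_thread() and switch_mm() no longer
need to call __switch_mm() at all.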