On Tue 21-05-19 12:06:11, Daniel Vetter wrote:
> In some special cases we must not block, but there is no
> spinlock, preempt-off, irqs-off or similar critical section already
> in place that would arm the might_sleep() debug checks. Add a
> non_block_start/end() pair to annotate these sections.
> 
> This will be used in the oom paths of mmu-notifiers, where blocking is
> not allowed in order to guarantee forward progress. Quoting Michal:
> 
> "The notifier is called from quite a restricted context - oom_reaper -
> which shouldn't depend on any locks or sleepable conditionals. The code
> should be swift as well but we mostly do care about it to make a forward
> progress. Checking for sleepable context is the best thing we could come
> up with that would describe these demands at least partially."
> 
> Peter also asked whether we want to catch spinlocks on top, but Michal
> said those are less of a problem because spinlocks can't have an
> indirect dependency upon the page allocator and hence close the loop
> with the oom reaper.
> 
> Suggested by Michal Hocko.
> 
> v2:
> - Improve commit message (Michal)
> - Also check in schedule, not just might_sleep (Peter)
> 
> v3: It works better when I actually squash in the fixup I had lying
> around :-/
> 
> Cc: Peter Zijlstra <pet...@infradead.org>
> Cc: Ingo Molnar <mi...@redhat.com>
> Cc: Andrew Morton <a...@linux-foundation.org>
> Cc: Michal Hocko <mho...@suse.com>
> Cc: David Rientjes <rient...@google.com>
> Cc: "Christian König" <christian.koe...@amd.com>
> Cc: Daniel Vetter <daniel.vet...@ffwll.ch>
> Cc: "Jérôme Glisse" <jgli...@redhat.com>
> Cc: linux...@kvack.org
> Cc: Masahiro Yamada <yamada.masah...@socionext.com>
> Cc: Wei Wang <w...@google.com>
> Cc: Andy Shevchenko <andriy.shevche...@linux.intel.com>
> Cc: Thomas Gleixner <t...@linutronix.de>
> Cc: Jann Horn <ja...@google.com>
> Cc: Feng Tang <feng.t...@intel.com>
> Cc: Kees Cook <keesc...@chromium.org>
> Cc: Randy Dunlap <rdun...@infradead.org>
> Cc: linux-ker...@vger.kernel.org
> Acked-by: Christian König <christian.koe...@amd.com> (v1)
> Signed-off-by: Daniel Vetter <daniel.vet...@intel.com>

I like this in general. The implementation looks reasonable to me but I
didn't check deeply enough to give my R-by or A-by.
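
For anyone following the thread, here is a minimal sketch of how a caller
would wrap such a section with the new annotation. The surrounding function
and helper names are made up purely for illustration; only
non_block_start()/non_block_end() come from this patch:

	/*
	 * Illustrative only: oom_reap_range() and unmap_range_nonblock()
	 * are hypothetical names, not part of this patch.
	 */
	static void oom_reap_range(struct mm_struct *mm,
				   unsigned long start, unsigned long end)
	{
		non_block_start();
		/*
		 * With CONFIG_DEBUG_ATOMIC_SLEEP, any might_sleep() reached
		 * in here, or a voluntary schedule() out of a non-running
		 * state, now produces a "scheduling in a non-blocking
		 * section" splat.
		 */
		unmap_range_nonblock(mm, start, end);
		non_block_end();	/* WARNs if start/end are unbalanced */
	}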

> ---
>  include/linux/kernel.h | 10 +++++++++-
>  include/linux/sched.h  |  4 ++++
>  kernel/sched/core.c    | 19 ++++++++++++++-----
>  3 files changed, 27 insertions(+), 6 deletions(-)
> 
> diff --git a/include/linux/kernel.h b/include/linux/kernel.h
> index 74b1ee9027f5..b5f2c2ff0eab 100644
> --- a/include/linux/kernel.h
> +++ b/include/linux/kernel.h
> @@ -214,7 +214,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
>   * might_sleep - annotation for functions that can sleep
>   *
>   * this macro will print a stack trace if it is executed in an atomic
> - * context (spinlock, irq-handler, ...).
> + * context (spinlock, irq-handler, ...). Additional sections where blocking is
> + * not allowed can be annotated with non_block_start() and non_block_end()
> + * pairs.
>   *
>   * This is a useful debugging help to be able to catch problems early and not
>   * be bitten later when the calling function happens to sleep when it is not
> @@ -230,6 +232,10 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
>  # define cant_sleep() \
>       do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
>  # define sched_annotate_sleep()      (current->task_state_change = 0)
> +# define non_block_start() \
> +     do { current->non_block_count++; } while (0)
> +# define non_block_end() \
> +     do { WARN_ON(current->non_block_count-- == 0); } while (0)
>  #else
>    static inline void ___might_sleep(const char *file, int line,
>                                  int preempt_offset) { }
> @@ -238,6 +244,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
>  # define might_sleep() do { might_resched(); } while (0)
>  # define cant_sleep() do { } while (0)
>  # define sched_annotate_sleep() do { } while (0)
> +# define non_block_start() do { } while (0)
> +# define non_block_end() do { } while (0)
>  #endif
>  
>  #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index 11837410690f..7f5b293e72df 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -908,6 +908,10 @@ struct task_struct {
>       struct mutex_waiter             *blocked_on;
>  #endif
>  
> +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
> +     int                             non_block_count;
> +#endif
> +
>  #ifdef CONFIG_TRACE_IRQFLAGS
>       unsigned int                    irq_events;
>       unsigned long                   hardirq_enable_ip;
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 102dfcf0a29a..ed7755a28465 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -3264,13 +3264,22 @@ static noinline void __schedule_bug(struct task_struct *prev)
>  /*
>   * Various schedule()-time debugging checks and statistics:
>   */
> -static inline void schedule_debug(struct task_struct *prev)
> +static inline void schedule_debug(struct task_struct *prev, bool preempt)
>  {
>  #ifdef CONFIG_SCHED_STACK_END_CHECK
>       if (task_stack_end_corrupted(prev))
>               panic("corrupted stack end detected inside scheduler\n");
>  #endif
>  
> +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
> +     if (!preempt && prev->state && prev->non_block_count) {
> +             printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
> +                     prev->comm, prev->pid, prev->non_block_count);
> +             dump_stack();
> +             add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
> +     }
> +#endif
> +
>       if (unlikely(in_atomic_preempt_off())) {
>               __schedule_bug(prev);
>               preempt_count_set(PREEMPT_DISABLED);
> @@ -3377,7 +3386,7 @@ static void __sched notrace __schedule(bool preempt)
>       rq = cpu_rq(cpu);
>       prev = rq->curr;
>  
> -     schedule_debug(prev);
> +     schedule_debug(prev, preempt);
>  
>       if (sched_feat(HRTICK))
>               hrtick_clear(rq);
> @@ -6102,7 +6111,7 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
>       rcu_sleep_check();
>  
>       if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
> -          !is_idle_task(current)) ||
> +          !is_idle_task(current) && !current->non_block_count) ||
>           system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
>           oops_in_progress)
>               return;
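
(For readers skimming the diff: reconstructed from the context lines above,
the early-return condition in ___might_sleep() reads roughly as follows after
this hunk; indentation is approximate.)

	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
	     !is_idle_task(current) && !current->non_block_count) ||
	    system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
	    oops_in_progress)
		return;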
> @@ -6118,8 +6127,8 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
>               "BUG: sleeping function called from invalid context at %s:%d\n",
>                       file, line);
>       printk(KERN_ERR
> -             "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
> -                     in_atomic(), irqs_disabled(),
> +             "in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
> +                     in_atomic(), irqs_disabled(), current->non_block_count,
>                       current->pid, current->comm);
>  
>       if (task_stack_end_corrupted(current))
> -- 
> 2.20.1
> 

-- 
Michal Hocko
SUSE Labs
