On Thu, 22 Aug 2019 15:43:49 +0300, Vlad Buslov wrote:
> diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
> index 02a547aa77c0..bda42f1b5514 100644
> --- a/net/sched/cls_api.c
> +++ b/net/sched/cls_api.c
> @@ -3076,11 +3076,28 @@ __tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
>  int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
>                    void *type_data, bool err_stop, bool rtnl_held)
>  {
> +     bool take_rtnl = false;

Should we perhaps speculatively do:

         bool take_rtnl = READ_ONCE(block->lockeddevcnt);

here? It shouldn't hurt, really, and otherwise every offload that
requires rtnl will have to drop and re-take cb_lock, every single time.
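
For illustration, a minimal sketch (not the patch itself) of how the
function head could look with the speculative read folded in; note the
!rtnl_held guard, without which we could try to take rtnl while the
caller already holds it:

        int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
                             void *type_data, bool err_stop, bool rtnl_held)
        {
                /* Speculative: if rtnl-locked devs are already bound to
                 * the block, plan to take rtnl up front so the common
                 * case skips the drop-and-retry of cb_lock below.
                 */
                bool take_rtnl = READ_ONCE(block->lockeddevcnt) && !rtnl_held;
                int ok_count;

        retry:
                ...
        }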

>       int ok_count;
>  
> +retry:
> +     if (take_rtnl)
> +             rtnl_lock();
>       down_read(&block->cb_lock);
> +     /* Need to obtain rtnl lock if block is bound to devs that require it.
> +      * In block bind code cb_lock is obtained while holding rtnl, so we must
> +      * obtain the locks in same order here.
> +      */
> +     if (!rtnl_held && !take_rtnl && block->lockeddevcnt) {
> +             up_read(&block->cb_lock);
> +             take_rtnl = true;
> +             goto retry;
> +     }
> +
>       ok_count = __tc_setup_cb_call(block, type, type_data, err_stop);
> +
>       up_read(&block->cb_lock);
> +     if (take_rtnl)
> +             rtnl_unlock();
>       return ok_count;
>  }
>  EXPORT_SYMBOL(tc_setup_cb_call);
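
For anyone else following the ordering comment in the patch: the bind
path takes rtnl and then cb_lock, so acquiring rtnl here while still
holding cb_lock would invert that order and could deadlock against a
concurrent bind. A rough illustration (assumed, not from the patch) of
the ABBA pattern the drop-and-retry avoids:

        /* bind path:                     offload call path (no retry):
         *   rtnl_lock();                   down_read(&block->cb_lock);
         *   down_write(&block->cb_lock);   rtnl_lock();  <- ABBA deadlock
         *
         * Releasing cb_lock, taking rtnl first and retrying keeps the
         * rtnl -> cb_lock order identical on both paths.
         */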
