----- On Nov 12, 2018, at 2:44 PM, Geneviève Bastien gbast...@versatic.net wrote:

> Trace events are already present for the receive entry points, to indicate
> how the reception entered the stack.
> 
> This patch adds the corresponding exit trace events that will bound the
> reception such that all events occurring between the entry and the exit
> can be considered as part of the reception context. This greatly helps
> with dependency and root-cause analyses.
> 
> Without this, it is impossible to determine whether a sched_wakeup
> event following a netif_receive_skb event is the result of the packet
> reception or a simple coincidence after further processing by the
> thread.
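
For an analysis tool, the entry/exit pair turns the reception into an
interval: any event recorded on the same CPU whose timestamp falls between
the two can be attributed to that packet. A rough sketch of the matching
logic on the analysis side (types and names are purely illustrative, not
from any existing tool):

#include <stdbool.h>

/* One reception span, built from a netif_receive_skb_entry event and the
 * matching netif_receive_skb_exit event (same CPU, same skbaddr). */
struct rx_span {
        unsigned long long t_entry;
        unsigned long long t_exit;
        const void *skbaddr;
};

/* An event (e.g. sched_wakeup) belongs to the reception context iff its
 * timestamp lies inside the span. */
static bool in_rx_context(const struct rx_span *span,
                          unsigned long long t_event)
{
        return t_event >= span->t_entry && t_event <= span->t_exit;
}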

As discussed over IRC, it is _possible_ to use kretprobes to instrument
the exit, but it is not practical. A v3 will be sent soon to clarify
this part of the commit message.
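
For reference, a kretprobe-based version would look roughly like the module
below (module and handler names are mine, for illustration only). It does
report the return value, but correlating it with the skb would need an
additional entry_handler stashing the pointer, and the probe depends on
resolving the symbol at runtime rather than on a static tracepoint:

#include <linux/module.h>
#include <linux/kprobes.h>

/* Runs when netif_receive_skb() returns; only the return value is
 * directly available here. */
static int nrs_ret_handler(struct kretprobe_instance *ri,
                           struct pt_regs *regs)
{
        pr_debug("netif_receive_skb ret=%ld\n",
                 (long)regs_return_value(regs));
        return 0;
}

static struct kretprobe nrs_kretprobe = {
        .handler        = nrs_ret_handler,
        .kp.symbol_name = "netif_receive_skb",
};

static int __init nrs_init(void)
{
        return register_kretprobe(&nrs_kretprobe);
}

static void __exit nrs_exit(void)
{
        unregister_kretprobe(&nrs_kretprobe);
}

module_init(nrs_init);
module_exit(nrs_exit);
MODULE_LICENSE("GPL");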

Also, I wonder if we should use "net_dev_template_exit" for the event
class rather than "net_dev_template_return"?
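
Concretely, the rename would only change the class name and its
DEFINE_EVENT users, e.g. (same body as in the patch):

DECLARE_EVENT_CLASS(net_dev_template_exit,

        TP_PROTO(struct sk_buff *skb, int ret),

        TP_ARGS(skb, ret),

        TP_STRUCT__entry(
                __field(void *, skbaddr)
                __field(int,    ret)
        ),

        TP_fast_assign(
                __entry->skbaddr = skb;
                __entry->ret = ret;
        ),

        TP_printk("skbaddr=%p ret=%d", __entry->skbaddr, __entry->ret)
)

DEFINE_EVENT(net_dev_template_exit, netif_rx_exit,

        TP_PROTO(struct sk_buff *skb, int ret),

        TP_ARGS(skb, ret)
);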

Thanks,

Mathieu

> 
> Signed-off-by: Geneviève Bastien <gbast...@versatic.net>
> CC: Mathieu Desnoyers <mathieu.desnoy...@efficios.com>
> CC: Steven Rostedt <rost...@goodmis.org>
> CC: Ingo Molnar <mi...@redhat.com>
> CC: David S. Miller <da...@davemloft.net>
> ---
> Changes in v2:
>  - Add the return value to tracepoints where applicable
>  - Verify if tracepoint is enabled before walking list in
>    netif_receive_skb_list
> ---
> include/trace/events/net.h | 78 ++++++++++++++++++++++++++++++++++++++
> net/core/dev.c             | 38 ++++++++++++++++---
> 2 files changed, 110 insertions(+), 6 deletions(-)
> 
> diff --git a/include/trace/events/net.h b/include/trace/events/net.h
> index 00aa72ce0e7c..cff1a7b9d0bb 100644
> --- a/include/trace/events/net.h
> +++ b/include/trace/events/net.h
> @@ -117,6 +117,42 @@ DECLARE_EVENT_CLASS(net_dev_template,
>               __get_str(name), __entry->skbaddr, __entry->len)
> )
> 
> +DECLARE_EVENT_CLASS(net_dev_template_return,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret),
> +
> +     TP_STRUCT__entry(
> +             __field(void *, skbaddr)
> +             __field(int,    ret)
> +     ),
> +
> +     TP_fast_assign(
> +             __entry->skbaddr = skb;
> +             __entry->ret = ret;
> +     ),
> +
> +     TP_printk("skbaddr=%p ret=%d", __entry->skbaddr, __entry->ret)
> +)
> +
> +DECLARE_EVENT_CLASS(net_dev_template_simple,
> +
> +     TP_PROTO(struct sk_buff *skb),
> +
> +     TP_ARGS(skb),
> +
> +     TP_STRUCT__entry(
> +             __field(void *, skbaddr)
> +     ),
> +
> +     TP_fast_assign(
> +             __entry->skbaddr = skb;
> +     ),
> +
> +     TP_printk("skbaddr=%p", __entry->skbaddr)
> +)
> +
> DEFINE_EVENT(net_dev_template, net_dev_queue,
> 
>       TP_PROTO(struct sk_buff *skb),
> @@ -244,6 +280,48 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_ni_entry,
>       TP_ARGS(skb)
> );
> 
> +DEFINE_EVENT(net_dev_template_return, napi_gro_frags_exit,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret)
> +);
> +
> +DEFINE_EVENT(net_dev_template_return, napi_gro_receive_exit,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret)
> +);
> +
> +DEFINE_EVENT(net_dev_template_return, netif_receive_skb_exit,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret)
> +);
> +
> +DEFINE_EVENT(net_dev_template_simple, netif_receive_skb_list_exit,
> +
> +     TP_PROTO(struct sk_buff *skb),
> +
> +     TP_ARGS(skb)
> +);
> +
> +DEFINE_EVENT(net_dev_template_return, netif_rx_exit,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret)
> +);
> +
> +DEFINE_EVENT(net_dev_template_return, netif_rx_ni_exit,
> +
> +     TP_PROTO(struct sk_buff *skb, int ret),
> +
> +     TP_ARGS(skb, ret)
> +);
> +
> #endif /* _TRACE_NET_H */
> 
> /* This part must be outside protection */
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 0ffcbdd55fa9..c4dc5df34abe 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -4520,9 +4520,14 @@ static int netif_rx_internal(struct sk_buff *skb)
> 
> int netif_rx(struct sk_buff *skb)
> {
> +     int ret;
> +
>       trace_netif_rx_entry(skb);
> 
> -     return netif_rx_internal(skb);
> +     ret = netif_rx_internal(skb);
> +     trace_netif_rx_exit(skb, ret);
> +
> +     return ret;
> }
> EXPORT_SYMBOL(netif_rx);
> 
> @@ -4537,6 +4542,7 @@ int netif_rx_ni(struct sk_buff *skb)
>       if (local_softirq_pending())
>               do_softirq();
>       preempt_enable();
> +     trace_netif_rx_ni_exit(skb, err);
> 
>       return err;
> }
> @@ -5222,9 +5228,14 @@ static void netif_receive_skb_list_internal(struct list_head *head)
>  */
> int netif_receive_skb(struct sk_buff *skb)
> {
> +     int ret;
> +
>       trace_netif_receive_skb_entry(skb);
> 
> -     return netif_receive_skb_internal(skb);
> +     ret = netif_receive_skb_internal(skb);
> +     trace_netif_receive_skb_exit(skb, ret);
> +
> +     return ret;
> }
> EXPORT_SYMBOL(netif_receive_skb);
> 
> @@ -5244,9 +5255,15 @@ void netif_receive_skb_list(struct list_head *head)
> 
>       if (list_empty(head))
>               return;
> -     list_for_each_entry(skb, head, list)
> -             trace_netif_receive_skb_list_entry(skb);
> +     if (trace_netif_receive_skb_list_entry_enabled()) {
> +             list_for_each_entry(skb, head, list)
> +                     trace_netif_receive_skb_list_entry(skb);
> +     }
>       netif_receive_skb_list_internal(head);
> +     if (trace_netif_receive_skb_list_exit_enabled()) {
> +             list_for_each_entry(skb, head, list)
> +                     trace_netif_receive_skb_list_exit(skb);
> +     }
> }
> EXPORT_SYMBOL(netif_receive_skb_list);
> 
> @@ -5634,12 +5651,17 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
> 
> gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
> {
> +     gro_result_t ret;
> +
>       skb_mark_napi_id(skb, napi);
>       trace_napi_gro_receive_entry(skb);
> 
>       skb_gro_reset_offset(skb);
> 
> -     return napi_skb_finish(dev_gro_receive(napi, skb), skb);
> +     ret = napi_skb_finish(dev_gro_receive(napi, skb), skb);
> +     trace_napi_gro_receive_exit(skb, ret);
> +
> +     return ret;
> }
> EXPORT_SYMBOL(napi_gro_receive);
> 
> @@ -5753,6 +5775,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
> 
> gro_result_t napi_gro_frags(struct napi_struct *napi)
> {
> +     gro_result_t ret;
>       struct sk_buff *skb = napi_frags_skb(napi);
> 
>       if (!skb)
> @@ -5760,7 +5783,10 @@ gro_result_t napi_gro_frags(struct napi_struct *napi)
> 
>       trace_napi_gro_frags_entry(skb);
> 
> -     return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
> +     ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
> +     trace_napi_gro_frags_exit(skb, ret);
> +
> +     return ret;
> }
> EXPORT_SYMBOL(napi_gro_frags);
> 
> --
> 2.19.1

-- 
Mathieu Desnoyers
EfficiOS Inc.
http://www.efficios.com
