On Thu, 2019-05-23 at 10:54 -0700, Stephen Hemminger wrote:
> When a device is stacked like (team, bonding, failsafe or netvsc) the
> XDP generic program for the parent device was not called.
>
> Move the call to XDP generic inside __netif_receive_skb_core where
> it can be done multiple times for stacked case.
>
> Suggested-by: Jiri Pirko <j...@resnulli.us>
> Fixes: d445516966dc ("net: xdp: support xdp generic on virtual devices")
> Signed-off-by: Stephen Hemminger <sthem...@microsoft.com>
> ---
> v1 - call xdp_generic in netvsc handler
> v2 - do xdp_generic in generic rx handler processing
> v3 - move xdp_generic call inside the another pass loop
>
>  net/core/dev.c | 56 ++++++++++----------------------------------
>  1 file changed, 11 insertions(+), 45 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index b6b8505cfb3e..696776e14d00 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -4502,23 +4502,6 @@ static int netif_rx_internal(struct sk_buff *skb)
>
>          trace_netif_rx(skb);
>
> -        if (static_branch_unlikely(&generic_xdp_needed_key)) {
> -                int ret;
> -
> -                preempt_disable();
> -                rcu_read_lock();
> -                ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
> -                rcu_read_unlock();
> -                preempt_enable();
> -
> -                /* Consider XDP consuming the packet a success from
> -                 * the netdev point of view we do not want to count
> -                 * this as an error.
> -                 */
> -                if (ret != XDP_PASS)
> -                        return NET_RX_SUCCESS;
> -        }
> -
Adding Jesper.

There is a small behavioral change due to this patch: when RPS is
configured, the generic XDP program will now run on the RPS target CPU
rather than on the CPU that first received the packet. This could
change the behavior of xdp_redirect_cpu (bpf_redirect_map() on a
cpu_map). Maybe this is acceptable, but it should be documented, since
the current assumption is that an XDP program runs on the core where
the XDP frame/SKB was first seen. A rough sketch of the ordering change
is at the end of this mail.

>  #ifdef CONFIG_RPS
>          if (static_branch_unlikely(&rps_needed)) {
>                  struct rps_dev_flow voidflow, *rflow = &voidflow;
> @@ -4858,6 +4841,17 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
>
>          __this_cpu_inc(softnet_data.processed);
>
> +        if (static_branch_unlikely(&generic_xdp_needed_key)) {
> +                int ret2;
> +
> +                preempt_disable();
> +                ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
> +                preempt_enable();
> +
> +                if (ret2 != XDP_PASS)
> +                        return NET_RX_DROP;
> +        }
> +
>          if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
>              skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
>                  skb = skb_vlan_untag(skb);
> @@ -5178,19 +5172,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
>          if (skb_defer_rx_timestamp(skb))
>                  return NET_RX_SUCCESS;
>
> -        if (static_branch_unlikely(&generic_xdp_needed_key)) {
> -                int ret;
> -
> -                preempt_disable();
> -                rcu_read_lock();
> -                ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
> -                rcu_read_unlock();
> -                preempt_enable();
> -
> -                if (ret != XDP_PASS)
> -                        return NET_RX_DROP;
> -        }
> -
>          rcu_read_lock();
>  #ifdef CONFIG_RPS
>          if (static_branch_unlikely(&rps_needed)) {
> @@ -5224,21 +5205,6 @@ static void netif_receive_skb_list_internal(struct list_head *head)
>          }
>          list_splice_init(&sublist, head);
>
> -        if (static_branch_unlikely(&generic_xdp_needed_key)) {
> -                preempt_disable();
> -                rcu_read_lock();
> -                list_for_each_entry_safe(skb, next, head, list) {
> -                        xdp_prog = rcu_dereference(skb->dev->xdp_prog);
> -                        skb_list_del_init(skb);
> -                        if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
> -                                list_add_tail(&skb->list, &sublist);
> -                }
> -                rcu_read_unlock();
> -                preempt_enable();
> -                /* Put passed packets back on main list */
> -                list_splice_init(&sublist, head);
> -        }
> -
>          rcu_read_lock();
>  #ifdef CONFIG_RPS
>          if (static_branch_unlikely(&rps_needed)) {
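
To illustrate the ordering change I mean, here is a rough, simplified
sketch (pseudo-code, not the exact net/core/dev.c code paths; argument
lists abbreviated):

        /* Before the patch (netif_receive_skb_internal): */
        do_xdp_generic(prog, skb);          /* runs on the CPU that first saw the skb */
        cpu = get_rps_cpu(skb->dev, skb, &rflow);  /* RPS may pick another CPU */
        enqueue_to_backlog(skb, cpu, ...);  /* __netif_receive_skb_core runs there */

        /* After the patch: */
        cpu = get_rps_cpu(skb->dev, skb, &rflow);  /* RPS steering happens first */
        enqueue_to_backlog(skb, cpu, ...);
        /* ... later, from process_backlog on the RPS target CPU ... */
        do_xdp_generic(prog, skb);          /* generic XDP now runs on the RPS CPU */

So a bpf_redirect_map() into a cpu_map now starts from the RPS target
CPU rather than from the CPU that originally received the packet.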