This avoids an indirect call in the receive path for TCP and UDP
packets. TCP takes precedence over UDP, so that we have a single
additional conditional in the common case.
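For reference, the effect can be sketched in plain C (illustrative
only; the stand-in names below are hypothetical and are not the kernel
helpers added by this patch): the callback pointer is compared against
the known TCP and UDP handlers and, on a match, called directly, so the
common TCP case pays a single predictable comparison instead of a
retpoline-bound indirect branch.

    /* User-space sketch of the indirect-call-avoidance idea. */
    #include <stdio.h>

    static int tcp_receive(int pkt) { return pkt + 1; } /* stand-in for the TCP GRO handler */
    static int udp_receive(int pkt) { return pkt + 2; } /* stand-in for the UDP GRO handler */

    typedef int (*receive_cb)(int pkt);

    /* Try TCP first, then UDP, and only then fall back to the
     * indirect call: one extra conditional in the common case.
     */
    static int call_receive(receive_cb cb, int pkt)
    {
            if (cb == tcp_receive)
                    return tcp_receive(pkt);
            if (cb == udp_receive)
                    return udp_receive(pkt);
            return cb(pkt);
    }

    int main(void)
    {
            printf("%d\n", call_receive(tcp_receive, 0)); /* direct TCP path */
            printf("%d\n", call_receive(udp_receive, 0)); /* second comparison */
            return 0;
    }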
Signed-off-by: Paolo Abeni <pab...@redhat.com>
---
 include/net/inet_common.h |  7 +++++++
 net/ipv4/af_inet.c        | 11 +++++++++--
 net/ipv4/tcp_offload.c    |  5 +++++
 net/ipv4/udp_offload.c    |  5 +++++
 net/ipv6/ip6_offload.c    | 10 ++++++++--
 net/ipv6/tcpv6_offload.c  |  5 +++++
 net/ipv6/udp_offload.c    |  5 +++++
 7 files changed, 44 insertions(+), 4 deletions(-)

diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 56e7592811ea..667bb8247f9a 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -56,4 +56,11 @@ static inline void inet_ctl_sock_destroy(struct sock *sk)
 		sock_release(sk->sk_socket);
 }
 
+#define indirect_call_gro_receive(name, cb, head, skb)	\
+({								\
+	unlikely(gro_recursion_inc_test(skb)) ?			\
+		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
+		INDIRECT_CALL_2(cb, name, head, skb);		\
+})
+
 #endif
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 04ab7ebd6e9b..774f183f56e3 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1385,6 +1385,8 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 }
 EXPORT_SYMBOL(inet_gso_segment);
 
+INDIRECT_CALLABLE_DECLARE_2(struct sk_buff *, transport4_gro_receive,
+			    struct list_head *head, struct sk_buff *skb);
 struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 {
 	const struct net_offload *ops;
@@ -1494,7 +1496,8 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 	skb_gro_pull(skb, sizeof(*iph));
 	skb_set_transport_header(skb, skb_gro_offset(skb));
 
-	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+	pp = indirect_call_gro_receive(transport4_gro_receive,
+				       ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -1558,6 +1561,8 @@ int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 	return -EINVAL;
 }
 
+INDIRECT_CALLABLE_DECLARE_2(int, transport4_gro_complete, struct sk_buff *skb,
+			    int);
 int inet_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	__be16 newlen = htons(skb->len - nhoff);
@@ -1583,7 +1588,9 @@ int inet_gro_complete(struct sk_buff *skb, int nhoff)
 	 * because any hdr with option will have been flushed in
 	 * inet_gro_receive().
 	 */
-	err = ops->callbacks.gro_complete(skb, nhoff + sizeof(*iph));
+	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
+			      transport4_gro_complete, skb,
+			      nhoff + sizeof(*iph));
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 870b0a335061..3d5dfac4cd1b 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -10,6 +10,7 @@
  *	TCPv4 GSO/GRO support
  */
 
+#include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
 #include <net/tcp.h>
 #include <net/protocol.h>
@@ -317,6 +318,8 @@ static struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	return tcp_gro_receive(head, skb);
 }
+INDIRECT_CALLABLE(tcp4_gro_receive, 2, struct sk_buff *, transport4_gro_receive,
+		  struct list_head *, struct sk_buff *);
 
 static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 {
@@ -332,6 +335,8 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
 	return tcp_gro_complete(skb);
 }
+INDIRECT_CALLABLE(tcp4_gro_complete, 2, int, transport4_gro_complete,
+		  struct sk_buff *, int);
 
 static const struct net_offload tcpv4_offload = {
 	.callbacks = {
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0646d61f4fa8..c3c5b237c8e0 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -13,6 +13,7 @@
 #include <linux/skbuff.h>
 #include <net/udp.h>
 #include <net/protocol.h>
+#include <net/inet_common.h>
 
 static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
 	netdev_features_t features,
@@ -477,6 +478,8 @@ static struct sk_buff *udp4_gro_receive(struct list_head *head,
 	NAPI_GRO_CB(skb)->flush = 1;
 	return NULL;
 }
+INDIRECT_CALLABLE(udp4_gro_receive, 1, struct sk_buff *, transport4_gro_receive,
+		  struct list_head *, struct sk_buff *);
 
 static int udp_gro_complete_segment(struct sk_buff *skb)
 {
@@ -536,6 +539,8 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 
 	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
 }
+INDIRECT_CALLABLE(udp4_gro_complete, 1, int, transport4_gro_complete,
+		  struct sk_buff *, int);
 
 static const struct net_offload udpv4_offload = {
 	.callbacks = {
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a1c2bfb2ce0d..eeca4164a155 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -164,6 +164,8 @@ static int ipv6_exthdrs_len(struct ipv6hdr *iph,
 	return len;
 }
 
+INDIRECT_CALLABLE_DECLARE_2(struct sk_buff *, transport6_gro_receive,
+			    struct list_head *head, struct sk_buff *skb);
 static struct sk_buff *ipv6_gro_receive(struct list_head *head,
 					struct sk_buff *skb)
 {
@@ -260,7 +262,8 @@ static struct sk_buff *ipv6_gro_receive(struct list_head *head,
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
 
-	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
+	pp = indirect_call_gro_receive(transport6_gro_receive,
+				       ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
 	rcu_read_unlock();
@@ -303,6 +306,8 @@ static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
 	return inet_gro_receive(head, skb);
 }
 
+INDIRECT_CALLABLE_DECLARE_2(int, transport6_gro_complete, struct sk_buff *skb,
+			    int);
 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 {
 	const struct net_offload *ops;
@@ -322,7 +327,8 @@ static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
 		goto out_unlock;
 
-	err = ops->callbacks.gro_complete(skb, nhoff);
+	err = INDIRECT_CALL_2(ops->callbacks.gro_complete,
+			      transport6_gro_complete, skb, nhoff);
 
 out_unlock:
 	rcu_read_unlock();
diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
index e72947c99454..3c85afc4cf43 100644
--- a/net/ipv6/tcpv6_offload.c
+++ b/net/ipv6/tcpv6_offload.c
@@ -9,6 +9,7 @@
  *
  *	TCPv6 GSO/GRO support
  */
+#include <linux/indirect_call_wrapper.h>
 #include <linux/skbuff.h>
 #include <net/protocol.h>
 #include <net/tcp.h>
@@ -28,6 +29,8 @@ static struct sk_buff *tcp6_gro_receive(struct list_head *head,
 
 	return tcp_gro_receive(head, skb);
 }
+INDIRECT_CALLABLE(tcp6_gro_receive, 2, struct sk_buff *, transport6_gro_receive,
+		  struct list_head *, struct sk_buff *);
 
 static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 {
@@ -40,6 +43,8 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
 	return tcp_gro_complete(skb);
 }
+INDIRECT_CALLABLE(tcp6_gro_complete, 2, int, transport6_gro_complete,
+		  struct sk_buff *, int);
 
 static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
 					netdev_features_t features)
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 828b2457f97b..ce4d491c583c 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -11,6 +11,7 @@
  */
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
+#include <linux/indirect_call_wrapper.h>
 #include <net/protocol.h>
 #include <net/ipv6.h>
 #include <net/udp.h>
@@ -141,6 +142,8 @@ static struct sk_buff *udp6_gro_receive(struct list_head *head,
 	NAPI_GRO_CB(skb)->flush = 1;
 	return NULL;
 }
+INDIRECT_CALLABLE(udp6_gro_receive, 1, struct sk_buff *, transport6_gro_receive,
+		  struct list_head *, struct sk_buff *);
 
 static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 {
@@ -153,6 +156,8 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 
 	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
 }
+INDIRECT_CALLABLE(udp6_gro_complete, 1, int, transport6_gro_complete,
+		  struct sk_buff *, int);
 
 static const struct net_offload udpv6_offload = {
 	.callbacks = {
-- 
2.19.2