This avoids another indirect call in the UDP GRO receive and complete
paths. As in the previous patches, the test for the IPv6 variant is
performed first.

Signed-off-by: Paolo Abeni <pab...@redhat.com>
---
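Note: the idea behind INDIRECT_CALL_INET() is to compare the function
pointer against the known candidates and branch to a direct call when
it matches, keeping the plain indirect call only as a fallback. A
simplified sketch of the pattern follows; it is illustrative only and
not the exact macro bodies used by this series, which also depend on
CONFIG_IPV6 and the retpoline configuration, and which spell the
callees through the udp_lookup alias declared via INDIRECT_CALLABLE():

    /* rough sketch: try the second (IPv6) callee first, then the
     * first (IPv4) one, and fall back to the indirect call
     */
    #define INDIRECT_CALL_2(f, f2, f1, ...)                           \
            (likely((f) == (f2)) ? (f2)(__VA_ARGS__) :                \
             likely((f) == (f1)) ? (f1)(__VA_ARGS__) :                \
                                   (f)(__VA_ARGS__))

    /* equivalent of the hunks below, written with explicit callees */
    sk = INDIRECT_CALL_2(lookup, udp6_lib_lookup_skb,
                         udp4_lib_lookup_skb,
                         skb, uh->source, uh->dest);
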
 net/ipv4/udp.c         | 2 ++
 net/ipv4/udp_offload.c | 6 ++++--
 net/ipv6/udp.c         | 2 ++
 3 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index aff2a8e99e01..9ea851f47598 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -544,6 +544,8 @@ struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
        return __udp4_lib_lookup_skb(skb, sport, dport, &udp_table);
 }
 EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
+INDIRECT_CALLABLE(udp4_lib_lookup_skb, 1, struct sock *, udp_lookup,
+                 struct sk_buff *skb, __be16 sport, __be16 dport);
 
 /* Must be called under rcu_read_lock().
  * Does increment socket refcount.
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index c3c5b237c8e0..0ccd2aa1ab98 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -392,6 +392,8 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
        return NULL;
 }
 
+INDIRECT_CALLABLE_DECLARE_2(struct sock *, udp_lookup, struct sk_buff *skb,
+                           __be16 sport, __be16 dport);
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct udphdr *uh, udp_lookup_t lookup)
 {
@@ -403,7 +405,7 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
        struct sock *sk;
 
        rcu_read_lock();
-       sk = (*lookup)(skb, uh->source, uh->dest);
+       sk = INDIRECT_CALL_INET(lookup, udp_lookup, skb, uh->source, uh->dest);
        if (!sk)
                goto out_unlock;
 
@@ -505,7 +507,7 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
        uh->len = newlen;
 
        rcu_read_lock();
-       sk = (*lookup)(skb, uh->source, uh->dest);
+       sk = INDIRECT_CALL_INET(lookup, udp_lookup, skb, uh->source, uh->dest);
        if (sk && udp_sk(sk)->gro_enabled) {
                err = udp_gro_complete_segment(skb);
        } else if (sk && udp_sk(sk)->gro_complete) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 09cba4cfe31f..616f374760d1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -282,6 +282,8 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
                                 inet6_sdif(skb), &udp_table, skb);
 }
 EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
+INDIRECT_CALLABLE(udp6_lib_lookup_skb, 2, struct sock *, udp_lookup,
+                 struct sk_buff *skb, __be16 sport, __be16 dport);
 
 /* Must be called under rcu_read_lock().
  * Does increment socket refcount.
-- 
2.19.2
