This avoids another indirect call for UDP GRO. Again, the test
for the IPv6 variant is performed first.

v1 -> v2:
 - adapted to INDIRECT_CALL_ changes

Signed-off-by: Paolo Abeni <pab...@redhat.com>
---
 net/ipv4/udp_offload.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 9a141a6cf1a0..64f9715173ac 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -392,6 +392,8 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
        return NULL;
 }
 
+INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
+                                                  __be16 sport, __be16 dport));
 struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct udphdr *uh, udp_lookup_t lookup)
 {
@@ -403,7 +405,8 @@ struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
        struct sock *sk;
 
        rcu_read_lock();
-       sk = (*lookup)(skb, uh->source, uh->dest);
+       sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+                               udp4_lib_lookup_skb, skb, uh->source, uh->dest);
        if (!sk)
                goto out_unlock;
 
@@ -503,7 +506,8 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff,
        uh->len = newlen;
 
        rcu_read_lock();
-       sk = (*lookup)(skb, uh->source, uh->dest);
+       sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
+                               udp4_lib_lookup_skb, skb, uh->source, uh->dest);
        if (sk && udp_sk(sk)->gro_enabled) {
                err = udp_gro_complete_segment(skb);
        } else if (sk && udp_sk(sk)->gro_complete) {
-- 
2.19.2

Reply via email to