Hi,

On Mon, 2019-01-28 at 09:50 +0100, Steffen Klassert wrote:
> diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
> index 64f9715173ac..584635db9231 100644
> --- a/net/ipv4/udp_offload.c
> +++ b/net/ipv4/udp_offload.c
> @@ -392,35 +392,24 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
>       return NULL;
>  }
>  
> -INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
> -                                                __be16 sport, __be16 dport));
>  struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
> -                             struct udphdr *uh, udp_lookup_t lookup)
> +                             struct udphdr *uh, struct sock *sk)
>  {
>       struct sk_buff *pp = NULL;
>       struct sk_buff *p;
>       struct udphdr *uh2;
>       unsigned int off = skb_gro_offset(skb);
>       int flush = 1;
> -     struct sock *sk;
> -
> -     rcu_read_lock();
> -     sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
> -                             udp4_lib_lookup_skb, skb, uh->source, uh->dest);
> -     if (!sk)
> -             goto out_unlock;
>  
> -     if (udp_sk(sk)->gro_enabled) {
> +     if (!sk || !udp_sk(sk)->gro_receive) {
>               pp = call_gro_receive(udp_gro_receive_segment, head, skb);
> -             rcu_read_unlock();
>               return pp;
>       }
>  
>       if (NAPI_GRO_CB(skb)->encap_mark ||
>           (skb->ip_summed != CHECKSUM_PARTIAL &&
>            NAPI_GRO_CB(skb)->csum_cnt == 0 &&
> -          !NAPI_GRO_CB(skb)->csum_valid) ||
> -         !udp_sk(sk)->gro_receive)
> +          !NAPI_GRO_CB(skb)->csum_valid))
>               goto out_unlock;

Here I think an additional chunk is missing: the caller is now holding
the RCU lock, so we should drop the rcu_read_unlock() from this
function (and likely rename the associated label).
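
Just to spell out what I mean, something along these lines (a rough,
untested sketch on top of your patch; the context lines are from my
memory of the current tree):

 	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
 
-out_unlock:
-	rcu_read_unlock();
+out:
 	skb_gro_flush_final(skb, pp, flush);
 	return pp;
 }

with the remaining 'goto out_unlock' statements above turned into
'goto out'.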
 
>       /* mark that this skb passed once through the tunnel gro layer */
> @@ -459,8 +448,10 @@ INDIRECT_CALLABLE_SCOPE
>  struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>  {
>       struct udphdr *uh = udp_gro_udphdr(skb);
> +     struct sk_buff *pp;
> +     struct sock *sk;
>  
> -     if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
> +     if (unlikely(!uh))
>               goto flush;
>  
>       /* Don't bother verifying checksum if we're going to flush anyway. */
> @@ -475,7 +466,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
>                                            inet_gro_compute_pseudo);
>  skip:
>       NAPI_GRO_CB(skb)->is_ipv6 = 0;
> -     return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);
> +     rcu_read_lock();
> +     sk = static_branch_unlikely(&udp_encap_needed_key) ?
> +          udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
> +     pp = udp_gro_receive(head, skb, uh, sk);
> +     rcu_read_unlock();
> +     return pp;
>  
>  flush:
>       NAPI_GRO_CB(skb)->flush = 1;

_Unrelated_ to this patch, but IIRC the RCU lock is already held by
dev_gro_receive(), so IMHO a follow-up patch could possibly remove the
lock here and make the code a bit smaller, e.g. something like the
sketch below.
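
Again untested, and assuming the NAPI context really does keep the RCU
read lock held across udp4_gro_receive():

 skip:
      NAPI_GRO_CB(skb)->is_ipv6 = 0;
-     rcu_read_lock();
      sk = static_branch_unlikely(&udp_encap_needed_key) ?
           udp4_lib_lookup_skb(skb, uh->source, uh->dest) : NULL;
-     pp = udp_gro_receive(head, skb, uh, sk);
-     rcu_read_unlock();
-     return pp;
+     return udp_gro_receive(head, skb, uh, sk);

That would also let us drop the 'pp' local added by this patch.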

Apart from the first point above, I like this patch a lot!

Paolo
