On Fri, Aug 03, 2018 at 01:23:33AM +0000, Vakul Garg wrote:
> 
> 
> > -----Original Message-----
> > From: Doron Roberts-Kedes [mailto:doro...@fb.com]
> > Sent: Friday, August 3, 2018 6:00 AM
> > To: David S . Miller <da...@davemloft.net>
> > Cc: Dave Watson <davejwat...@fb.com>; Vakul Garg
> > <vakul.g...@nxp.com>; Boris Pismenny <bor...@mellanox.com>; Aviad
> > Yehezkel <avia...@mellanox.com>; netdev@vger.kernel.org; Doron
> > Roberts-Kedes <doro...@fb.com>
> > Subject: [PATCH net-next] net/tls: Calculate nsg for zerocopy path without
> > skb_cow_data.
> > 
> > decrypt_skb fails if the number of sg elements required to map the skb
> > is greater than MAX_SKB_FRAGS. As noted by Vakul Garg, nsg must always
> > be calculated, but skb_cow_data adds unnecessary memcpy calls for the
> > zerocopy case.
> > 
> > The new function skb_nsg calculates the number of scatterlist elements
> > required to map the skb without the extra overhead of skb_cow_data. This
> > function mimics the structure of skb_to_sgvec.
> > 
> > Fixes: c46234ebb4d1 ("tls: RX path for ktls")
> > Signed-off-by: Doron Roberts-Kedes <doro...@fb.com>
> > ---
> >  net/tls/tls_sw.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++--
> >  1 file changed, 86 insertions(+), 3 deletions(-)
> > 
> > diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
> > index ff3a6904a722..c62793601cfc 100644
> > --- a/net/tls/tls_sw.c
> > +++ b/net/tls/tls_sw.c
> > @@ -43,6 +43,76 @@
> > 
> >  #define MAX_IV_SIZE        TLS_CIPHER_AES_GCM_128_IV_SIZE
> > 
> > +static int __skb_nsg(struct sk_buff *skb, int offset, int len,
> > +              unsigned int recursion_level)
> > +{
> > +        int start = skb_headlen(skb);
> > +        int i, copy = start - offset;
> > +        struct sk_buff *frag_iter;
> > +        int elt = 0;
> > +
> > +        if (unlikely(recursion_level >= 24))
> > +                return -EMSGSIZE;
> > +
> > +        if (copy > 0) {
> > +                if (copy > len)
> > +                        copy = len;
> > +                elt++;
> > +                if ((len -= copy) == 0)
> > +                        return elt;
> > +                offset += copy;
> > +        }
> > +
> > +        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
> > +                int end;
> > +
> > +                WARN_ON(start > offset + len);
> > +
> > +                end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
> > +                if ((copy = end - offset) > 0) {
> > +                        if (copy > len)
> > +                                copy = len;
> > +                        elt++;
> > +                        if (!(len -= copy))
> > +                                return elt;
> > +                        offset += copy;
> > +                }
> > +                start = end;
> > +        }
> > +
> > +        skb_walk_frags(skb, frag_iter) {
> > +                int end, ret;
> > +
> > +                WARN_ON(start > offset + len);
> > +
> > +                end = start + frag_iter->len;
> > +                if ((copy = end - offset) > 0) {
> > +
> > +                        if (copy > len)
> > +                                copy = len;
> > +                        ret = __skb_nsg(frag_iter, offset - start, copy,
> > +                                   recursion_level + 1);
> > +                        if (unlikely(ret < 0))
> > +                                return ret;
> > +                        elt += ret;
> > +                        if ((len -= copy) == 0)
> > +                                return elt;
> > +                        offset += copy;
> > +                }
> > +                start = end;
> > +        }
> > +        BUG_ON(len);
> > +        return elt;
> > +}
> > +
> > +/* Return the number of scatterlist elements required to completely map
> > + * the skb, or -EMSGSIZE if the recursion depth is exceeded.
> > + */
> > +static int skb_nsg(struct sk_buff *skb, int offset, int len)
> > +{
> > +	return __skb_nsg(skb, offset, len, 0);
> > +}
> > +
> 
> This is a generic function and useful elsewhere too.
> Should the above two functions be exported from skbuff.c?

True. Perhaps it can move into skbuff.c if/when there is a second
use case for it.
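
If it does move, a rough sketch of the shape it could take (hypothetical
placement, not part of this patch): keep __skb_nsg static in
net/core/skbuff.c, declare the wrapper in include/linux/skbuff.h, and
export it so modular users like tls.ko can still reach it:

	/* net/core/skbuff.c -- hypothetical, mirrors the helper above */
	int skb_nsg(struct sk_buff *skb, int offset, int len)
	{
		return __skb_nsg(skb, offset, len, 0);
	}
	EXPORT_SYMBOL(skb_nsg);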

> 
> >  static int tls_do_decryption(struct sock *sk,
> >                          struct scatterlist *sgin,
> >                          struct scatterlist *sgout,
> > @@ -693,7 +763,7 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
> >     struct scatterlist sgin_arr[MAX_SKB_FRAGS + 2];
> >     struct scatterlist *sgin = &sgin_arr[0];
> >     struct strp_msg *rxm = strp_msg(skb);
> > -   int ret, nsg = ARRAY_SIZE(sgin_arr);
> > +   int ret, nsg;
> >     struct sk_buff *unused;
> > 
> >     ret = skb_copy_bits(skb, rxm->offset + TLS_HEADER_SIZE,
> > @@ -704,10 +774,23 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
> > 
> >     memcpy(iv, tls_ctx->rx.iv, TLS_CIPHER_AES_GCM_128_SALT_SIZE);
> >     if (!sgout) {
> > -           nsg = skb_cow_data(skb, 0, &unused) + 1;
> > +           nsg = skb_cow_data(skb, 0, &unused);
> > +   } else {
> > +           nsg = skb_nsg(skb,
> > +                         rxm->offset + tls_ctx->rx.prepend_size,
> > +                         rxm->full_len - tls_ctx->rx.prepend_size);
> > +           if (nsg <= 0)
> > +                   return nsg;
> The comparison should be (nsg < 1); TLS forbids zero-sized records.

Yes true, v2 incoming
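
Roughly what I have in mind for the zerocopy branch in v2 (sketch only;
whether a zero element count should be passed through or mapped to an
error code such as -EBADMSG is still open):

	nsg = skb_nsg(skb,
		      rxm->offset + tls_ctx->rx.prepend_size,
		      rxm->full_len - tls_ctx->rx.prepend_size);
	if (nsg < 1)
		return nsg ? nsg : -EBADMSG;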

> 
> > +   }
> > +
> > +   /* We need one extra for ctx->rx_aad_ciphertext */
> > +   nsg++;
> > +
> > +   if (nsg > ARRAY_SIZE(sgin_arr))
> >             sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
> 
> Add a check for kmalloc_array returning NULL.

Yes true, v2 incoming.
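
Sketch of the error handling I'd add in v2 (assuming -ENOMEM is the right
thing to propagate when the allocation fails):

	if (nsg > ARRAY_SIZE(sgin_arr)) {
		sgin = kmalloc_array(nsg, sizeof(*sgin), sk->sk_allocation);
		if (!sgin)
			return -ENOMEM;
	}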

> 
> > +
> > +   if (!sgout)
> >             sgout = sgin;
> > -   }
> > 
> >     sg_init_table(sgin, nsg);
> >     sg_set_buf(&sgin[0], ctx->rx_aad_ciphertext, TLS_AAD_SPACE_SIZE);
> > --
> > 2.17.1
> 
