Mina Almasry wrote:
> Augment dmabuf binding to be able to handle TX. In addition to all the RX
> binding state, we also create the tx_vec and tx_iter needed for the TX path.
> 
> Provide API for sendmsg to be able to send dmabufs bound to this device:
> 
> - Provide a new dmabuf_tx_cmsg which includes the dmabuf to send from,
>   and the offset into the dmabuf to send from.
> - MSG_ZEROCOPY with SCM_DEVMEM_DMABUF cmsg indicates send from dma-buf.
> 
> Devmem is uncopyable, so piggyback off the existing MSG_ZEROCOPY
> implementation, while disabling instances where MSG_ZEROCOPY falls back
> to copying.
> 
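
As a reader aid (not part of the patch): a rough userspace sketch of what a
send under this proposal might look like. Everything here is an assumption
pieced together from the cover text and from the sockc->dmabuf_id /
sockc->dmabuf_offset fields used further down; the real uapi is in the
include/uapi/linux/uio.h hunk, which is not quoted in this reply.

	/* Hypothetical sketch only; struct and field names are guesses. */
	#include <linux/types.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	struct dmabuf_tx_cmsg {
		__u32 dmabuf_id;	/* id returned by the netlink TX bind */
		__u32 dmabuf_offset;	/* byte offset into the dmabuf */
	};

	static ssize_t send_from_dmabuf(int fd, __u32 id, __u32 off, size_t len)
	{
		char ctrl[CMSG_SPACE(sizeof(struct dmabuf_tx_cmsg))] = {};
		struct dmabuf_tx_cmsg dtx = { .dmabuf_id = id, .dmabuf_offset = off };
		/* Payload lives in the dmabuf; the iovec is assumed to carry
		 * only the length (whether iov_base may be NULL is exactly
		 * one of the questions raised below).
		 */
		struct iovec iov = { .iov_base = NULL, .iov_len = len };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SCM_DEVMEM_DMABUF;
		cm->cmsg_len = CMSG_LEN(sizeof(dtx));
		memcpy(CMSG_DATA(cm), &dtx, sizeof(dtx));

		/* Assumes SO_ZEROCOPY was already set on the socket. */
		return sendmsg(fd, &msg, MSG_ZEROCOPY);
	}
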
> We additionally look up the dmabuf to send from by id, then pipe the
> binding down to the new zerocopy_fill_skb_from_devmem which fills a TX skb
> with net_iov netmems instead of the traditional page netmems.
> 
> We also special case skb_frag_dma_map to return the dma-address of these
> dmabuf net_iovs instead of attempting to map pages.
> 
> Based on work by Stanislav Fomichev <s...@fomichev.me>. A lot of the meat
> of the implementation came from devmem TCP RFC v1[1], which included the
> TX path, but Stan did all the rebasing on top of netmem/net_iov.
> 
> Cc: Stanislav Fomichev <s...@fomichev.me>
> Signed-off-by: Kaiyuan Zhang <kaiyu...@google.com>
> Signed-off-by: Mina Almasry <almasrym...@google.com>
> 
> ---
>  include/linux/skbuff.h                  | 13 +++-
>  include/net/sock.h                      |  2 +
>  include/uapi/linux/uio.h                |  5 ++
>  net/core/datagram.c                     | 40 ++++++++++-
>  net/core/devmem.c                       | 91 +++++++++++++++++++++++--
>  net/core/devmem.h                       | 40 +++++++++--
>  net/core/netdev-genl.c                  | 65 +++++++++++++++++-
>  net/core/skbuff.c                       |  8 ++-
>  net/core/sock.c                         |  9 +++
>  net/ipv4/tcp.c                          | 36 +++++++---
>  net/vmw_vsock/virtio_transport_common.c |  4 +-
>  11 files changed, 281 insertions(+), 32 deletions(-)
> 

> +static int zerocopy_fill_skb_from_devmem(struct sk_buff *skb,
> +                                      struct msghdr *msg,
> +                                      struct iov_iter *from, int length)
> +{
> +     int i = skb_shinfo(skb)->nr_frags;
> +     int orig_length = length;
> +     netmem_ref netmem;
> +     size_t size;
> +
> +     while (length && iov_iter_count(from)) {
> +             if (i == MAX_SKB_FRAGS)
> +                     return -EMSGSIZE;
> +
> +             size = min_t(size_t, iter_iov_len(from), length);
> +             if (!size)
> +                     return -EFAULT;

On error, should the caller (skb_zerocopy_iter_stream) rewind 'from', rather
than (or as well as) msg->msg_iter?
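To make the question concrete, a sketch of the kind of rewind meant here
(illustrative only, not a proposed hunk; the local variable names are assumed
from the caller's context):

	/* In the caller, remember how much of 'from' the fill consumed and
	 * undo it on failure, independent of the existing msg_iter revert.
	 */
	size_t from_before = iov_iter_count(from);

	err = __zerocopy_sg_from_iter(msg, sk, skb, from, len, is_devmem);
	if (err)
		iov_iter_revert(from, from_before - iov_iter_count(from));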
> +
> +             netmem = net_iov_to_netmem(iter_iov(from)->iov_base);
> +             get_netmem(netmem);
> +             skb_add_rx_frag_netmem(skb, i, netmem, from->iov_offset, size,
> +                                    PAGE_SIZE);
> +
> +             iov_iter_advance(from, size);
> +             length -= size;
> +             i++;
> +     }
> +
> +     iov_iter_advance(&msg->msg_iter, orig_length);

What does this do if sendmsg is called with a NULL buffer?
> +
> +     return 0;
> +}
> +
>  int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
>                           struct sk_buff *skb, struct iov_iter *from,
> -                         size_t length)
> +                         size_t length, bool is_devmem)
>  {
>       unsigned long orig_size = skb->truesize;
>       unsigned long truesize;
> @@ -702,6 +736,8 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
>  
>       if (msg && msg->msg_ubuf && msg->sg_from_iter)
>               ret = msg->sg_from_iter(skb, from, length);
> +     else if (unlikely(is_devmem))
> +             ret = zerocopy_fill_skb_from_devmem(skb, msg, from, length);
>       else
>               ret = zerocopy_fill_skb_from_iter(skb, from, length);
>  
> @@ -735,7 +771,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
>       if (skb_copy_datagram_from_iter(skb, 0, from, copy))
>               return -EFAULT;
>  
> -     return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U);
> +     return __zerocopy_sg_from_iter(NULL, NULL, skb, from, ~0U, NULL);
>  }
>  EXPORT_SYMBOL(zerocopy_sg_from_iter);

>  struct net_iov *
>  net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
> @@ -109,6 +112,13 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
>       unsigned long xa_idx;
>       unsigned int rxq_idx;
>  
> +     xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> +
> +     /* Ensure no tx net_devmem_lookup_dmabuf() are in flight after the
> +      * erase.
> +      */
> +     synchronize_net();
> +

What precisely does this protect?

synchronize_net() ensures that no packet is still in flight inside an RCU
read-side section. But a packet can still be in flight elsewhere, for
instance posted to the device or queued in a qdisc.

>       if (binding->list.next)
>               list_del(&binding->list);
>  
> @@ -122,8 +132,6 @@ void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
>               WARN_ON(netdev_rx_queue_restart(binding->dev, rxq_idx));
>       }
>  
> -     xa_erase(&net_devmem_dmabuf_bindings, binding->id);
> -
>       net_devmem_dmabuf_binding_put(binding);
>  }
>  
> @@ -174,8 +182,9 @@ int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
>  }
>  
>  struct net_devmem_dmabuf_binding *
> -net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
> -                    struct netlink_ext_ack *extack)
> +net_devmem_bind_dmabuf(struct net_device *dev,
> +                    enum dma_data_direction direction,
> +                    unsigned int dmabuf_fd, struct netlink_ext_ack *extack)
>  {
>       struct net_devmem_dmabuf_binding *binding;
>       static u32 id_alloc_next;
> @@ -183,6 +192,7 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>       struct dma_buf *dmabuf;
>       unsigned int sg_idx, i;
>       unsigned long virtual;
> +     struct iovec *iov;
>       int err;
>  
>       dmabuf = dma_buf_get(dmabuf_fd);
> @@ -218,13 +228,19 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>       }
>  
>       binding->sgt = dma_buf_map_attachment_unlocked(binding->attachment,
> -                                                    DMA_FROM_DEVICE);
> +                                                    direction);
>       if (IS_ERR(binding->sgt)) {
>               err = PTR_ERR(binding->sgt);
>               NL_SET_ERR_MSG(extack, "Failed to map dmabuf attachment");
>               goto err_detach;
>       }
>  
> +     if (!binding->sgt || binding->sgt->nents == 0) {
> +             err = -EINVAL;
> +             NL_SET_ERR_MSG(extack, "Empty dmabuf attachment");
> +             goto err_detach;
> +     }
> +
>       /* For simplicity we expect to make PAGE_SIZE allocations, but the
>        * binding can be much more flexible than that. We may be able to
>        * allocate MTU sized chunks here. Leave that for future work...
> @@ -236,6 +252,19 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>               goto err_unmap;
>       }
>  
> +     if (direction == DMA_TO_DEVICE) {
> +             virtual = 0;
> +             for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx)
> +                     virtual += sg_dma_len(sg);
> +
> +             binding->tx_vec = kcalloc(virtual / PAGE_SIZE + 1,

Instead of open-coding this computation repeatedly, consider a local
variable. Also add parentheses, and maybe use round_up().
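Something along these lines, for illustration only (dma_len and num_pages
are made-up names):

	size_t dma_len = 0;
	size_t num_pages;

	for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx)
		dma_len += sg_dma_len(sg);

	/* One iovec per PAGE_SIZE chunk, rounded up rather than "+ 1";
	 * equivalently DIV_ROUND_UP(dma_len, PAGE_SIZE).
	 */
	num_pages = round_up(dma_len, PAGE_SIZE) / PAGE_SIZE;
	binding->tx_vec = kcalloc(num_pages, sizeof(struct iovec), GFP_KERNEL);
	if (!binding->tx_vec) {
		err = -ENOMEM;
		goto err_unmap;
	}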

> +                                       sizeof(struct iovec), GFP_KERNEL);
> +             if (!binding->tx_vec) {
> +                     err = -ENOMEM;
> +                     goto err_unmap;
> +             }
> +     }
> +
>       virtual = 0;
>       for_each_sgtable_dma_sg(binding->sgt, sg, sg_idx) {
>               dma_addr_t dma_addr = sg_dma_address(sg);
> @@ -277,11 +306,21 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>                       niov->owner = owner;
>                       page_pool_set_dma_addr_netmem(net_iov_to_netmem(niov),
>                                                     net_devmem_get_dma_addr(niov));
> +
> +                     if (direction == DMA_TO_DEVICE) {
> +                             iov = &binding->tx_vec[virtual / PAGE_SIZE + i];

Why does this start counting at virtual / PAGE_SIZE, rather than 0?

> +                             iov->iov_base = niov;
> +                             iov->iov_len = PAGE_SIZE;
> +                     }
>               }
>  
>               virtual += len;
>       }
>  
> +     if (direction == DMA_TO_DEVICE)
> +             iov_iter_init(&binding->tx_iter, WRITE, binding->tx_vec,
> +                           virtual / PAGE_SIZE + 1, virtual);
> +
>       return binding;
>  
>  err_free_chunks:
> @@ -302,6 +341,21 @@ net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
>       return ERR_PTR(err);
>  }
>  
> +struct net_devmem_dmabuf_binding *net_devmem_lookup_dmabuf(u32 id)
> +{
> +     struct net_devmem_dmabuf_binding *binding;
> +
> +     rcu_read_lock();
> +     binding = xa_load(&net_devmem_dmabuf_bindings, id);
> +     if (binding) {
> +             if (!net_devmem_dmabuf_binding_get(binding))
> +                     binding = NULL;
> +     }
> +     rcu_read_unlock();
> +
> +     return binding;
> +}
> +
>  void dev_dmabuf_uninstall(struct net_device *dev)
>  {
>       struct net_devmem_dmabuf_binding *binding;
> @@ -332,6 +386,33 @@ void net_devmem_put_net_iov(struct net_iov *niov)
>       net_devmem_dmabuf_binding_put(niov->owner->binding);
>  }
>  
> +struct net_devmem_dmabuf_binding *
> +net_devmem_get_sockc_binding(struct sock *sk, struct sockcm_cookie *sockc)
> +{
> +     struct net_devmem_dmabuf_binding *binding;
> +     int err = 0;
> +
> +     binding = net_devmem_lookup_dmabuf(sockc->dmabuf_id);

This lookup is from the global xarray net_devmem_dmabuf_bindings.

Is there a check that the socket is sending out through the device
to which this dmabuf was bound with netlink? Should there be?
(e.g., SO_BINDTODEVICE).
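If such a check is wanted, one possible shape (illustrative only) would be to
require the socket to be bound to the same netdev the dmabuf was bound to:

	/* Illustrative sketch: presumes the socket was bound with
	 * SO_BINDTODEVICE; reject if it points at a different netdev than
	 * the one the dmabuf was bound to via netlink.
	 */
	if (!sk->sk_bound_dev_if ||
	    sk->sk_bound_dev_if != binding->dev->ifindex) {
		err = -ENODEV;
		goto out_err;
	}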

> +     if (!binding || !binding->tx_vec) {
> +             err = -EINVAL;
> +             goto out_err;
> +     }
> +
> +     if (sock_net(sk) != dev_net(binding->dev)) {
> +             err = -ENODEV;
> +             goto out_err;
> +     }
> +
> +     iov_iter_advance(&binding->tx_iter, sockc->dmabuf_offset);
> +     return binding;
> +
> +out_err:
> +     if (binding)
> +             net_devmem_dmabuf_binding_put(binding);
> +
> +     return ERR_PTR(err);
> +}
> +
>  /*** "Dmabuf devmem memory provider" ***/
>  

> @@ -1063,6 +1064,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
>  
>       flags = msg->msg_flags;
>  
> +     sockcm_init(&sockc, sk);
> +     if (msg->msg_controllen) {
> +             err = sock_cmsg_send(sk, msg, &sockc);
> +             if (unlikely(err)) {
> +                     err = -EINVAL;
> +                     goto out_err;
> +             }
> +     }
> +
>       if ((flags & MSG_ZEROCOPY) && size) {
>               if (msg->msg_ubuf) {
>                       uarg = msg->msg_ubuf;
> @@ -1080,6 +1090,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
>                       else
>                               uarg_to_msgzc(uarg)->zerocopy = 0;
>               }
> +
> +             if (sockc.dmabuf_id != 0) {
> +                     binding = net_devmem_get_sockc_binding(sk, &sockc);
> +                     if (IS_ERR(binding)) {
> +                             err = PTR_ERR(binding);
> +                             binding = NULL;
> +                             goto out_err;
> +                     }
> +             }
