On 2/3/25 22:39, Mina Almasry wrote:
...
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bb2b751d274a..3ff8f568c382 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1711,9 +1711,12 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
...
  int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
                                struct iov_iter *from, size_t length);
@@ -1721,12 +1724,14 @@ int zerocopy_fill_skb_from_iter(struct sk_buff *skb,
  static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb,
                                          struct msghdr *msg, int len)
  {
-       return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len);
+       return __zerocopy_sg_from_iter(msg, skb->sk, skb, &msg->msg_iter, len,
+                                      NULL);

Instead of propagating it all the way down and carving a new path, why
not reuse the existing infra? You already hook into the place where the
ubuf is allocated, so you can stash the binding there. And
zerocopy_fill_skb_from_devmem() can implement ->sg_from_iter, see
__zerocopy_sg_from_iter().
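
Roughly, as a sketch (untested; ubuf_info_devmem and devmem_sg_from_iter
are names made up here for illustration, and it assumes the uarg has
already been attached to the skb by the time the callback runs):

struct ubuf_info_devmem {
        struct ubuf_info ubuf;
        struct net_devmem_dmabuf_binding *binding;
};

static int devmem_sg_from_iter(struct sk_buff *skb, struct iov_iter *from,
                               size_t length)
{
        /* the binding was stashed when the ubuf was allocated */
        struct ubuf_info_devmem *uarg =
                container_of(skb_zcopy(skb), struct ubuf_info_devmem, ubuf);

        return zerocopy_fill_skb_from_devmem(skb, from, length,
                                             uarg->binding);
}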

...
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f0693707aece..c989606ff58d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -63,6 +63,8 @@
...
+static int
+zerocopy_fill_skb_from_devmem(struct sk_buff *skb, struct iov_iter *from,
+                             int length,
+                             struct net_devmem_dmabuf_binding *binding)
+{
+       int i = skb_shinfo(skb)->nr_frags;
+       size_t virt_addr, size, off;
+       struct net_iov *niov;
+
+       while (length && iov_iter_count(from)) {
+               if (i == MAX_SKB_FRAGS)
+                       return -EMSGSIZE;
+
+               virt_addr = (size_t)iter_iov_addr(from);

Unless I missed it somewhere, it needs to check that the iter is iovec
based, since iter_iov_addr() is only meaningful for user-address iters.
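
I.e. something like this at the top of the function (ITER_UBUF should
presumably be allowed as well; -EFAULT is a guess at the errno):

        if (!iter_is_iovec(from) && !iter_is_ubuf(from))
                return -EFAULT;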

+               niov = net_devmem_get_niov_at(binding, virt_addr, &off, &size);
+               if (!niov)
+                       return -EFAULT;
+
+               size = min_t(size_t, size, length);
+               size = min_t(size_t, size, iter_iov_len(from));
+
+               get_netmem(net_iov_to_netmem(niov));
+               skb_add_rx_frag_netmem(skb, i, net_iov_to_netmem(niov), off,
+                                      size, PAGE_SIZE);
+               iov_iter_advance(from, size);
+               length -= size;
+               i++;
+       }
+
+       return 0;
+}
+
  int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
                            struct sk_buff *skb, struct iov_iter *from,
-                           size_t length)
+                           size_t length,
+                           struct net_devmem_dmabuf_binding *binding)
  {
        unsigned long orig_size = skb->truesize;
        unsigned long truesize;
@@ -702,6 +737,8 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
        if (msg && msg->msg_ubuf && msg->sg_from_iter)
                ret = msg->sg_from_iter(skb, from, length);

As mentioned above, you can implement this callback. The callback can
also be moved into ubuf_info ops if that's more convenient; I had
patches stashed for that.
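
I.e. extend the ops table (a sketch, not the stashed patches; uarg is
passed to the callback explicitly so it doesn't depend on skb_zcopy()
being set up by that point):

struct ubuf_info_ops {
        void (*complete)(struct sk_buff *, struct ubuf_info *,
                         bool zerocopy_success);
        int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg);
        /* new: let the uarg provider fill the skb frags itself */
        int (*sg_from_iter)(struct sk_buff *skb, struct ubuf_info *uarg,
                            struct iov_iter *from, size_t length);
};

Then __zerocopy_sg_from_iter() would take the uarg instead of the
binding and dispatch on it:

        if (uarg && uarg->ops->sg_from_iter)
                ret = uarg->ops->sg_from_iter(skb, uarg, from, length);
        else
                ret = zerocopy_fill_skb_from_iter(skb, from, length);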

+       else if (unlikely(binding))
+               ret = zerocopy_fill_skb_from_devmem(skb, from, length, binding);
        else
                ret = zerocopy_fill_skb_from_iter(skb, from, length);
@@ -735,7 +772,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
        if (skb_copy_datagram_from_iter(skb, 0, from, copy))
                return -EFAULT;

...

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0d704bda6c41..44198ae7e44c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1051,6 +1051,7 @@ int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *copied,
  int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
  {
+       struct net_devmem_dmabuf_binding *binding = NULL;
        struct tcp_sock *tp = tcp_sk(sk);
        struct ubuf_info *uarg = NULL;
        struct sk_buff *skb;
@@ -1063,6 +1064,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
        flags = msg->msg_flags;

+       sockcm_init(&sockc, sk);
+       if (msg->msg_controllen) {
+               err = sock_cmsg_send(sk, msg, &sockc);
+               if (unlikely(err)) {
+                       err = -EINVAL;
+                       goto out_err;
+               }
+       }
+
        if ((flags & MSG_ZEROCOPY) && size) {
                if (msg->msg_ubuf) {
                        uarg = msg->msg_ubuf;
@@ -1080,6 +1090,15 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
                        else
                                uarg_to_msgzc(uarg)->zerocopy = 0;
                }
+
+               if (sockc.dmabuf_id != 0) {

It's better to make this mutually exclusive with msg->msg_ubuf, since
those callers have expectations about the buffers used. And you likely
don't want to mix it with normal MSG_ZEROCOPY in a single skb and/or
ubuf_info; you can force reallocation of the ubuf_info here.
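
E.g. as a sketch (the exact errno is up to you):

        if (sockc.dmabuf_id != 0) {
                /* devmem doesn't mix with caller-provided ubufs */
                if (msg->msg_ubuf) {
                        err = -EINVAL;
                        goto out_err;
                }

                /* and force a fresh uarg rather than reusing one that
                 * already covers normal MSG_ZEROCOPY pages, e.g. via a
                 * (hypothetical) devmem flag to msg_zerocopy_realloc()
                 */
                ...
        }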

+                       binding = net_devmem_get_binding(sk, sockc.dmabuf_id);
+                       if (IS_ERR(binding)) {
+                               err = PTR_ERR(binding);
+                               binding = NULL;
+                               goto out_err;
+                       }
+               }

--
Pavel Begunkov

