Implement netdev devmem allocator. The allocator takes a given struct
netdev_dmabuf_binding as input and allocates page_pool_iov from that
binding.

The allocation simply delegates to the binding's genpool for the
allocation logic and wraps the returned memory region in a page_pool_iov
struct.

page_pool_iovs are refcounted and are freed back to the binding's genpool
when the refcount drops to 0.

Signed-off-by: Willem de Bruijn <will...@google.com>
Signed-off-by: Kaiyuan Zhang <kaiyu...@google.com>
Signed-off-by: Mina Almasry <almasrym...@google.com>

---
 include/linux/netdevice.h       | 13 ++++++++++++
 include/net/page_pool/helpers.h | 28 +++++++++++++++++++++++++
 net/core/dev.c                  | 37 +++++++++++++++++++++++++++++++++
 3 files changed, 78 insertions(+)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index eeeda849115c..1c351c138a5b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -843,6 +843,9 @@ struct netdev_dmabuf_binding {
 };
 
 #ifdef CONFIG_DMA_SHARED_BUFFER
+struct page_pool_iov *
+netdev_alloc_devmem(struct netdev_dmabuf_binding *binding);
+void netdev_free_devmem(struct page_pool_iov *ppiov);
 void __netdev_devmem_binding_free(struct netdev_dmabuf_binding *binding);
 int netdev_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
                       struct netdev_dmabuf_binding **out);
@@ -850,6 +853,16 @@ void netdev_unbind_dmabuf(struct netdev_dmabuf_binding 
*binding);
 int netdev_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
                                struct netdev_dmabuf_binding *binding);
 #else
+/* Stubs for !CONFIG_DMA_SHARED_BUFFER: devmem is unavailable, so
+ * allocation always fails and free is a no-op.
+ */
+static inline struct page_pool_iov *
+netdev_alloc_devmem(struct netdev_dmabuf_binding *binding)
+{
+       return NULL;
+}
+
+static inline void netdev_free_devmem(struct page_pool_iov *ppiov)
+{
+}
+
 static inline void
 __netdev_devmem_binding_free(struct netdev_dmabuf_binding *binding)
 {
diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h
index 4ebd544ae977..78cbb040af94 100644
--- a/include/net/page_pool/helpers.h
+++ b/include/net/page_pool/helpers.h
@@ -83,6 +83,34 @@ static inline u64 *page_pool_ethtool_stats_get(u64 *data, 
void *stats)
 }
 #endif
 
+/* page_pool_iov support */
+
+/* Chunk owner (the contiguous dma-buf region) backing this ppiov. */
+static inline struct dmabuf_genpool_chunk_owner *
+page_pool_iov_owner(const struct page_pool_iov *ppiov)
+{
+       return ppiov->owner;
+}
+
+/* Index of this ppiov within its owner's ppiovs array, derived by
+ * pointer arithmetic against the array base.
+ */
+static inline unsigned int page_pool_iov_idx(const struct page_pool_iov *ppiov)
+{
+       return ppiov - page_pool_iov_owner(ppiov)->ppiovs;
+}
+
+/* DMA address of the PAGE_SIZE region this ppiov represents: the owner
+ * chunk's base dma address plus the ppiov's page-sized offset into it.
+ */
+static inline dma_addr_t
+page_pool_iov_dma_addr(const struct page_pool_iov *ppiov)
+{
+       struct dmabuf_genpool_chunk_owner *owner = page_pool_iov_owner(ppiov);
+
+       return owner->base_dma_addr +
+              ((dma_addr_t)page_pool_iov_idx(ppiov) << PAGE_SHIFT);
+}
+
+/* The netdev_dmabuf_binding this ppiov was allocated from. */
+static inline struct netdev_dmabuf_binding *
+page_pool_iov_binding(const struct page_pool_iov *ppiov)
+{
+       return page_pool_iov_owner(ppiov)->binding;
+}
+
 /**
  * page_pool_dev_alloc_pages() - allocate a page.
  * @pool:      pool from which to allocate
diff --git a/net/core/dev.c b/net/core/dev.c
index c8c3709d42c8..2315bbc03ec8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -156,6 +156,7 @@
 #include <linux/genalloc.h>
 #include <linux/dma-buf.h>
 #include <net/page_pool/types.h>
+#include <net/page_pool/helpers.h>
 
 #include "dev.h"
 #include "net-sysfs.h"
@@ -2077,6 +2078,42 @@ void __netdev_devmem_binding_free(struct 
netdev_dmabuf_binding *binding)
        kfree(binding);
 }
 
+/* Allocate one PAGE_SIZE region of devmem from the binding's genpool and
+ * return the page_pool_iov describing it, or NULL if the pool is exhausted.
+ * Takes a reference on the binding, dropped again in netdev_free_devmem().
+ */
+struct page_pool_iov *netdev_alloc_devmem(struct netdev_dmabuf_binding 
*binding)
+{
+       struct dmabuf_genpool_chunk_owner *owner;
+       struct page_pool_iov *ppiov;
+       unsigned long dma_addr;
+       ssize_t offset;
+       ssize_t index;
+
+       /* The genpool returns a dma address inside one of the binding's
+        * chunks and the owning chunk via the owner cookie.
+        */
+       dma_addr = gen_pool_alloc_owner(binding->chunk_pool, PAGE_SIZE,
+                                       (void **)&owner);
+       if (!dma_addr)
+               return NULL;
+
+       /* Map the dma address back to the pre-allocated ppiov covering
+        * that page of the chunk.
+        */
+       offset = dma_addr - owner->base_dma_addr;
+       index = offset / PAGE_SIZE;
+       ppiov = &owner->ppiovs[index];
+
+       /* Outstanding ppiovs pin the binding. */
+       netdev_devmem_binding_get(binding);
+
+       return ppiov;
+}
+
+/* Return a ppiov's region to its binding's genpool and drop the binding
+ * reference taken in netdev_alloc_devmem().
+ */
+void netdev_free_devmem(struct page_pool_iov *ppiov)
+{
+       struct netdev_dmabuf_binding *binding = page_pool_iov_binding(ppiov);
+
+       /* Re-arm the refcount so the ppiov is ready for its next
+        * allocation.
+        */
+       refcount_set(&ppiov->refcount, 1);
+
+       /* Only addresses still tracked by the genpool are freed back to
+        * it. NOTE(review): presumably this guards against ppiovs whose
+        * chunk has already been removed from the pool (e.g. during
+        * teardown) — confirm the intended case.
+        */
+       if (gen_pool_has_addr(binding->chunk_pool,
+                             page_pool_iov_dma_addr(ppiov), PAGE_SIZE))
+               gen_pool_free(binding->chunk_pool,
+                             page_pool_iov_dma_addr(ppiov), PAGE_SIZE);
+
+       netdev_devmem_binding_put(binding);
+}
+
 void netdev_unbind_dmabuf(struct netdev_dmabuf_binding *binding)
 {
        struct netdev_rx_queue *rxq;
-- 
2.42.0.869.gea05f2083d-goog

Reply via email to