Use page_pool for RX buffer allocation in mergeable and small buffer
modes to enable page recycling and avoid repeated page allocator calls.
skb_mark_for_recycle() lets the network stack return pages to the pool when
the skb is freed, instead of releasing them to the page allocator.
Big packets mode is unchanged because it uses page->private for linked
list chaining of multiple pages per buffer, which conflicts with
page_pool's internal use of page->private.
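For illustration, the allocation/recycle pattern is roughly the following
(hedged sketch, not the exact driver code; the helper name is made up and
truesize is assumed to already include the skb_shared_info tailroom that
build_skb() needs):

static struct sk_buff *rx_build_skb_from_pool(struct page_pool *pool,
					      unsigned int truesize)
{
	unsigned int offset, size = truesize;
	struct sk_buff *skb;
	struct page *page;

	/* The pool hands out (fragments of) recycled pages when it can,
	 * falling back to the page allocator only when it runs dry.
	 */
	page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);
	if (!page)
		return NULL;

	skb = build_skb(page_address(page) + offset, truesize);
	if (!skb) {
		page_pool_put_full_page(pool, page, false);
		return NULL;
	}

	/* Let the stack return the page to the pool on skb free. */
	skb_mark_for_recycle(skb);
	return skb;
}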
Implement conditional DMA premapping using virtqueue_dma_dev():
- When non-NULL (vhost, virtio-pci): use PP_FLAG_DMA_MAP with page_pool
handling DMA mapping, submit via virtqueue_add_inbuf_premapped()
- When NULL (VDUSE, direct physical): page_pool handles allocation only,
submit via virtqueue_add_inbuf_ctx()
This preserves the DMA premapping optimization from commit 31f3cd4e5756b
("virtio-net: rq submits premapped per-buffer") while adding page_pool
support as a prerequisite for future zero-copy features (devmem TCP,
io_uring ZCRX).
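Roughly, the conditional setup looks like this (simplified sketch; field
names follow the patch, error handling and sizes are illustrative):

	struct device *dma_dev = virtqueue_dma_dev(rq->vq);
	struct page_pool_params pp = {
		.order		= 0,
		.pool_size	= ring_size,	/* illustrative */
		.nid		= NUMA_NO_NODE,
		.napi		= &rq->napi,
	};

	if (dma_dev) {
		/* vhost, virtio-pci: the pool owns DMA mapping and
		 * buffers are submitted premapped.
		 */
		pp.flags   = PP_FLAG_DMA_MAP;
		pp.dev	   = dma_dev;
		pp.dma_dir = DMA_FROM_DEVICE;
		rq->use_page_pool_dma = true;
	} else {
		/* VDUSE, direct physical: the pool only allocates and
		 * the virtio core maps at add time.
		 */
		rq->use_page_pool_dma = false;
	}

	rq->page_pool = page_pool_create(&pp);

The fill path then picks virtqueue_add_inbuf_premapped() or
virtqueue_add_inbuf_ctx() based on rq->use_page_pool_dma.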
Page pools are created in probe and destroyed in remove (not open/close),
following existing driver behavior where RX buffers remain in virtqueues
across interface state changes.
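That is, roughly (illustrative; virtnet_create_page_pool() is a made-up name
standing in for the per-queue setup sketched above):

	/* virtnet_probe(), after the virtqueues exist */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		err = virtnet_create_page_pool(vi, &vi->rq[i]);
		if (err)
			goto free_page_pools;
	}

	/* virtnet_remove(), after the device is reset and the unused
	 * buffers have been returned
	 */
	for (i = 0; i < vi->max_queue_pairs; i++) {
		page_pool_destroy(vi->rq[i].page_pool);
		vi->rq[i].page_pool = NULL;
	}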
Signed-off-by: Vishwanath Seshagiri <[email protected]>
---
Resend for net-next window (no changes from v7).
- v7:
https://lore.kernel.org/virtualization/[email protected]/
Changes in v7:
- Replace virtnet_put_page() helper with direct page_pool_put_page()
calls (Xuan Zhuo)
- Add virtnet_no_page_pool() helper to consolidate big_packets mode check
(Michael S. Tsirkin)
- Add DMA sync_for_cpu for subsequent buffers in xdp_linearize_page() when
use_page_pool_dma is set (Michael S. Tsirkin); see the sketch after this list
- Remove unused pp_params.dev assignment in non-DMA path
- Add page pool recreation in virtnet_restore_up() for freeze/restore support
(Chris Mason's review prompt)
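A rough sketch of that DMA sync (offsets and lengths simplified, not the
exact patch code):

	/* Device writes must be visible to the CPU before the copy when
	 * the pool premapped the page.
	 */
	if (rq->use_page_pool_dma)
		dma_sync_single_for_cpu(virtqueue_dma_dev(rq->vq),
					page_pool_get_dma_addr(page) + offset,
					buflen, DMA_FROM_DEVICE);
	memcpy(page_address(new_page) + copied,
	       page_address(page) + offset, buflen);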
- v6:
https://lore.kernel.org/virtualization/[email protected]/
Changes in v6:
- Drop the page_pool_frag_offset_add() helper and switch to page_pool_alloc_va();
page_pool_alloc_netmem() already handles fragmentation internally
(Jakub Kicinski); see the sketch below
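For reference, allocation then reduces to something like (sketch only):

	unsigned int size = len + room;	/* headroom + tailroom, illustrative */
	void *buf = page_pool_alloc_va(rq->page_pool, &size, GFP_ATOMIC);

	if (!buf)
		return -ENOMEM;

with the pool tracking the intra-page offset itself, so the driver no longer
needs its own fragment-offset bookkeeping.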
- v5:
https://lore.kernel.org/virtualization/[email protected]/
Benchmark results:
Configuration: pktgen TX -> tap -> vhost-net | virtio-net RX -> XDP_DROP
Small packets (64 bytes, mrg_rxbuf=off):
1Q: 853,493 -> 868,923 pps (+1.8%)
2Q: 1,655,793 -> 1,696,707 pps (+2.5%)
4Q: 3,143,375 -> 3,302,511 pps (+5.1%)
8Q: 6,082,590 -> 6,156,894 pps (+1.2%)
Mergeable RX (64 bytes):
1Q: 766,168 -> 814,493 pps (+6.3%)
2Q: 1,384,871 -> 1,670,639 pps (+20.6%)
4Q: 2,773,081 -> 3,080,574 pps (+11.1%)
8Q: 5,600,615 -> 6,043,891 pps (+7.9%)
Mergeable RX (1500 bytes):
1Q: 741,579 -> 785,442 pps (+5.9%)
2Q: 1,310,043 -> 1,534,554 pps (+17.1%)
4Q: 2,748,700 -> 2,890,582 pps (+5.2%)
8Q: 5,348,589 -> 5,618,664 pps (+5.0%)
drivers/net/Kconfig | 1 +
drivers/net/virtio_net.c | 467 ++++++++++++++++++++-------------------
2 files changed, 243 insertions(+), 225 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ac12eaf11755..f1e6b6b0a86f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -450,6 +450,7 @@ config VIRTIO_NET
depends on VIRTIO
select NET_FAILOVER
select DIMLIB
+ select PAGE_POOL
help
This is the virtual network driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index db88dcaefb20..32aede2b1ed5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -26,6 +26,7 @@
#include <net/netdev_rx_queue.h>
#include <net/netdev_queues.h>
#include <net/xdp_sock_drv.h>
+#include <net/page_pool/helpers.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -290,14 +291,6 @@ struct virtnet_interrupt_coalesce {
u32 max_usecs;
};
-/* The dma information of pages allocated at a time. */
-struct virtnet_rq_dma {
- dma_addr_t addr;
- u32 ref;
- u16 len;
- u16 need_sync;
-};
-
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -356,8 +349,10 @@ struct receive_queue {
/* Average packet length for mergeable receive buffers. */
struct ewma_pkt_len mrg_avg_pkt_len;
- /* Page frag for packet buffer allocation. */
- struct page_frag alloc_frag;
+ struct page_pool *page_pool;
+
+ /* True if page_pool handles DMA mapping via PP_FLAG_DMA_MAP */
+ bool use_page_pool_dma;
/* RX: fragments + linear part + virtio header */
struct scatterlist sg[MAX_SKB_FRAGS + 2];
@@ -370,9 +365,6 @@ struct receive_queue {
struct xdp_rxq_info xdp_rxq;
- /* Record the last dma info to free after new pages is allocated. */
- struct virtnet_rq_dma *last_dma;
-
struct xsk_buff_pool *xsk_pool;
/* xdp rxq used by xsk */
@@ -521,11 +513,14 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog,
struct xdp_buff *xdp,
struct virtnet_rq_stats *stats);
static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
struct sk_buff *skb, u8 flags);
-static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+static struct sk_buff *virtnet_skb_append_frag(struct receive_queue *rq,
+ struct sk_buff *head_skb,
struct sk_buff *curr_skb,
struct page *page, void *buf,
int len, int truesize);
static void virtnet_xsk_completed(struct send_queue *sq, int num);
+static void free_unused_bufs(struct virtnet_info *vi);
+static void virtnet_del_vqs(struct virtnet_info *vi);
enum virtnet_xmit_type {
VIRTNET_XMIT_TYPE_SKB,
@@ -706,15 +701,18 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
return p;
}
+static bool virtnet_no_page_pool(struct virtnet_info *vi)
+{