Recycle the received page into the page_pool cache if the DMA descriptors
arrived in the wrong order, i.e. a non-first descriptor is processed before
the XDP buffer has been initialized. Without this the page is simply skipped
and leaked.
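
For context, an annotated restatement of the hunk below; the comments are
mine and not part of the patch, while the identifiers come straight from
mvneta_rx_swbm():

    /* A non-first descriptor arrived but no XDP buffer has been set up,
     * i.e. the first descriptor of the frame was never processed. The
     * page cannot be attached to any frame, so clear the DMA address
     * stored in the descriptor and return the page to the page_pool
     * cache (allow_direct is true since we run from the NAPI poll loop)
     * instead of just skipping it and leaking the buffer.
     */
    if (unlikely(!xdp_buf.data_hard_start)) {
        rx_desc->buf_phys_addr = 0;
        page_pool_put_full_page(rxq->page_pool, page, true);
        continue;
    }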

Fixes: ca0e014609f05 ("net: mvneta: move skb build after descriptors processing")
Signed-off-by: Lorenzo Bianconi <lore...@kernel.org>
---
 drivers/net/ethernet/marvell/mvneta.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 69a900081165..c4345e3d616f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2383,8 +2383,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
                        mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
                                             &size, page, &ps);
                } else {
-                       if (unlikely(!xdp_buf.data_hard_start))
+                       if (unlikely(!xdp_buf.data_hard_start)) {
+                               rx_desc->buf_phys_addr = 0;
+                               page_pool_put_full_page(rxq->page_pool, page,
+                                                       true);
                                continue;
+                       }
 
                        mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf,
                                                    &size, page);
-- 
2.26.2
