This reverts commit 09d2b3182c8e3a215a9b2a1834f81dd07305989f.

The reverted change leads to crashes when the device has not negotiated
VIRTIO_F_ACCESS_PLATFORM and sysctl net.core.high_order_alloc_disable=1
is set.
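
A rough reproduction sketch, under stated assumptions (a guest
virtio-net device that has not negotiated VIRTIO_F_ACCESS_PLATFORM,
with an AF_XDP socket bound to one of its queues to exercise the
reverted XSK pool path; the test tooling named below is only
illustrative):

    # disable high-order page allocations in the networking core
    sysctl -w net.core.high_order_alloc_disable=1
    # then bind an AF_XDP socket to a virtio-net queue, e.g. with an
    # xdpsock-style test program; the crash reportedly occurs on this path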
Cc: Xuan Zhuo <xuanz...@linux.alibaba.com>
Reported-by: Si-Wei Liu <si-wei....@oracle.com>
Signed-off-by: Michael S. Tsirkin <m...@redhat.com>
---
 drivers/net/virtio_net.c | 134 ---------------------------------------
 1 file changed, 134 deletions(-)
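
Note for reviewers: after this revert, XDP_SETUP_XSK_POOL is no longer
handled in virtnet_xdp() and falls through to the default case, which
returns -EINVAL, so an AF_XDP zero-copy bind on virtio-net should now
fail cleanly rather than crash; copy-mode AF_XDP, which does not need
driver-side pool support, should be unaffected.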

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3cb0f8adf2e6..0944430dfb1f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -25,7 +25,6 @@
 #include <net/net_failover.h>
 #include <net/netdev_rx_queue.h>
 #include <net/netdev_queues.h>
-#include <net/xdp_sock_drv.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -349,11 +348,6 @@ struct receive_queue {
 
        /* Record the last dma info to free after new pages is allocated. */
        struct virtnet_rq_dma *last_dma;
-
-       struct xsk_buff_pool *xsk_pool;
-
-       /* xdp rxq used by xsk */
-       struct xdp_rxq_info xsk_rxq_info;
 };
 
 /* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -5065,132 +5059,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
        return virtnet_set_guest_offloads(vi, offloads);
 }
 
-static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
-                                   struct xsk_buff_pool *pool)
-{
-       int err, qindex;
-
-       qindex = rq - vi->rq;
-
-       if (pool) {
-               err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
-               if (err < 0)
-                       return err;
-
-               err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
-                                                MEM_TYPE_XSK_BUFF_POOL, NULL);
-               if (err < 0)
-                       goto unreg;
-
-               xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
-       }
-
-       virtnet_rx_pause(vi, rq);
-
-       err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
-       if (err) {
-               netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
-
-               pool = NULL;
-       }
-
-       rq->xsk_pool = pool;
-
-       virtnet_rx_resume(vi, rq);
-
-       if (pool)
-               return 0;
-
-unreg:
-       xdp_rxq_info_unreg(&rq->xsk_rxq_info);
-       return err;
-}
-
-static int virtnet_xsk_pool_enable(struct net_device *dev,
-                                  struct xsk_buff_pool *pool,
-                                  u16 qid)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       struct receive_queue *rq;
-       struct device *dma_dev;
-       struct send_queue *sq;
-       int err;
-
-       if (vi->hdr_len > xsk_pool_get_headroom(pool))
-               return -EINVAL;
-
-       /* In big_packets mode, xdp cannot work, so there is no need to
-        * initialize xsk of rq.
-        */
-       if (vi->big_packets && !vi->mergeable_rx_bufs)
-               return -ENOENT;
-
-       if (qid >= vi->curr_queue_pairs)
-               return -EINVAL;
-
-       sq = &vi->sq[qid];
-       rq = &vi->rq[qid];
-
-       /* xsk assumes that tx and rx must have the same dma device. The af-xdp
-        * may use one buffer to receive from the rx and reuse this buffer to
-        * send by the tx. So the dma dev of sq and rq must be the same one.
-        *
-        * But vq->dma_dev allows every vq has the respective dma dev. So I
-        * check the dma dev of vq and sq is the same dev.
-        */
-       if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
-               return -EINVAL;
-
-       dma_dev = virtqueue_dma_dev(rq->vq);
-       if (!dma_dev)
-               return -EINVAL;
-
-       err = xsk_pool_dma_map(pool, dma_dev, 0);
-       if (err)
-               goto err_xsk_map;
-
-       err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
-       if (err)
-               goto err_rq;
-
-       return 0;
-
-err_rq:
-       xsk_pool_dma_unmap(pool, 0);
-err_xsk_map:
-       return err;
-}
-
-static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       struct xsk_buff_pool *pool;
-       struct receive_queue *rq;
-       int err;
-
-       if (qid >= vi->curr_queue_pairs)
-               return -EINVAL;
-
-       rq = &vi->rq[qid];
-
-       pool = rq->xsk_pool;
-
-       err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
-
-       xsk_pool_dma_unmap(pool, 0);
-
-       return err;
-}
-
-static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
-{
-       if (xdp->xsk.pool)
-               return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
-                                              xdp->xsk.queue_id);
-       else
-               return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
-}
-
 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
                           struct netlink_ext_ack *extack)
 {
@@ -5316,8 +5184,6 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
-       case XDP_SETUP_XSK_POOL:
-               return virtnet_xsk_pool_setup(dev, xdp);
        default:
                return -EINVAL;
        }
-- 
MST

