The previous code allowed the number of Tx queues to be set higher than
the number of Rx queues.  If a packet was sent on a Tx queue with index
>= number Rx queues there was a segfault.

This commit fixes the issue by creating an Rx queue for every Tx queue
meaning that an event buffer is allocated to handle receiving Tx
completion messages.

mbuf pool and Rx ring are not allocated for these additional Rx queues
and RSS configuration ensures that no packets are received on them.

Fixes: 4e9c73e96e83 ("net/netvsc: add Hyper-V network device")
Cc: sthem...@microsoft.com
Cc: sta...@dpdk.org

Signed-off-by: Alan Elder <alan.el...@microsoft.com>
---
v2:
* Remove function declaration for static non-member function

---
 drivers/net/netvsc/hn_ethdev.c |  9 +++++++
 drivers/net/netvsc/hn_rxtx.c   | 46 +++++++++++++++++++++++++++++++---
 2 files changed, 52 insertions(+), 3 deletions(-)

diff --git a/drivers/net/netvsc/hn_ethdev.c b/drivers/net/netvsc/hn_ethdev.c
index b8a32832d7..d7e3f12346 100644
--- a/drivers/net/netvsc/hn_ethdev.c
+++ b/drivers/net/netvsc/hn_ethdev.c
@@ -313,6 +313,15 @@ static int hn_rss_reta_update(struct rte_eth_dev *dev,
 
                if (reta_conf[idx].mask & mask)
                        hv->rss_ind[i] = reta_conf[idx].reta[shift];
+
+               /*
+                * Ensure we don't allow config that directs traffic to an Rx
+                * queue that we aren't going to poll
+                */
+               if (hv->rss_ind[i] >= dev->data->nb_rx_queues) {
+                       PMD_DRV_LOG(ERR, "RSS distributing traffic to invalid Rx queue");
+                       return -EINVAL;
+               }
        }
 
        err = hn_rndis_conf_rss(hv, NDIS_RSS_FLAG_DISABLE);
diff --git a/drivers/net/netvsc/hn_rxtx.c b/drivers/net/netvsc/hn_rxtx.c
index 9bf1ec5509..c0aaeaa972 100644
--- a/drivers/net/netvsc/hn_rxtx.c
+++ b/drivers/net/netvsc/hn_rxtx.c
@@ -243,6 +243,7 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
 {
        struct hn_data *hv = dev->data->dev_private;
        struct hn_tx_queue *txq;
+       struct hn_rx_queue *rxq;
        char name[RTE_MEMPOOL_NAMESIZE];
        uint32_t tx_free_thresh;
        int err = -ENOMEM;
@@ -301,6 +302,22 @@ hn_dev_tx_queue_setup(struct rte_eth_dev *dev,
                goto error;
        }
 
+       /*
+        * If there are more Tx queues than Rx queues, allocate rx_queues
+        * with event buffer so that Tx completion messages can still be
+        * received
+        */
+       if (queue_idx >= dev->data->nb_rx_queues) {
+               rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
+               /*
+                * Don't allocate mbuf pool or rx ring.  RSS is always configured
+                * to ensure packets aren't received by this Rx queue.
+                */
+               rxq->mb_pool = NULL;
+               rxq->rx_ring = NULL;
+               dev->data->rx_queues[queue_idx] = rxq;
+       }
+
        txq->agg_szmax  = RTE_MIN(hv->chim_szmax, hv->rndis_agg_size);
        txq->agg_pktmax = hv->rndis_agg_pkts;
        txq->agg_align  = hv->rndis_agg_align;
@@ -354,6 +371,17 @@ static void hn_txd_put(struct hn_tx_queue *txq, struct hn_txdesc *txd)
        rte_mempool_put(txq->txdesc_pool, txd);
 }
 
+static void
+hn_rx_queue_free_common(struct hn_rx_queue *rxq)
+{
+       if (!rxq)
+               return;
+
+       rte_free(rxq->rxbuf_info);
+       rte_free(rxq->event_buf);
+       rte_free(rxq);
+}
+
 void
 hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
@@ -364,6 +392,13 @@ hn_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
        if (!txq)
                return;
 
+       /*
+        * Free any Rx queues allocated for a Tx queue without a corresponding
+        * Rx queue
+        */
+       if (qid >= dev->data->nb_rx_queues)
+               hn_rx_queue_free_common(dev->data->rx_queues[qid]);
+
        rte_mempool_free(txq->txdesc_pool);
 
        rte_memzone_free(txq->tx_rndis_mz);
@@ -942,6 +977,13 @@ hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (queue_idx == 0) {
                rxq = hv->primary;
        } else {
+               /*
+                * If the number of Tx queues was previously greater than
+                * the number of Rx queues, we may already have allocated
+                * an rxq. If so, free it now before allocating a new one.
+                */
+               hn_rx_queue_free_common(dev->data->rx_queues[queue_idx]);
+
                rxq = hn_rx_queue_alloc(hv, queue_idx, socket_id);
                if (!rxq)
                        return -ENOMEM;
@@ -998,9 +1040,7 @@ hn_rx_queue_free(struct hn_rx_queue *rxq, bool keep_primary)
        if (keep_primary && rxq == rxq->hv->primary)
                return;
 
-       rte_free(rxq->rxbuf_info);
-       rte_free(rxq->event_buf);
-       rte_free(rxq);
+       hn_rx_queue_free_common(rxq);
 }
 
 void
-- 
2.25.1

Reply via email to