On 09/06/2018 08:19 PM, Jens Freimann wrote:
This implements the transmit path for devices with support for
packed virtqueues.

Add the feature bit and enable the code that adds buffers to the
vring and marks descriptors as available.
Signed-off-by: Jens Freimann <jfreim...@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |   8 +-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 113 ++++++++++++++++++++++++++++-
 3 files changed, 121 insertions(+), 2 deletions(-)
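
For readers following along (this is not part of the patch): the packed ring replaces the split avail/used rings with a single descriptor array, and availability is signalled through two flag bits interpreted against a wrap counter. A minimal sketch of the layout and flag helpers this patch relies on; the real definitions come from the earlier patches in this series and the virtio 1.1 spec draft, so treat the exact names as assumptions:

/* Sketch only; see the packed-ring helper patches for the real code. */
struct vring_desc_packed {
	uint64_t addr;   /* guest-physical buffer address */
	uint32_t len;    /* buffer length in bytes */
	uint16_t index;  /* buffer id, echoed back by the device */
	uint16_t flags;  /* VRING_DESC_F_* plus the AVAIL/USED bits */
};

/*
 * The AVAIL/USED bits are written relative to the driver's wrap
 * counter: a descriptor is made available by setting AVAIL to the
 * current counter value and USED to its inverse.
 */
#define VRING_DESC_F_AVAIL(wc)	((uint16_t)(wc) << 7)
#define VRING_DESC_F_USED(wc)	((uint16_t)(wc) << 15)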
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index ad91f7f82..d2c5755bb 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -384,6 +384,8 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
+ if (vtpci_packed_queue(hw))
+ vq->vq_ring.avail_wrap_counter = 1;
/*
* Reserve a memzone for vring elements
@@ -1338,7 +1340,11 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
}
- if (hw->use_inorder_tx) {
+ if (vtpci_packed_queue(hw)) {
+ PMD_INIT_LOG(INFO, "virtio: using virtio 1.1 Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ } else if (hw->use_inorder_tx) {
PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
eth_dev->data->port_id);
eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index b726ad108..04161b461 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -79,6 +79,8 @@ uint16_t virtio_recv_mergeable_pkts_inorder(void *rx_queue,
uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
uint16_t virtio_xmit_pkts_inorder(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index eb891433e..12787070e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -38,6 +38,112 @@
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
+
+/* Cleanup from completed transmits. */
+static void
+virtio_xmit_cleanup_packed(struct virtqueue *vq)
+{
+ uint16_t idx;
+ uint16_t size = vq->vq_nentries;
+ struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+ struct vq_desc_extra *dxp;
+
+ idx = vq->vq_used_cons_idx;
+ while (desc_is_used(&desc[idx], &vq->vq_ring) &&
+ vq->vq_free_cnt < size) {
+ dxp = &vq->vq_descx[idx];
+ vq->vq_free_cnt += dxp->ndescs;
+ idx += dxp->ndescs;
+ idx = idx >= size ? idx - size : idx;
+ }
+}
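
The cleanup loop keys off desc_is_used(), which is introduced earlier in the series. Roughly, and assuming the ring also tracks a used_wrap_counter next to the avail_wrap_counter initialised above (a sketch, not the exact code):

/*
 * Sketch: the device completes a descriptor by flipping its flags so
 * that AVAIL == USED. Comparing both bits against the ring's used
 * wrap counter separates descriptors completed in the current lap of
 * the ring from stale ones left over from the previous lap.
 */
static inline int
desc_is_used(struct vring_desc_packed *desc, struct vring *vr)
{
	uint16_t used = !!(desc->flags & VRING_DESC_F_USED(1));
	uint16_t avail = !!(desc->flags & VRING_DESC_F_AVAIL(1));

	return avail == used && used == vr->used_wrap_counter;
}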
+
+uint16_t
+virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtnet_tx *txvq = tx_queue;
+ struct virtqueue *vq = txvq->vq;
+ uint16_t i;
+ struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
+ uint16_t idx, prev;
+ struct vq_desc_extra *dxp;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+
+ PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+ if (likely(vq->vq_free_cnt < vq->vq_free_thresh))
+ virtio_xmit_cleanup_packed(vq);
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *txm = tx_pkts[i];
+ struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
+ uint16_t head_idx;
+ int wrap_counter;
+ int descs_used;
+
+ if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+ virtio_xmit_cleanup_packed(vq);
+
+ if (unlikely(txm->nb_segs + 1 > vq->vq_free_cnt)) {
+ PMD_TX_LOG(ERR,
+ "No free tx descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->stats.bytes += txm->pkt_len;
+
+ vq->vq_free_cnt -= txm->nb_segs + 1;
+
+ wrap_counter = vq->vq_ring.avail_wrap_counter;
+ idx = vq->vq_avail_idx;
+ head_idx = idx;
+
+ dxp = &vq->vq_descx[idx];
+ if (dxp->cookie != NULL)
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = txm;
+
+ desc[idx].addr = txvq->virtio_net_hdr_mem +
+ RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
+ desc[idx].len = vq->hw->vtnet_hdr_size;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ descs_used = 1;
+
+ do {
+ idx = update_pq_avail_index(vq);
+ desc[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(txm, vq);
+ desc[idx].len = txm->data_len;
+ desc[idx].flags = VRING_DESC_F_NEXT |
+ VRING_DESC_F_AVAIL(vq->vq_ring.avail_wrap_counter) |
+ VRING_DESC_F_USED(!vq->vq_ring.avail_wrap_counter);
+ descs_used++;
+ } while ((txm = txm->next) != NULL);
+
+ desc[idx].flags &= ~VRING_DESC_F_NEXT;
+
+ rte_smp_wmb();
+ prev = (idx > 0 ? idx : vq->vq_nentries) - 1;
+ desc[prev].index = head_idx; //FIXME

FIXIT! :)

+ desc[head_idx].flags |=
+ (VRING_DESC_F_AVAIL(wrap_counter) |
+ VRING_DESC_F_USED(!wrap_counter));
+
+ vq->vq_descx[head_idx].ndescs = descs_used;
+ idx = update_pq_avail_index(vq);
+ }
+
+ txvq->stats.packets += i;
+ txvq->stats.errors += nb_pkts - i;
+
+ return i;
+}
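
update_pq_avail_index() is not part of this hunk; judging from its use above, it advances vq_avail_idx and toggles avail_wrap_counter on wrap-around, along these lines (a sketch, with the name and placement assumed from this series):

/*
 * Sketch: advance the driver's next-available index, flipping the
 * avail wrap counter each time the index wraps back to slot 0, and
 * return the new index.
 */
static inline uint16_t
update_pq_avail_index(struct virtqueue *vq)
{
	if (++vq->vq_avail_idx >= vq->vq_nentries) {
		vq->vq_avail_idx = 0;
		vq->vq_ring.avail_wrap_counter ^= 1;
	}

	return vq->vq_avail_idx;
}

Note the ordering in the transmit loop: all descriptors of the chain are written first, rte_smp_wmb() orders those stores, and only then are the head descriptor's AVAIL/USED bits set, because writing the head's flags is what makes the whole chain visible to the device.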
+
int
virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
{
@@ -736,7 +842,12 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
if (hw->use_inorder_tx)
vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
- VIRTQUEUE_DUMP(vq);
+ if (vtpci_packed_queue(hw)) {
+ vq->vq_ring.avail_wrap_counter = 1;
+ }
+
+ if (!vtpci_packed_queue(hw))
+ VIRTQUEUE_DUMP(vq);
I guess the !vtpci_packed_queue(hw) check around VIRTQUEUE_DUMP() isn't
necessary anymore, since packed virtqueue support for the dump is added in
patch 5.
return 0;
}