Implement the receive part for packed virtqueues: populate the receive
ring with mbufs at queue setup and add virtio_recv_pkts_packed(), which
dequeues used descriptors, refills them in place and flips the used
wrap counter whenever the consumer index wraps around the ring.

Signed-off-by: Jens Freimann <jfreim...@redhat.com>
---
 drivers/net/virtio/virtio_ethdev.c |  15 +++-
 drivers/net/virtio/virtio_ethdev.h |   2 +
 drivers/net/virtio/virtio_rxtx.c   | 131 +++++++++++++++++++++++++++++
 3 files changed, 145 insertions(+), 3 deletions(-)
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d2c5755bb..a2bb726ba 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -384,8 +384,10 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 	vq->hw = hw;
 	vq->vq_queue_index = vtpci_queue_idx;
 	vq->vq_nentries = vq_size;
-	if (vtpci_packed_queue(hw))
+	if (vtpci_packed_queue(hw)) {
 		vq->vq_ring.avail_wrap_counter = 1;
+		vq->vq_ring.used_wrap_counter = 1;
+	}

 	/*
 	 * Reserve a memzone for vring elements
@@ -1320,7 +1322,13 @@ set_rxtx_funcs(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;

-	if (hw->use_simple_rx) {
+	/*
+	 * Workaround for packed vqs which don't support
+	 * mrg_rxbuf at this point.
+	 */
+	if (vtpci_packed_queue(hw)) {
+		eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+	} else if (hw->use_simple_rx) {
 		PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
 			eth_dev->data->port_id);
 		eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
@@ -1484,7 +1492,8 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)

 	/* Setting up rx_header size for the device */
 	if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
-	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+	    vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+	    vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 	else
 		hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h
index 04161b461..25eaff224 100644
--- a/drivers/net/virtio/virtio_ethdev.h
+++ b/drivers/net/virtio/virtio_ethdev.h
@@ -70,6 +70,8 @@ int virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,

 uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
+uint16_t virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts);

 uint16_t virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 12787070e..3f5fa7366 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -31,6 +31,7 @@
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
 #include "virtio_rxtx_simple.h"
+#include "virtio_ring.h"

 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -710,6 +711,34 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)

 	PMD_INIT_FUNC_TRACE();

+	if (vtpci_packed_queue(hw)) {
+		struct vring_desc_packed *desc;
+		struct vq_desc_extra *dxp;
+
+		for (desc_idx = 0; desc_idx < vq->vq_nentries;
+				desc_idx++) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (unlikely(m == NULL))
+				return -ENOMEM;
+
+			dxp = &vq->vq_descx[desc_idx];
+			dxp->cookie = m;
+			dxp->ndescs = 1;
+
+			desc = &vq->vq_ring.desc_packed[desc_idx];
+			desc->addr = VIRTIO_MBUF_ADDR(m, vq) +
+				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+			desc->len = m->buf_len - RTE_PKTMBUF_HEADROOM +
+				hw->vtnet_hdr_size;
+			desc->flags |= VRING_DESC_F_WRITE;
+			rte_smp_wmb();
+			set_desc_avail(&vq->vq_ring, desc);
+		}
+		vq->vq_ring.avail_wrap_counter ^= 1;
+		nbufs = desc_idx;
+		goto out;
+	}
+
 	/* Allocate blank mbufs for the each rx descriptor */
 	nbufs = 0;

@@ -773,6 +802,7 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
 		vq_update_avail_idx(vq);
 	}

+out:
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);

 	VIRTQUEUE_DUMP(vq);

@@ -993,6 +1023,107 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 	return 0;
 }

+uint16_t
+virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct virtnet_rx *rxvq = rx_queue;
+	struct virtqueue *vq = rxvq->vq;
+	struct virtio_hw *hw = vq->hw;
+	struct rte_mbuf *rxm, *nmb;
+	uint16_t nb_rx;
+	uint32_t len;
+	uint32_t i;
+	uint32_t hdr_size;
+	struct virtio_net_hdr *hdr;
+	struct vring_desc_packed *descs = vq->vq_ring.desc_packed;
+	struct vring_desc_packed *desc;
+	uint16_t used_idx, id;
+	struct vq_desc_extra *dxp;
+
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
+	hdr_size = hw->vtnet_hdr_size;
+
+	for (i = 0; i < nb_pkts; i++) {
+		rte_smp_rmb();
+		used_idx = vq->vq_used_cons_idx;
+		desc = &descs[used_idx];
+		id = desc->index;
+		if (!desc_is_used(desc, &vq->vq_ring))
+			break;
+
+		nmb = rte_mbuf_raw_alloc(rxvq->mpool);
+		if (unlikely(nmb == NULL)) {
+			struct rte_eth_dev *dev
+				= &rte_eth_devices[rxvq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		dxp = &vq->vq_descx[id];
+		len = desc->len;
+		rxm = dxp->cookie;
+		dxp->cookie = nmb;
+		dxp->ndescs = 1;
+
+		desc->addr = VIRTIO_MBUF_ADDR(nmb, vq) +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+		desc->len = nmb->buf_len - RTE_PKTMBUF_HEADROOM +
+			hw->vtnet_hdr_size;
+		desc->flags = VRING_DESC_F_WRITE;
+
+		PMD_RX_LOG(DEBUG, "packet len:%d", len);
+
+		if (unlikely(len < hdr_size + ETHER_HDR_LEN)) {
+			PMD_RX_LOG(ERR, "Packet drop");
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		rxm->port = rxvq->port_id;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rxm->ol_flags = 0;
+		rxm->vlan_tci = 0;
+
+		rxm->pkt_len = (uint32_t)(len - hdr_size);
+		rxm->data_len = (uint16_t)(len - hdr_size);
+
+		hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
+			RTE_PKTMBUF_HEADROOM - hdr_size);
+
+		if (hw->vlan_strip)
+			rte_vlan_strip(rxm);
+
+		if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
+			rte_pktmbuf_free(rxm);
+			rxvq->stats.errors++;
+			continue;
+		}
+
+		VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
+
+		rxvq->stats.bytes += rxm->pkt_len;
+		virtio_update_packet_stats(&rxvq->stats, rxm);
+
+		rte_smp_wmb();
+
+		rx_pkts[nb_rx++] = rxm;
+		vq->vq_used_cons_idx += dxp->ndescs;
+		if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+			vq->vq_used_cons_idx -= vq->vq_nentries;
+			vq->vq_ring.used_wrap_counter ^= 1;
+		}
+	}
+
+	rxvq->stats.packets += nb_rx;
+
+	return nb_rx;
+}
+
 #define VIRTIO_MBUF_BURST_SZ 64
 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
 uint16_t
-- 
2.17.1
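
For readers less familiar with the packed ring layout, the used-descriptor
test that the new receive path depends on (desc_is_used() together with the
used_wrap_counter toggle at the end of the loop) can be illustrated with a
minimal, self-contained sketch. This is not part of the patch: the flag bit
positions follow the virtio 1.1 packed-ring layout, and the struct and
helper names below are hypothetical stand-ins for the driver's own
definitions.

/*
 * Illustrative sketch only (not part of this patch).  Flag bit positions
 * follow the virtio 1.1 packed-ring spec; struct and helper names are
 * hypothetical stand-ins for the driver's definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define PQ_DESC_F_AVAIL	(1u << 7)	/* VIRTQ_DESC_F_AVAIL */
#define PQ_DESC_F_USED	(1u << 15)	/* VIRTQ_DESC_F_USED */

struct pq_desc {			/* hypothetical packed descriptor */
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/*
 * A descriptor has been used by the device when both the AVAIL and USED
 * flag bits match the driver's current used wrap counter.  The receive
 * loop flips that counter each time vq_used_cons_idx wraps past the ring
 * size, which keeps this comparison valid across ring wrap-around.
 */
static inline bool
pq_desc_is_used(const struct pq_desc *desc, bool used_wrap_counter)
{
	bool avail = !!(desc->flags & PQ_DESC_F_AVAIL);
	bool used = !!(desc->flags & PQ_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}

The driver starts with used_wrap_counter = 1 (as virtio_init_queue() now
does for packed queues) and toggles it on every wrap, mirroring what
virtio_recv_pkts_packed() does when vq_used_cons_idx reaches vq_nentries.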