Provide the dev start/stop implementations: start/stop the rx/tx queues, refill the rx rings and enable/disable interrupts.

Signed-off-by: Junlong Wang <wang.junlo...@zte.com.cn>
---
 doc/guides/nics/features/zxdh.ini |  2 +
 doc/guides/nics/zxdh.rst          |  2 +
 drivers/net/zxdh/zxdh_ethdev.c    | 61 ++++++++++++++++++++
 drivers/net/zxdh/zxdh_pci.c       | 24 ++++++++
 drivers/net/zxdh/zxdh_pci.h       |  1 +
 drivers/net/zxdh/zxdh_queue.c     | 93 +++++++++++++++++++++++++++++++
 drivers/net/zxdh/zxdh_queue.h     | 68 ++++++++++++++++++++++
 7 files changed, 251 insertions(+)

diff --git a/doc/guides/nics/features/zxdh.ini b/doc/guides/nics/features/zxdh.ini
index 05c8091ed7..874541c589 100644
--- a/doc/guides/nics/features/zxdh.ini
+++ b/doc/guides/nics/features/zxdh.ini
@@ -7,3 +7,5 @@
 Linux                = Y
 x86-64               = Y
 ARMv8                = Y
+SR-IOV               = Y
+Multiprocess         = Y
diff --git a/doc/guides/nics/zxdh.rst b/doc/guides/nics/zxdh.rst
index 2144753d75..eb970a888f 100644
--- a/doc/guides/nics/zxdh.rst
+++ b/doc/guides/nics/zxdh.rst
@@ -18,6 +18,8 @@ Features
 Features of the ZXDH PMD are:
 
 - Multi arch support: x86_64, ARMv8.
+- Multiple queues for TX and RX
+- SR-IOV VF
 
 
 Driver compilation and testing
diff --git a/drivers/net/zxdh/zxdh_ethdev.c b/drivers/net/zxdh/zxdh_ethdev.c
index f123e05ccf..a9c0d083fe 100644
--- a/drivers/net/zxdh/zxdh_ethdev.c
+++ b/drivers/net/zxdh/zxdh_ethdev.c
@@ -900,12 +900,35 @@ zxdh_np_uninit(struct rte_eth_dev *dev)
                zxdh_np_dtb_data_res_free(hw);
 }
 
+static int
+zxdh_dev_stop(struct rte_eth_dev *dev)
+{
+       int ret = 0;
+
+       if (dev->data->dev_started == 0)
+               return 0;
+
+       ret = zxdh_intr_disable(dev);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "intr disable failed");
+               return -1;
+       }
+
+       return 0;
+}
+
 static int
 zxdh_dev_close(struct rte_eth_dev *dev)
 {
        struct zxdh_hw *hw = dev->data->dev_private;
        int ret = 0;
 
+       ret = zxdh_dev_stop(dev);
+       if (ret != 0) {
+               PMD_DRV_LOG(ERR, "%s stop port %s failed.", __func__, 
dev->device->name);
+               return -1;
+       }
+
        ret = zxdh_tables_uninit(dev);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "%s :unint port %s failed ", __func__, 
dev->device->name);
@@ -929,9 +952,47 @@ zxdh_dev_close(struct rte_eth_dev *dev)
        return ret;
 }
 
+static int
+zxdh_dev_start(struct rte_eth_dev *dev)
+{
+       struct zxdh_hw *hw = dev->data->dev_private;
+       struct zxdh_virtqueue *vq;
+       int32_t ret;
+       uint16_t logic_qidx;
+       uint16_t i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX;
+               ret = zxdh_dev_rx_queue_setup_finish(dev, logic_qidx);
+               if (ret < 0)
+                       return ret;
+       }
+       ret = zxdh_intr_enable(dev);
+       if (ret) {
+               PMD_DRV_LOG(ERR, "interrupt enable failed");
+               return -EIO;
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               logic_qidx = 2 * i + ZXDH_RQ_QUEUE_IDX;
+               vq = hw->vqs[logic_qidx];
+               /* Flush the old packets */
+               zxdh_queue_rxvq_flush(vq);
+               zxdh_queue_notify(vq);
+       }
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               logic_qidx = 2 * i + ZXDH_TQ_QUEUE_IDX;
+               vq = hw->vqs[logic_qidx];
+               zxdh_queue_notify(vq);
+       }
+       return 0;
+}
+
 /* dev_ops for zxdh, bare necessities for basic operation */
 static const struct eth_dev_ops zxdh_eth_dev_ops = {
        .dev_configure                   = zxdh_dev_configure,
+       .dev_start                               = zxdh_dev_start,
+       .dev_stop                                = zxdh_dev_stop,
        .dev_close                               = zxdh_dev_close,
        .dev_infos_get                   = zxdh_dev_infos_get,
        .rx_queue_setup                  = zxdh_dev_rx_queue_setup,
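
Side note (illustration only, not part of the patch): zxdh_dev_start() above
maps ethdev queue i to the interleaved logical virtqueue 2 * i +
ZXDH_RQ_QUEUE_IDX for rx and 2 * i + ZXDH_TQ_QUEUE_IDX for tx. A minimal
standalone C sketch of that mapping, with a hypothetical queue count:

#include <stdio.h>
#include <stdint.h>

#define ZXDH_RQ_QUEUE_IDX 0 /* values from zxdh_queue.h */
#define ZXDH_TQ_QUEUE_IDX 1

int main(void)
{
	uint16_t nb_queues = 4; /* hypothetical */
	uint16_t i;

	/* rx virtqueues land on even slots of hw->vqs[], tx on odd slots */
	for (i = 0; i < nb_queues; i++)
		printf("ethdev queue %u -> rx vq %u, tx vq %u\n",
		       i, 2 * i + ZXDH_RQ_QUEUE_IDX, 2 * i + ZXDH_TQ_QUEUE_IDX);
	return 0;
}
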
diff --git a/drivers/net/zxdh/zxdh_pci.c b/drivers/net/zxdh/zxdh_pci.c
index 250e67d560..83164a5c79 100644
--- a/drivers/net/zxdh/zxdh_pci.c
+++ b/drivers/net/zxdh/zxdh_pci.c
@@ -202,6 +202,29 @@ zxdh_del_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
        rte_write16(0, &hw->common_cfg->queue_enable);
 }
 
+static void
+zxdh_notify_queue(struct zxdh_hw *hw, struct zxdh_virtqueue *vq)
+{
+       uint32_t notify_data = 0;
+
+       if (!zxdh_pci_with_feature(hw, ZXDH_F_NOTIFICATION_DATA)) {
+               rte_write16(vq->vq_queue_index, vq->notify_addr);
+               return;
+       }
+
+       if (zxdh_pci_with_feature(hw, ZXDH_F_RING_PACKED)) {
+               notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
+                                       ZXDH_VRING_PACKED_DESC_F_AVAIL)) << 31) |
+                                       ((uint32_t)vq->vq_avail_idx << 16) |
+                                       vq->vq_queue_index;
+       } else {
+               notify_data = ((uint32_t)vq->vq_avail_idx << 16) | vq->vq_queue_index;
+       }
+       PMD_DRV_LOG(DEBUG, "queue:%d notify_data 0x%x notify_addr 0x%p",
+                                vq->vq_queue_index, notify_data, vq->notify_addr);
+       rte_write32(notify_data, vq->notify_addr);
+}
+
 const struct zxdh_pci_ops zxdh_dev_pci_ops = {
        .read_dev_cfg   = zxdh_read_dev_config,
        .write_dev_cfg  = zxdh_write_dev_config,
@@ -216,6 +239,7 @@ const struct zxdh_pci_ops zxdh_dev_pci_ops = {
        .set_queue_num  = zxdh_set_queue_num,
        .setup_queue    = zxdh_setup_queue,
        .del_queue      = zxdh_del_queue,
+       .notify_queue   = zxdh_notify_queue,
 };
 
 uint8_t
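
Side note (illustration only, not part of the patch): with
ZXDH_F_NOTIFICATION_DATA negotiated, zxdh_notify_queue() packs the avail wrap
flag, the avail index and the queue index into a single 32-bit doorbell write:
bit 31 is the wrap flag, bits 30:16 the avail index, bits 15:0 the queue
index. A standalone sketch of that layout, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

#define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << 7) /* from zxdh_queue.h */

int main(void)
{
	uint16_t cached_flags = ZXDH_VRING_PACKED_DESC_F_AVAIL; /* wrap flag set */
	uint16_t avail_idx = 0x0123; /* hypothetical */
	uint16_t queue_idx = 5;      /* hypothetical */
	uint32_t notify_data =
		((uint32_t)(!!(cached_flags & ZXDH_VRING_PACKED_DESC_F_AVAIL)) << 31) |
		((uint32_t)avail_idx << 16) | queue_idx;

	printf("notify_data = 0x%08x\n", notify_data); /* prints 0x81230005 */
	return 0;
}
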
diff --git a/drivers/net/zxdh/zxdh_pci.h b/drivers/net/zxdh/zxdh_pci.h
index e3f13cb17d..5c5f72b90e 100644
--- a/drivers/net/zxdh/zxdh_pci.h
+++ b/drivers/net/zxdh/zxdh_pci.h
@@ -144,6 +144,7 @@ struct zxdh_pci_ops {
 
        int32_t  (*setup_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
        void     (*del_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
+       void     (*notify_queue)(struct zxdh_hw *hw, struct zxdh_virtqueue *vq);
 };
 
 struct zxdh_hw_internal {
diff --git a/drivers/net/zxdh/zxdh_queue.c b/drivers/net/zxdh/zxdh_queue.c
index af21f046ad..d45fd78dad 100644
--- a/drivers/net/zxdh/zxdh_queue.c
+++ b/drivers/net/zxdh/zxdh_queue.c
@@ -274,3 +274,96 @@ zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        zxdh_queue_disable_intr(vq);
        return 0;
 }
+
+int32_t
+zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq, struct rte_mbuf **cookie, uint16_t num)
+{
+       struct zxdh_vring_packed_desc *start_dp = vq->vq_packed.ring.desc;
+       struct zxdh_hw *hw = vq->hw;
+       struct zxdh_vq_desc_extra *dxp;
+       uint16_t flags = vq->vq_packed.cached_flags;
+       int32_t i;
+       uint16_t idx;
+
+       for (i = 0; i < num; i++) {
+               idx = vq->vq_avail_idx;
+               dxp = &vq->vq_descx[idx];
+               dxp->cookie = (void *)cookie[i];
+               dxp->ndescs = 1;
+               /* rx pkt fill in data_off */
+               start_dp[idx].addr = rte_mbuf_iova_get(cookie[i]) + RTE_PKTMBUF_HEADROOM;
+               start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM;
+               vq->vq_desc_head_idx = dxp->next;
+               if (vq->vq_desc_head_idx == ZXDH_VQ_RING_DESC_CHAIN_END)
+                       vq->vq_desc_tail_idx = vq->vq_desc_head_idx;
+               zxdh_queue_store_flags_packed(&start_dp[idx], flags, hw->weak_barriers);
+               if (++vq->vq_avail_idx >= vq->vq_nentries) {
+                       vq->vq_avail_idx -= vq->vq_nentries;
+                       vq->vq_packed.cached_flags ^= ZXDH_VRING_PACKED_DESC_F_AVAIL_USED;
+                       flags = vq->vq_packed.cached_flags;
+               }
+       }
+       vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
+       return 0;
+}
+
+int32_t
+zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx)
+{
+       struct zxdh_hw *hw = dev->data->dev_private;
+       struct zxdh_virtqueue *vq = hw->vqs[logic_qidx];
+       struct zxdh_virtnet_rx *rxvq = &vq->rxq;
+       uint16_t desc_idx;
+       int32_t error = 0;
+
+       /* Allocate blank mbufs for each rx descriptor */
+       memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
+       for (desc_idx = 0; desc_idx < ZXDH_MBUF_BURST_SZ; desc_idx++)
+               vq->sw_ring[vq->vq_nentries + desc_idx] = &rxvq->fake_mbuf;
+
+       while (!zxdh_queue_full(vq)) {
+               uint16_t free_cnt = vq->vq_free_cnt;
+
+               free_cnt = RTE_MIN(ZXDH_MBUF_BURST_SZ, free_cnt);
+               struct rte_mbuf *new_pkts[free_cnt];
+
+               if (likely(rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt) == 0)) {
+                       error = zxdh_enqueue_recv_refill_packed(vq, new_pkts, free_cnt);
+                       if (unlikely(error)) {
+                               int32_t i;
+                               for (i = 0; i < free_cnt; i++)
+                                       rte_pktmbuf_free(new_pkts[i]);
+                       }
+               } else {
+                       PMD_DRV_LOG(ERR, "port %d rxq %d allocated bufs from %s 
failed",
+                               hw->port_id, logic_qidx, rxvq->mpool->name);
+                       break;
+               }
+       }
+       return 0;
+}
+
+void
+zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq)
+{
+       struct zxdh_vq_desc_extra *dxp = NULL;
+       uint16_t i = 0;
+       struct zxdh_vring_packed_desc *descs = vq->vq_packed.ring.desc;
+       int32_t cnt = 0;
+
+       i = vq->vq_used_cons_idx;
+       while (zxdh_desc_used(&descs[i], vq) && cnt++ < vq->vq_nentries) {
+               dxp = &vq->vq_descx[descs[i].id];
+               if (dxp->cookie != NULL) {
+                       rte_pktmbuf_free(dxp->cookie);
+                       dxp->cookie = NULL;
+               }
+               vq->vq_free_cnt++;
+               vq->vq_used_cons_idx++;
+               if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+                       vq->vq_used_cons_idx -= vq->vq_nentries;
+                       vq->vq_packed.used_wrap_counter ^= 1;
+               }
+               i = vq->vq_used_cons_idx;
+       }
+}
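
Side note (illustration only, not part of the patch): zxdh_queue_rxvq_flush()
walks the ring from vq_used_cons_idx and frees cookies until zxdh_desc_used()
reports a descriptor that is not yet consumed. A standalone sketch of that
used test; the flag values mirror zxdh_queue.h:

#include <stdio.h>
#include <stdint.h>

#define ZXDH_VRING_PACKED_DESC_F_AVAIL (1 << 7)
#define ZXDH_VRING_PACKED_DESC_F_USED  (1 << 15)

/* a packed descriptor is used when AVAIL == USED == used_wrap_counter */
static int desc_used(uint16_t flags, uint16_t used_wrap_counter)
{
	uint16_t used  = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);
	uint16_t avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);

	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	uint16_t both = ZXDH_VRING_PACKED_DESC_F_AVAIL | ZXDH_VRING_PACKED_DESC_F_USED;

	printf("%d\n", desc_used(both, 1));                           /* 1: consumed on first lap */
	printf("%d\n", desc_used(ZXDH_VRING_PACKED_DESC_F_AVAIL, 1)); /* 0: still pending */
	printf("%d\n", desc_used(0, 0));                              /* 1: consumed on second lap */
	return 0;
}
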
diff --git a/drivers/net/zxdh/zxdh_queue.h b/drivers/net/zxdh/zxdh_queue.h
index 2f602d894f..343ab60c1a 100644
--- a/drivers/net/zxdh/zxdh_queue.h
+++ b/drivers/net/zxdh/zxdh_queue.h
@@ -25,6 +25,11 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 #define ZXDH_VRING_DESC_F_WRITE           2
 /* This flag means the descriptor was made available by the driver */
 #define ZXDH_VRING_PACKED_DESC_F_AVAIL   (1 << (7))
+#define ZXDH_VRING_PACKED_DESC_F_USED    (1 << (15))
+
+/* Frequently used combinations */
+#define ZXDH_VRING_PACKED_DESC_F_AVAIL_USED \
+               (ZXDH_VRING_PACKED_DESC_F_AVAIL | ZXDH_VRING_PACKED_DESC_F_USED)
 
 #define ZXDH_RING_EVENT_FLAGS_ENABLE      0x0
 #define ZXDH_RING_EVENT_FLAGS_DISABLE     0x1
@@ -32,6 +37,8 @@ enum { ZXDH_VTNET_RQ = 0, ZXDH_VTNET_TQ = 1 };
 
 #define ZXDH_VQ_RING_DESC_CHAIN_END       32768
 #define ZXDH_QUEUE_DEPTH                  1024
+#define ZXDH_RQ_QUEUE_IDX                 0
+#define ZXDH_TQ_QUEUE_IDX                 1
 
 /*
  * ring descriptors: 16 bytes.
@@ -290,6 +297,63 @@ zxdh_mb(uint8_t weak_barriers)
                rte_mb();
 }
 
+static inline int32_t
+zxdh_queue_full(const struct zxdh_virtqueue *vq)
+{
+       return (vq->vq_free_cnt == 0);
+}
+
+static inline void
+zxdh_queue_store_flags_packed(struct zxdh_vring_packed_desc *dp,
+               uint16_t flags, uint8_t weak_barriers)
+{
+       if (weak_barriers) {
+       #ifdef RTE_ARCH_X86_64
+               rte_io_wmb();
+               dp->flags = flags;
+       #else
+               rte_atomic_store_explicit(&dp->flags, flags, rte_memory_order_release);
+       #endif
+       } else {
+               rte_io_wmb();
+               dp->flags = flags;
+       }
+}
+
+static inline uint16_t
+zxdh_queue_fetch_flags_packed(struct zxdh_vring_packed_desc *dp,
+               uint8_t weak_barriers)
+{
+       uint16_t flags;
+       if (weak_barriers) {
+       #ifdef RTE_ARCH_X86_64
+               flags = dp->flags;
+               rte_io_rmb();
+       #else
+               flags = rte_atomic_load_explicit(&dp->flags, rte_memory_order_acquire);
+       #endif
+       } else {
+               flags = dp->flags;
+               rte_io_rmb();
+       }
+
+       return flags;
+}
+
+static inline int32_t
+zxdh_desc_used(struct zxdh_vring_packed_desc *desc, struct zxdh_virtqueue *vq)
+{
+       uint16_t flags = zxdh_queue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+       uint16_t used = !!(flags & ZXDH_VRING_PACKED_DESC_F_USED);
+       uint16_t avail = !!(flags & ZXDH_VRING_PACKED_DESC_F_AVAIL);
+       return avail == used && used == vq->vq_packed.used_wrap_counter;
+}
+
+static inline void zxdh_queue_notify(struct zxdh_virtqueue *vq)
+{
+       ZXDH_VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
 struct rte_mbuf *zxdh_queue_detach_unused(struct zxdh_virtqueue *vq);
 int32_t zxdh_free_queues(struct rte_eth_dev *dev);
 int32_t zxdh_get_queue_type(uint16_t vtpci_queue_idx);
@@ -306,5 +370,9 @@ int32_t zxdh_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        struct rte_mempool *mp);
 int32_t zxdh_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
 int32_t zxdh_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
+int32_t zxdh_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t logic_qidx);
+void zxdh_queue_rxvq_flush(struct zxdh_virtqueue *vq);
+int32_t zxdh_enqueue_recv_refill_packed(struct zxdh_virtqueue *vq,
+                       struct rte_mbuf **cookie, uint16_t num);
 
 #endif /* ZXDH_QUEUE_H */
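
Side note (illustration only, not part of the patch):
zxdh_queue_store_flags_packed()/zxdh_queue_fetch_flags_packed() above pair a
release store with an acquire load on the descriptor flags, so the descriptor
body is visible to the peer before the new flags are observed. A standalone
sketch of the same pairing in plain C11 stdatomic (not DPDK code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct desc {
	uint64_t addr;
	uint32_t len;
	_Atomic uint16_t flags;
};

static void publish(struct desc *d, uint64_t addr, uint32_t len, uint16_t f)
{
	d->addr = addr;
	d->len = len;
	/* release: the addr/len stores cannot be reordered past this store */
	atomic_store_explicit(&d->flags, f, memory_order_release);
}

static uint16_t poll_flags(struct desc *d)
{
	/* acquire: a reader that sees the new flags also sees addr/len */
	return atomic_load_explicit(&d->flags, memory_order_acquire);
}

int main(void)
{
	struct desc d = {0};

	publish(&d, 0x1000, 64, 0x80);
	printf("flags=0x%x len=%u\n", poll_flags(&d), d.len);
	return 0;
}

On x86-64 the helpers instead use plain accesses around
rte_io_wmb()/rte_io_rmb(), relying on the stronger x86 memory model, which is
why the weak_barriers path special-cases RTE_ARCH_X86_64.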
-- 
2.27.0
