Add compress device ring initialization and cleanup code.

Signed-off-by: Nagadheeraj Rottela <rnagadhee...@marvell.com>
---
 drivers/common/nitrox/nitrox_csr.h |  12 +++
 drivers/common/nitrox/nitrox_hal.c | 116 +++++++++++++++++++++++++++++
 drivers/common/nitrox/nitrox_hal.h | 115 ++++++++++++++++++++++++++++
 drivers/common/nitrox/nitrox_qp.c  |  53 +++++++++++--
 drivers/common/nitrox/nitrox_qp.h  |  35 ++++++++-
 5 files changed, 322 insertions(+), 9 deletions(-)

diff --git a/drivers/common/nitrox/nitrox_csr.h b/drivers/common/nitrox/nitrox_csr.h
index de7a3c6713..97c797c2e2 100644
--- a/drivers/common/nitrox/nitrox_csr.h
+++ b/drivers/common/nitrox/nitrox_csr.h
@@ -25,6 +25,18 @@
 /* AQM Virtual Function Registers */
 #define AQMQ_QSZX(_i)                  (0x20008UL + ((_i) * 0x40000UL))
 
+/* ZQM virtual function registers */
+#define ZQMQ_DRBLX(_i)                 (0x30000UL + ((_i) * 0x40000UL))
+#define ZQMQ_QSZX(_i)                  (0x30008UL + ((_i) * 0x40000UL))
+#define ZQMQ_BADRX(_i)                 (0x30010UL + ((_i) * 0x40000UL))
+#define ZQMQ_NXT_CMDX(_i)              (0x30018UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMD_CNTX(_i)              (0x30020UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMP_THRX(_i)              (0x30028UL + ((_i) * 0x40000UL))
+#define ZQMQ_CMP_CNTX(_i)              (0x30030UL + ((_i) * 0x40000UL))
+#define ZQMQ_TIMER_LDX(_i)             (0x30038UL + ((_i) * 0x40000UL))
+#define ZQMQ_ENX(_i)                   (0x30048UL + ((_i) * 0x40000UL))
+#define ZQMQ_ACTIVITY_STATX(_i)                (0x30050UL + ((_i) * 0x40000UL))
+
 static inline uint64_t
 nitrox_read_csr(uint8_t *bar_addr, uint64_t offset)
 {
diff --git a/drivers/common/nitrox/nitrox_hal.c b/drivers/common/nitrox/nitrox_hal.c
index 433f3adb20..451549a664 100644
--- a/drivers/common/nitrox/nitrox_hal.c
+++ b/drivers/common/nitrox/nitrox_hal.c
@@ -9,6 +9,7 @@
 
 #include "nitrox_hal.h"
 #include "nitrox_csr.h"
+#include "nitrox_logs.h"
 
 #define MAX_VF_QUEUES  8
 #define MAX_PF_QUEUES  64
@@ -164,6 +165,121 @@ setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port)
        }
 }
 
+int
+zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring)
+{
+       union zqmq_activity_stat zqmq_activity_stat;
+       union zqmq_en zqmq_en;
+       union zqmq_cmp_cnt zqmq_cmp_cnt;
+       uint64_t reg_addr;
+       int max_retries = 5; /* up to 5 x 10 ms polls below */
+
+       /* clear queue enable */
+       reg_addr = ZQMQ_ENX(ring);
+       zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       zqmq_en.s.queue_enable = 0;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
+       rte_delay_us_block(100); /* give the clear time to propagate */
+
+       /* wait for queue active to clear */
+       reg_addr = ZQMQ_ACTIVITY_STATX(ring);
+       zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       while (zqmq_activity_stat.s.queue_active && max_retries--) {
+               rte_delay_ms(10);
+               zqmq_activity_stat.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       }
+
+       if (zqmq_activity_stat.s.queue_active) {
+               NITROX_LOG(ERR, "Failed to disable zqmq ring %d\n", ring);
+               return -EBUSY; /* ring still draining; caller may retry */
+       }
+
+       /* clear commands completed count */
+       reg_addr = ZQMQ_CMP_CNTX(ring);
+       zqmq_cmp_cnt.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_cnt.u64); /* NOTE(review): looks like write-back-to-clear semantics — confirm against HW spec */
+       rte_delay_us_block(CSR_DELAY);
+       return 0;
+}
+
+int
+setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+                     phys_addr_t raddr)
+{
+       union zqmq_drbl zqmq_drbl;
+       union zqmq_qsz zqmq_qsz;
+       union zqmq_en zqmq_en;
+       union zqmq_cmp_thr zqmq_cmp_thr;
+       union zqmq_timer_ld zqmq_timer_ld;
+       uint64_t reg_addr = 0;
+       int max_retries = 5; /* up to 5 x 10 ms polls for enable */
+       int err = 0;
+
+       err = zqmq_input_ring_disable(bar_addr, ring);
+       if (err)
+               return err;
+
+       /* clear doorbell count */
+       reg_addr = ZQMQ_DRBLX(ring);
+       zqmq_drbl.u64 = 0;
+       zqmq_drbl.s.dbell_count = 0xFFFFFFFF;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_drbl.u64);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* reset next command pointer */
+       reg_addr = ZQMQ_NXT_CMDX(ring);
+       nitrox_write_csr(bar_addr, reg_addr, 0);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* write queue length */
+       reg_addr = ZQMQ_QSZX(ring);
+       zqmq_qsz.u64 = 0;
+       zqmq_qsz.s.host_queue_size = rsize;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_qsz.u64);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* write queue base address */
+       reg_addr = ZQMQ_BADRX(ring);
+       nitrox_write_csr(bar_addr, reg_addr, raddr);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* write commands completed threshold */
+       reg_addr = ZQMQ_CMP_THRX(ring);
+       zqmq_cmp_thr.u64 = 0;
+       zqmq_cmp_thr.s.commands_completed_threshold = 0;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_cmp_thr.u64);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* write timer load value */
+       reg_addr = ZQMQ_TIMER_LDX(ring);
+       zqmq_timer_ld.u64 = 0;
+       zqmq_timer_ld.s.timer_load_value = 0;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_timer_ld.u64);
+       rte_delay_us_block(CSR_DELAY);
+
+       /* enable queue */
+       reg_addr = ZQMQ_ENX(ring);
+       zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       zqmq_en.s.queue_enable = 1;
+       nitrox_write_csr(bar_addr, reg_addr, zqmq_en.u64);
+       rte_delay_us_block(100);
+
+       /* wait for queue enable to take effect */
+       zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       while (!zqmq_en.s.queue_enable && max_retries--) {
+               rte_delay_ms(10);
+               zqmq_en.u64 = nitrox_read_csr(bar_addr, reg_addr);
+       }
+
+       if (!zqmq_en.s.queue_enable) {
+               NITROX_LOG(ERR, "Failed to enable zqmq ring %d\n", ring);
+               err = -EFAULT;
+       } else {
+               err = 0;
+       }
+
+       return err;
+}
+
 int
 vf_get_vf_config_mode(uint8_t *bar_addr)
 {
diff --git a/drivers/common/nitrox/nitrox_hal.h b/drivers/common/nitrox/nitrox_hal.h
index dcfbd11d85..2367b967e5 100644
--- a/drivers/common/nitrox/nitrox_hal.h
+++ b/drivers/common/nitrox/nitrox_hal.h
@@ -146,6 +146,101 @@ union aqmq_qsz {
        } s;
 };
 
+union zqmq_activity_stat {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz            : 63;
+               uint64_t queue_active   : 1;
+#else
+               uint64_t queue_active   : 1;
+               uint64_t raz            : 63;
+#endif
+       } s;
+};
+
+union zqmq_en {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz            : 63;
+               uint64_t queue_enable   : 1;
+#else
+               uint64_t queue_enable   : 1;
+               uint64_t raz            : 63;
+#endif
+       } s;
+};
+
+union zqmq_cmp_cnt {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz                    : 30;
+               uint64_t resend                 : 1;
+               uint64_t completion_status      : 1;
+               uint64_t commands_completed_count: 32;
+#else
+               uint64_t commands_completed_count: 32;
+               uint64_t completion_status      : 1;
+               uint64_t resend                 : 1;
+               uint64_t raz                    : 30;
+#endif
+       } s;
+};
+
+union zqmq_drbl {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz            : 32;
+               uint64_t dbell_count    : 32;
+#else
+               uint64_t dbell_count    : 32;
+               uint64_t raz            : 32;
+#endif
+       } s;
+};
+
+union zqmq_qsz {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz            : 32;
+               uint64_t host_queue_size: 32;
+#else
+               uint64_t host_queue_size: 32;
+               uint64_t raz            : 32;
+#endif
+       } s;
+};
+
+union zqmq_cmp_thr {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz                            : 32;
+               uint64_t commands_completed_threshold   : 32;
+#else
+               uint64_t commands_completed_threshold   : 32;
+               uint64_t raz                            : 32;
+#endif
+       } s;
+};
+
+union zqmq_timer_ld {
+       uint64_t u64;
+       struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+               uint64_t raz            : 32;
+               uint64_t timer_load_value: 32;
+#else
+               uint64_t timer_load_value: 32;
+               uint64_t raz            : 32;
+#endif
+       } s;
+};
+
 enum nitrox_vf_mode {
        NITROX_MODE_PF = 0x0,
        NITROX_MODE_VF16 = 0x1,
@@ -154,6 +249,23 @@ enum nitrox_vf_mode {
        NITROX_MODE_VF128 = 0x4,
 };
 
+static inline int
+inc_zqmq_next_cmd(uint8_t *bar_addr, uint16_t ring)
+{
+       uint64_t reg_addr = 0;
+       uint64_t val;
+
+       reg_addr = ZQMQ_NXT_CMDX(ring);
+       val = nitrox_read_csr(bar_addr, reg_addr);
+       val++; /* advance the ring's next-command index by one */
+       nitrox_write_csr(bar_addr, reg_addr, val);
+       rte_delay_us_block(CSR_DELAY);
+       if (nitrox_read_csr(bar_addr, reg_addr) != val) /* read back to verify the CSR accepted the write */
+               return -EIO;
+
+       return 0;
+}
+
 int vf_get_vf_config_mode(uint8_t *bar_addr);
 int vf_config_mode_to_nr_queues(enum nitrox_vf_mode vf_mode);
 void setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
@@ -161,5 +273,8 @@ void setup_nps_pkt_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
 void setup_nps_pkt_solicit_output_port(uint8_t *bar_addr, uint16_t port);
 void nps_pkt_input_ring_disable(uint8_t *bar_addr, uint16_t ring);
 void nps_pkt_solicited_port_disable(uint8_t *bar_addr, uint16_t port);
+int setup_zqmq_input_ring(uint8_t *bar_addr, uint16_t ring, uint32_t rsize,
+                         phys_addr_t raddr);
+int zqmq_input_ring_disable(uint8_t *bar_addr, uint16_t ring);
 
 #endif /* _NITROX_HAL_H_ */
diff --git a/drivers/common/nitrox/nitrox_qp.c b/drivers/common/nitrox/nitrox_qp.c
index 5e85ccbd51..6ec0781f1a 100644
--- a/drivers/common/nitrox/nitrox_qp.c
+++ b/drivers/common/nitrox/nitrox_qp.c
@@ -2,7 +2,7 @@
  * Copyright(C) 2019 Marvell International Ltd.
  */
 
-#include <rte_cryptodev.h>
+#include <rte_memzone.h>
 #include <rte_malloc.h>
 
 #include "nitrox_qp.h"
@@ -20,6 +20,7 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
        const struct rte_memzone *mz;
        size_t cmdq_size = qp->count * instr_size;
        uint64_t offset;
+       int err = 0;
 
        snprintf(mz_name, sizeof(mz_name), "%s_cmdq_%d", dev_name, qp->qno);
        mz = rte_memzone_reserve_aligned(mz_name, cmdq_size, socket_id,
@@ -32,14 +33,34 @@ nitrox_setup_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr,
                return -ENOMEM;
        }
 
+       switch (qp->type) {
+       case NITROX_QUEUE_SE:
+               offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(qp->qno);
+               qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
+               setup_nps_pkt_input_ring(bar_addr, qp->qno, qp->count,
+                                        mz->iova);
+               setup_nps_pkt_solicit_output_port(bar_addr, qp->qno);
+               break;
+       case NITROX_QUEUE_ZIP:
+               offset = ZQMQ_DRBLX(qp->qno);
+               qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
+               err = setup_zqmq_input_ring(bar_addr, qp->qno, qp->count,
+                                           mz->iova);
+               break;
+       default:
+               NITROX_LOG(ERR, "Invalid queue type %d\n", qp->type);
+               err = -EINVAL;
+               break;
+       }
+
+       if (err) {
+               rte_memzone_free(mz);
+               return err;
+       }
+
        qp->cmdq.mz = mz;
-       offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(qp->qno);
-       qp->cmdq.dbell_csr_addr = NITROX_CSR_ADDR(bar_addr, offset);
        qp->cmdq.ring = mz->addr;
        qp->cmdq.instr_size = instr_size;
-       setup_nps_pkt_input_ring(bar_addr, qp->qno, qp->count, mz->iova);
-       setup_nps_pkt_solicit_output_port(bar_addr, qp->qno);
-
        return 0;
 }
 
@@ -62,8 +83,23 @@ nitrox_setup_ridq(struct nitrox_qp *qp, int socket_id)
 static int
 nitrox_release_cmdq(struct nitrox_qp *qp, uint8_t *bar_addr)
 {
-       nps_pkt_solicited_port_disable(bar_addr, qp->qno);
-       nps_pkt_input_ring_disable(bar_addr, qp->qno);
+       int err = 0;
+
+       switch (qp->type) {
+       case NITROX_QUEUE_SE:
+               nps_pkt_solicited_port_disable(bar_addr, qp->qno);
+               nps_pkt_input_ring_disable(bar_addr, qp->qno);
+               break;
+       case NITROX_QUEUE_ZIP:
+               err = zqmq_input_ring_disable(bar_addr, qp->qno);
+               break;
+       default:
+               err = -EINVAL;
+       }
+
+       if (err)
+               return err;
+
        return rte_memzone_free(qp->cmdq.mz);
 }
 
@@ -83,6 +119,7 @@ nitrox_qp_setup(struct nitrox_qp *qp, uint8_t *bar_addr, const char *dev_name,
                return -EINVAL;
        }
 
+       qp->bar_addr = bar_addr;
        qp->count = count;
        qp->head = qp->tail = 0;
        rte_atomic16_init(&qp->pending_count);
diff --git a/drivers/common/nitrox/nitrox_qp.h b/drivers/common/nitrox/nitrox_qp.h
index d42d53f92b..177bcd7705 100644
--- a/drivers/common/nitrox/nitrox_qp.h
+++ b/drivers/common/nitrox/nitrox_qp.h
@@ -8,9 +8,16 @@
 #include <stdbool.h>
 
 #include <rte_io.h>
+#include "nitrox_hal.h"
 
 struct nitrox_softreq;
 
+enum nitrox_queue_type {
+       NITROX_QUEUE_SE,
+       NITROX_QUEUE_AE,
+       NITROX_QUEUE_ZIP,
+};
+
 struct command_queue {
        const struct rte_memzone *mz;
        uint8_t *dbell_csr_addr;
@@ -22,14 +29,23 @@ struct rid {
        struct nitrox_softreq *sr;
 };
 
+struct nitrox_qp_stats {
+       uint64_t enqueued_count;
+       uint64_t dequeued_count;
+       uint64_t enqueue_err_count;
+       uint64_t dequeue_err_count;
+};
+
 struct nitrox_qp {
+       enum nitrox_queue_type type;
+       uint8_t *bar_addr;
        struct command_queue cmdq;
        struct rid *ridq;
        uint32_t count;
        uint32_t head;
        uint32_t tail;
        struct rte_mempool *sr_mp;
-       struct rte_cryptodev_stats stats;
+       struct nitrox_qp_stats stats;
        uint16_t qno;
        rte_atomic16_t pending_count;
 };
@@ -89,6 +105,23 @@ nitrox_qp_enqueue(struct nitrox_qp *qp, void *instr, struct nitrox_softreq *sr)
        rte_atomic16_inc(&qp->pending_count);
 }
 
+static inline int
+nitrox_qp_enqueue_sr(struct nitrox_qp *qp, struct nitrox_softreq *sr)
+{
+       uint32_t head = qp->head % qp->count; /* ring slot before advancing head */
+       int err;
+
+       err = inc_zqmq_next_cmd(qp->bar_addr, qp->qno); /* bump HW next-cmd first so a CSR failure leaves qp untouched */
+       if (unlikely(err))
+               return err;
+
+       qp->head++;
+       qp->ridq[head].sr = sr;
+       rte_smp_wmb(); /* order the sr store before the pending_count increment */
+       rte_atomic16_inc(&qp->pending_count);
+       return 0;
+}
+
 static inline void
 nitrox_qp_dequeue(struct nitrox_qp *qp)
 {
-- 
2.42.0

Reply via email to