Add support for the following statistics operations:
        stats_get
        stats_reset

These statistics are counted by the SW data-path.

Signed-off-by: Matan Azrad <ma...@nvidia.com>
---
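Note: for reference, here is a minimal usage sketch (not part of this patch) of how an application could read and clear these counters through the generic compressdev API. The device ID and the helper name are placeholders.

#include <stdio.h>
#include <inttypes.h>

#include <rte_compressdev.h>

/*
 * Minimal usage sketch: query and reset the per-device statistics that
 * this patch starts counting in the mlx5 SW data-path.
 * The device ID passed in is a placeholder.
 */
static void
compress_stats_dump_and_reset(uint8_t dev_id)
{
	struct rte_compressdev_stats stats;

	if (rte_compressdev_stats_get(dev_id, &stats) != 0)
		return;
	printf("enq=%" PRIu64 " deq=%" PRIu64 " enq_err=%" PRIu64
	       " deq_err=%" PRIu64 "\n",
	       stats.enqueued_count, stats.dequeued_count,
	       stats.enqueue_err_count, stats.dequeue_err_count);
	rte_compressdev_stats_reset(dev_id);
}

In the patch itself the counters are kept per queue pair and only summed in the stats_get callback, so the fast path never touches a shared counter.
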
 drivers/compress/mlx5/mlx5_compress.c | 36 +++++++++++++++++++++++++++++++++--
 1 file changed, 34 insertions(+), 2 deletions(-)

diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c
index 719def2..d768453 100644
--- a/drivers/compress/mlx5/mlx5_compress.c
+++ b/drivers/compress/mlx5/mlx5_compress.c
@@ -63,6 +63,7 @@ struct mlx5_compress_qp {
        struct mlx5_pmd_mr opaque_mr;
        struct rte_comp_op **ops;
        struct mlx5_compress_priv *priv;
+       struct rte_compressdev_stats stats;
 };
 
 #define MLX5_COMPRESS_MAX_QPS 1024
@@ -357,14 +358,42 @@ struct mlx5_compress_qp {
        return 0;
 }
 
+static void
+mlx5_compress_stats_get(struct rte_compressdev *dev,
+               struct rte_compressdev_stats *stats)
+{
+       int qp_id;
+
+       for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+               struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+               stats->enqueued_count += qp->stats.enqueued_count;
+               stats->dequeued_count += qp->stats.dequeued_count;
+               stats->enqueue_err_count += qp->stats.enqueue_err_count;
+               stats->dequeue_err_count += qp->stats.dequeue_err_count;
+       }
+}
+
+static void
+mlx5_compress_stats_reset(struct rte_compressdev *dev)
+{
+       int qp_id;
+
+       for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+               struct mlx5_compress_qp *qp = dev->data->queue_pairs[qp_id];
+
+               memset(&qp->stats, 0, sizeof(qp->stats));
+       }
+}
+
 static struct rte_compressdev_ops mlx5_compress_ops = {
        .dev_configure          = mlx5_compress_dev_configure,
        .dev_start              = mlx5_compress_dev_start,
        .dev_stop               = mlx5_compress_dev_stop,
        .dev_close              = mlx5_compress_dev_close,
        .dev_infos_get          = mlx5_compress_dev_info_get,
-       .stats_get              = NULL,
-       .stats_reset            = NULL,
+       .stats_get              = mlx5_compress_stats_get,
+       .stats_reset            = mlx5_compress_stats_reset,
        .queue_pair_setup       = mlx5_compress_qp_setup,
        .queue_pair_release     = mlx5_compress_qp_release,
        .private_xform_create   = mlx5_compress_xform_create,
@@ -436,6 +465,7 @@ struct mlx5_compress_qp {
                ++ops;
                qp->pi++;
        } while (--remain);
+       qp->stats.enqueued_count += nb_ops;
        rte_io_wmb();
        qp->sq.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->pi);
        rte_wmb();
@@ -484,6 +514,7 @@ struct mlx5_compress_qp {
        mlx5_compress_dump_err_objs((volatile uint32_t *)cqe,
                                 (volatile uint32_t *)&wqes[idx],
                                 (volatile uint32_t *)&opaq[idx]);
+       qp->stats.dequeue_err_count++;
 }
 
 static uint16_t
@@ -554,6 +585,7 @@ struct mlx5_compress_qp {
        if (likely(i != 0)) {
                rte_io_wmb();
                qp->cq.db_rec[0] = rte_cpu_to_be_32(qp->ci);
+               qp->stats.dequeued_count += i;
        }
        return i;
 }
-- 
1.8.3.1
