From: Sunil Kumar Kori <sk...@marvell.com>

Add ROC APIs to configure congestion management on NIX RQs:
RED pass/drop thresholds for the LPB pool, SPB pool and XQE levels.
Depends-on: patch-24710 ("ethdev: support congestion management")

Signed-off-by: Sunil Kumar Kori <sk...@marvell.com>
Change-Id: I55ca54cd2f5c7e208640de53799103342453e703
---
 drivers/common/cnxk/roc_nix.h       |   5 ++
 drivers/common/cnxk/roc_nix_queue.c | 106 ++++++++++++++++++++++++++++
 drivers/common/cnxk/version.map     |   1 +
 3 files changed, 112 insertions(+)

diff --git a/drivers/common/cnxk/roc_nix.h b/drivers/common/cnxk/roc_nix.h
index 5c2a869eba..34cb2c717c 100644
--- a/drivers/common/cnxk/roc_nix.h
+++ b/drivers/common/cnxk/roc_nix.h
@@ -315,6 +315,10 @@ struct roc_nix_rq {
 	/* Average SPB aura level drop threshold for RED */
 	uint8_t spb_red_drop;
 	/* Average SPB aura level pass threshold for RED */
+	uint8_t xqe_red_pass;
+	/* Average xqe level drop threshold for RED */
+	uint8_t xqe_red_drop;
+	/* Average xqe level pass threshold for RED */
 	uint8_t spb_red_pass;
 	/* LPB aura drop enable */
 	bool lpb_drop_ena;
@@ -869,6 +873,7 @@ int __roc_api roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
 			      bool ena);
 int __roc_api roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq,
 				bool ena);
+int __roc_api roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq);
 int __roc_api roc_nix_rq_ena_dis(struct roc_nix_rq *rq, bool enable);
 int __roc_api roc_nix_rq_is_sso_enable(struct roc_nix *roc_nix, uint32_t qid);
 int __roc_api roc_nix_rq_fini(struct roc_nix_rq *rq);
diff --git a/drivers/common/cnxk/roc_nix_queue.c b/drivers/common/cnxk/roc_nix_queue.c
index 405d9a8274..368f1a52f7 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -235,6 +235,46 @@ nix_rq_aura_buf_type_update(struct roc_nix_rq *rq, bool set)
 	return 0;
 }
 
+static int
+nix_rq_cn9k_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
+{
+	struct mbox *mbox = dev->mbox;
+	struct nix_aq_enq_req *aq;
+
+	aq = mbox_alloc_msg_nix_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
+	aq->qidx = rq->qid;
+	aq->ctype = NIX_AQ_CTYPE_RQ;
+	aq->op = NIX_AQ_INSTOP_WRITE;
+
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.lpb_pool_pass = rq->red_pass;
+		aq->rq.lpb_pool_drop = rq->red_drop;
+		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
+		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
+
+	}
+
+	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
+		aq->rq.spb_pool_pass = rq->spb_red_pass;
+		aq->rq.spb_pool_drop = rq->spb_red_drop;
+		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
+		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
+
+	}
+
+	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
+		aq->rq.xqe_pass = rq->xqe_red_pass;
+		aq->rq.xqe_drop = rq->xqe_red_drop;
+		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
+		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
+	}
+
+	return mbox_process(mbox);
+}
+
 int
 nix_rq_cn9k_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints,
 		bool cfg, bool ena)
@@ -529,6 +569,46 @@ nix_rq_cfg(struct dev *dev, struct roc_nix_rq *rq, uint16_t qints, bool cfg,
 	return 0;
 }
 
+static int
+nix_rq_cman_cfg(struct dev *dev, struct roc_nix_rq *rq)
+{
+	struct nix_cn10k_aq_enq_req *aq;
+	struct mbox *mbox = dev->mbox;
+
+	aq = mbox_alloc_msg_nix_cn10k_aq_enq(mbox);
+	if (!aq)
+		return -ENOSPC;
+
+	aq->qidx = rq->qid;
+	aq->ctype = NIX_AQ_CTYPE_RQ;
+	aq->op = NIX_AQ_INSTOP_WRITE;
+
+	if (rq->red_pass && (rq->red_pass >= rq->red_drop)) {
+		aq->rq.lpb_pool_pass = rq->red_pass;
+		aq->rq.lpb_pool_drop = rq->red_drop;
+		aq->rq_mask.lpb_pool_pass = ~(aq->rq_mask.lpb_pool_pass);
+		aq->rq_mask.lpb_pool_drop = ~(aq->rq_mask.lpb_pool_drop);
+
+	}
+
+	if (rq->spb_red_pass && (rq->spb_red_pass >= rq->spb_red_drop)) {
+		aq->rq.spb_pool_pass = rq->spb_red_pass;
+		aq->rq.spb_pool_drop = rq->spb_red_drop;
+		aq->rq_mask.spb_pool_pass = ~(aq->rq_mask.spb_pool_pass);
+		aq->rq_mask.spb_pool_drop = ~(aq->rq_mask.spb_pool_drop);
+
+	}
+
+	if (rq->xqe_red_pass && (rq->xqe_red_pass >= rq->xqe_red_drop)) {
+		aq->rq.xqe_pass = rq->xqe_red_pass;
+		aq->rq.xqe_drop = rq->xqe_red_drop;
+		aq->rq_mask.xqe_drop = ~(aq->rq_mask.xqe_drop);
+		aq->rq_mask.xqe_pass = ~(aq->rq_mask.xqe_pass);
+	}
+
+	return mbox_process(mbox);
+}
+
 int
 roc_nix_rq_init(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 {
@@ -616,6 +696,32 @@ roc_nix_rq_modify(struct roc_nix *roc_nix, struct roc_nix_rq *rq, bool ena)
 	return nix_tel_node_add_rq(rq);
 }
 
+int
+roc_nix_rq_cman_config(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
+{
+	bool is_cn9k = roc_model_is_cn9k();
+	struct nix *nix;
+	struct dev *dev;
+	int rc;
+
+	if (roc_nix == NULL || rq == NULL)
+		return NIX_ERR_PARAM;
+
+	nix = roc_nix_to_nix_priv(roc_nix);
+
+	if (rq->qid >= nix->nb_rx_queues)
+		return NIX_ERR_QUEUE_INVALID_RANGE;
+
+	dev = &nix->dev;
+
+	if (is_cn9k)
+		rc = nix_rq_cn9k_cman_cfg(dev, rq);
+	else
+		rc = nix_rq_cman_cfg(dev, rq);
+
+	return rc;
+}
+
 int
 roc_nix_rq_fini(struct roc_nix_rq *rq)
 {
diff --git a/drivers/common/cnxk/version.map b/drivers/common/cnxk/version.map
index 276fec3660..e935f17c28 100644
--- a/drivers/common/cnxk/version.map
+++ b/drivers/common/cnxk/version.map
@@ -228,6 +228,7 @@ INTERNAL {
 	roc_nix_reassembly_configure;
 	roc_nix_register_cq_irqs;
 	roc_nix_register_queue_irqs;
+	roc_nix_rq_cman_config;
 	roc_nix_rq_dump;
 	roc_nix_rq_ena_dis;
 	roc_nix_rq_fini;
-- 
2.25.1
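
For reviewers, a minimal usage sketch of the new API (illustrative only, not part of
the patch; the function name and the threshold values below are made up). It assumes
the RQ has already been set up with roc_nix_rq_init()/roc_nix_rq_modify():

/* Hypothetical example: program RED thresholds on an RQ and push them via
 * roc_nix_rq_cman_config(). Threshold values (90/70) are placeholders.
 */
static int
example_rq_red_setup(struct roc_nix *roc_nix, struct roc_nix_rq *rq)
{
	/* LPB pool average-level RED thresholds (applied only if pass != 0
	 * and pass >= drop, matching the checks in nix_rq_cman_cfg()).
	 */
	rq->red_pass = 90;
	rq->red_drop = 70;

	/* SPB pool average-level RED thresholds */
	rq->spb_red_pass = 90;
	rq->spb_red_drop = 70;

	/* XQE average-level RED thresholds (new fields in this patch) */
	rq->xqe_red_pass = 90;
	rq->xqe_red_drop = 70;

	/* Issues an RQ context AQ write over the CN9K or CN10K mailbox */
	return roc_nix_rq_cman_config(roc_nix, rq);
}

Since each block is skipped when its pass threshold is zero, a caller can configure
any subset of the LPB, SPB and XQE RED thresholds independently.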