Update grinder schedule function to allow flexible configuration
of pipe traffic classes and queues, and subport level configuration
of the pipe parameters.
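
For context, a minimal, self-contained sketch of the branch-free WRR token
update that grinder_schedule() now uses: the accrual is multiplied by a 0/1
flag so that only the best-effort traffic class accumulates WRR tokens. All
names below are illustrative stand-ins, not librte_sched symbols.

/* Toy illustration of the WRR token update pattern; compile with any C99
 * compiler. TOY_TC_BE stands in for RTE_SCHED_TRAFFIC_CLASS_BE (index
 * assumed to be 3 here, purely for illustration). */
#include <stdint.h>
#include <stdio.h>

#define TOY_TC_BE 3 /* assumed index of the best-effort traffic class */

struct toy_grinder {
	uint32_t tc_index;
	uint32_t wrr_tokens[4];
	uint32_t wrr_cost[4];
};

static void
toy_update_wrr(struct toy_grinder *g, uint32_t qpos, uint32_t pkt_len)
{
	/* 1 when the scheduled queue belongs to the best-effort TC, else 0 */
	int be_tc_active = (g->tc_index == TOY_TC_BE);

	/* Strict-priority TCs skip WRR accounting without taking a branch */
	g->wrr_tokens[qpos] += pkt_len * g->wrr_cost[qpos] * be_tc_active;
}

int
main(void)
{
	struct toy_grinder g = {
		.tc_index = TOY_TC_BE,
		.wrr_cost = {1, 2, 4, 8},
	};

	toy_update_wrr(&g, 1, 64);
	printf("BE tokens: %u\n", (unsigned)g.wrr_tokens[1]); /* 128 */

	g.tc_index = 0; /* a strict-priority traffic class */
	toy_update_wrr(&g, 1, 64);
	printf("BE tokens: %u\n", (unsigned)g.wrr_tokens[1]); /* still 128 */
	return 0;
}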

Signed-off-by: Jasvinder Singh <jasvinder.si...@intel.com>
Signed-off-by: Abraham Tovar <abrahamx.to...@intel.com>
Signed-off-by: Lukasz Krakowiak <lukaszx.krakow...@intel.com>
---
 lib/librte_sched/rte_sched.c | 82 ++++++++++++++++++++++--------------
 1 file changed, 51 insertions(+), 31 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 607fe6c18..f468827f4 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -2096,14 +2096,14 @@ grinder_credits_update(struct rte_sched_port *port,
 #ifndef RTE_SCHED_SUBPORT_TC_OV
 
 static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+grinder_credits_check(struct rte_sched_subport *subport,
+       uint32_t pos, uint32_t frame_overhead)
 {
-       struct rte_sched_grinder *grinder = port->grinder + pos;
-       struct rte_sched_subport *subport = grinder->subport;
+       struct rte_sched_grinder *grinder = subport->grinder + pos;
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       uint32_t pkt_len = pkt->pkt_len + frame_overhead;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
@@ -2119,7 +2119,7 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
        if (!enough_credits)
                return 0;
 
-       /* Update port credits */
+       /* Update subport credits */
        subport->tb_credits -= pkt_len;
        subport->tc_credits[tc_index] -= pkt_len;
        pipe->tb_credits -= pkt_len;
@@ -2131,23 +2131,30 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 #else
 
 static inline int
-grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
+grinder_credits_check(struct rte_sched_subport *subport,
+       uint32_t pos, uint32_t frame_overhead)
 {
-       struct rte_sched_grinder *grinder = port->grinder + pos;
-       struct rte_sched_subport *subport = grinder->subport;
+       struct rte_sched_grinder *grinder = subport->grinder + pos;
        struct rte_sched_pipe *pipe = grinder->pipe;
        struct rte_mbuf *pkt = grinder->pkt;
        uint32_t tc_index = grinder->tc_index;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       uint32_t pkt_len = pkt->pkt_len + frame_overhead;
        uint32_t subport_tb_credits = subport->tb_credits;
        uint32_t subport_tc_credits = subport->tc_credits[tc_index];
        uint32_t pipe_tb_credits = pipe->tb_credits;
        uint32_t pipe_tc_credits = pipe->tc_credits[tc_index];
-       uint32_t pipe_tc_ov_mask1[] = {UINT32_MAX, UINT32_MAX, UINT32_MAX, pipe->tc_ov_credits};
-       uint32_t pipe_tc_ov_mask2[] = {0, 0, 0, UINT32_MAX};
-       uint32_t pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
+       uint32_t pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+       uint32_t pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {0};
+       uint32_t pipe_tc_ov_credits, i;
        int enough_credits;
 
+       for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
+               pipe_tc_ov_mask1[i] = UINT32_MAX;
+
+       pipe_tc_ov_mask1[RTE_SCHED_TRAFFIC_CLASS_BE] = pipe->tc_ov_credits;
+       pipe_tc_ov_mask2[RTE_SCHED_TRAFFIC_CLASS_BE] = UINT32_MAX;
+       pipe_tc_ov_credits = pipe_tc_ov_mask1[tc_index];
+
        /* Check pipe and subport credits */
        enough_credits = (pkt_len <= subport_tb_credits) &&
                (pkt_len <= subport_tc_credits) &&
@@ -2170,36 +2177,48 @@ grinder_credits_check(struct rte_sched_port *port, uint32_t pos)
 
 #endif /* RTE_SCHED_SUBPORT_TC_OV */
 
-
 static inline int
-grinder_schedule(struct rte_sched_port *port, uint32_t pos)
+grinder_schedule(struct rte_sched_port *port,
+       struct rte_sched_subport *subport, uint32_t pos)
 {
-       struct rte_sched_grinder *grinder = port->grinder + pos;
-       struct rte_sched_queue *queue = grinder->queue[grinder->qpos];
+       struct rte_sched_grinder *grinder = subport->grinder + pos;
        struct rte_mbuf *pkt = grinder->pkt;
-       uint32_t pkt_len = pkt->pkt_len + port->frame_overhead;
+       struct rte_sched_queue *queue;
+       uint32_t frame_overhead = port->frame_overhead;
+       uint32_t qpos, pkt_len;
+       int be_tc_active;
 
-       if (!grinder_credits_check(port, pos))
+       if (!grinder_credits_check(subport, pos, frame_overhead))
                return 0;
 
+       pkt_len = pkt->pkt_len + frame_overhead;
+       qpos = grinder->qpos;
+       queue = grinder->queue[qpos];
+
        /* Advance port time */
        port->time += pkt_len;
 
        /* Send packet */
        port->pkts_out[port->n_pkts_out++] = pkt;
        queue->qr++;
-       grinder->wrr_tokens[grinder->qpos] += pkt_len * grinder->wrr_cost[grinder->qpos];
+
+       be_tc_active = (grinder->tc_index == RTE_SCHED_TRAFFIC_CLASS_BE);
+       grinder->wrr_tokens[qpos] +=
+               pkt_len * grinder->wrr_cost[qpos] * be_tc_active;
+
        if (queue->qr == queue->qw) {
-               uint32_t qindex = grinder->qindex[grinder->qpos];
+               uint32_t qindex = grinder->qindex[qpos];
+
+               rte_bitmap_clear(subport->bmp, qindex);
+               grinder->qmask &= ~(1 << qpos);
+               if (be_tc_active)
+                       grinder->wrr_mask[qpos] = 0;
 
-               rte_bitmap_clear(port->bmp, qindex);
-               grinder->qmask &= ~(1 << grinder->qpos);
-               grinder->wrr_mask[grinder->qpos] = 0;
-               rte_sched_port_set_queue_empty_timestamp(port, port->subport, qindex);
+               rte_sched_port_set_queue_empty_timestamp(port, subport, qindex);
        }
 
        /* Reset pipe loop detection */
-       port->pipe_loop = RTE_SCHED_PIPE_INVALID;
+       subport->pipe_loop = RTE_SCHED_PIPE_INVALID;
        grinder->productive = 1;
 
        return 1;
@@ -2585,14 +2604,15 @@ grinder_prefetch_mbuf(struct rte_sched_subport *subport, uint32_t pos)
 static inline uint32_t
 grinder_handle(struct rte_sched_port *port, uint32_t pos)
 {
-       struct rte_sched_grinder *grinder = port->grinder + pos;
+       struct rte_sched_subport *subport = port->subport;
+       struct rte_sched_grinder *grinder = subport->grinder + pos;
 
        switch (grinder->state) {
        case e_GRINDER_PREFETCH_PIPE:
        {
-               if (grinder_next_pipe(port->subport, pos)) {
-                       grinder_prefetch_pipe(port->subport, pos);
-                       port->busy_grinders++;
+               if (grinder_next_pipe(subport, pos)) {
+                       grinder_prefetch_pipe(subport, pos);
+                       subport->busy_grinders++;
 
                        grinder->state = e_GRINDER_PREFETCH_TC_QUEUE_ARRAYS;
                        return 0;
@@ -2615,7 +2635,7 @@ grinder_handle(struct rte_sched_port *port, uint32_t pos)
 
        case e_GRINDER_PREFETCH_MBUF:
        {
-               grinder_prefetch_mbuf(port->subport, pos);
+               grinder_prefetch_mbuf(subport, pos);
 
                grinder->state = e_GRINDER_READ_MBUF;
                return 0;
@@ -2625,7 +2645,7 @@ grinder_handle(struct rte_sched_port *port, uint32_t pos)
        {
                uint32_t result = 0;
 
-               result = grinder_schedule(port, pos);
+               result = grinder_schedule(port, subport, pos);
 
                /* Look for next packet within the same TC */
                if (result && grinder->qmask) {
-- 
2.21.0
