From: Alan Dewar <alan.de...@att.com>

Move the WRED queue configuration parameters from rte_sched_port_params
into rte_sched_subport_params so that we can have different WRED
configurations on each subport.

Updated sched unit-test to exercise new functionality.
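
As an illustrative sketch (hypothetical: the helper name and the WRED
threshold values below are placeholders, not part of this patch), a caller
can now hand each subport its own WRED table via
rte_sched_subport_config_v2(), or pass NULL to keep the port-level WRED
configuration:

    #include <rte_red.h>
    #include <rte_sched.h>

    /* Hypothetical helper -- threshold values are illustrative only. */
    static int
    configure_subport_wred(struct rte_sched_port *port, uint32_t subport_id,
                           struct rte_sched_subport_params *sp,
                           uint16_t *qsize)
    {
            struct rte_red_params red[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
                                     [e_RTE_METER_COLORS] = {
                    {     /* TC-0: min_th  max_th  maxp_inv  wq_log2 */
                            {       32,     63,      10,       9 }, /* Green */
                            {       16,     63,      10,       9 }, /* Yellow */
                            {        8,     63,      10,       9 }  /* Red */
                    },
                    /* Traffic classes left all-zero keep WRED disabled,
                     * since min_th and max_th are both zero.
                     */
            };

            /* Pass NULL instead of red to inherit the port-level WRED
             * configuration for this subport.
             */
            return rte_sched_subport_config_v2(port, subport_id, sp,
                                               qsize, red);
    }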

Signed-off-by: Alan Dewar <alan.de...@att.com>
---
 lib/librte_sched/rte_sched.c |  54 +++++-
 lib/librte_sched/rte_sched.h |   6 +-
 test/test/test_sched.c       | 402 +++++++++++++++++++++++++++++++++----------
 3 files changed, 363 insertions(+), 99 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 9436ba5..087d7fc 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -77,6 +77,11 @@ struct rte_sched_subport {
        uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
        uint32_t qsize_sum;
        uint32_t qoffset;
+
+#ifdef RTE_SCHED_RED
+       struct rte_red_config red_config[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+               [e_RTE_METER_COLORS];
+#endif
 };
 
 struct rte_sched_pipe_profile {
@@ -857,7 +862,9 @@ static int
 rte_sched_subport_config_common(struct rte_sched_port *port,
                                uint32_t subport_id,
                                struct rte_sched_subport_params *params,
-                               uint16_t *qsize)
+                               uint16_t *qsize,
+                               struct rte_red_params red_params[]
+                                       [e_RTE_METER_COLORS])
 {
        struct rte_sched_subport *s;
        uint32_t i;
@@ -909,6 +916,38 @@ rte_sched_subport_config_common(struct rte_sched_port *port,
                                                     params->tc_rate[i]);
                s->tc_credits[i] = s->tc_credits_per_period[i];
        }
+
+#ifdef RTE_SCHED_RED
+       for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+               uint32_t j;
+
+               if (!red_params) {
+                       /* Copy the red configuration from port */
+                       for (j = 0; j < e_RTE_METER_COLORS; j++)
+                               s->red_config[i][j] = port->red_config[i][j];
+               } else {
+                       /* Subport has an individual red configuration */
+                       for (j = 0; j < e_RTE_METER_COLORS; j++) {
+                               /* if min/max are both zero, then RED is
+                                * disabled
+                                */
+                               if ((red_params[i][j].min_th |
+                                    red_params[i][j].max_th) == 0) {
+                                       continue;
+                               }
+
+                               if (rte_red_config_init(&s->red_config[i][j],
+                                       red_params[i][j].wq_log2,
+                                       red_params[i][j].min_th,
+                                       red_params[i][j].max_th,
+                                       red_params[i][j].maxp_inv) != 0) {
+                                       return -6;
+                               }
+                       }
+               }
+       }
+#endif
+
 #ifdef RTE_SCHED_SUBPORT_TC_OV
        /* TC oversubscription */
        s->tc_ov_wm_min = port->mtu;
@@ -932,17 +971,20 @@ rte_sched_subport_config(struct rte_sched_port *port,
                         uint32_t subport_id,
                         struct rte_sched_subport_params *params)
 {
-       return rte_sched_subport_config_common(port, subport_id, params, NULL);
+       return rte_sched_subport_config_common(port, subport_id, params, NULL,
+                                              NULL);
 }
 
 int
 rte_sched_subport_config_v2(struct rte_sched_port *port,
                            uint32_t subport_id,
                            struct rte_sched_subport_params *params,
-                           uint16_t *qsize)
+                           uint16_t *qsize,
+                           struct rte_red_params red_params[]
+                               [e_RTE_METER_COLORS])
 {
        return rte_sched_subport_config_common(port, subport_id, params,
-                                              qsize);
+                                              qsize, red_params);
 }
 
 int
@@ -1236,6 +1278,8 @@ rte_sched_port_update_queue_stats_on_drop(struct rte_sched_port *port,
 static inline int
 rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint32_t qindex, uint16_t qlen)
 {
+       struct rte_sched_subport *subport = port->subport +
+               (qindex / rte_sched_port_queues_per_subport(port));
        struct rte_sched_queue_extra *qe;
        struct rte_red_config *red_cfg;
        struct rte_red *red;
@@ -1244,7 +1288,7 @@ rte_sched_port_red_drop(struct rte_sched_port *port, struct rte_mbuf *pkt, uint3
 
        tc_index = (qindex >> 2) & 0x3;
        color = rte_sched_port_pkt_read_color(pkt);
-       red_cfg = &port->red_config[tc_index][color];
+       red_cfg = &subport->red_config[tc_index][color];
 
        if ((red_cfg->min_th | red_cfg->max_th) == 0)
                return 0;
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 1e1d618..fe41ae4 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -275,6 +275,8 @@ rte_sched_subport_config(struct rte_sched_port *port,
  *   Subport configuration parameters
  * @param qsize
  *   Array of traffic-class maximum queue-lengths
+ * @param red_params
+ *   Subport WRED queue configuration parameters
  * @return
  *   0 upon success, error code otherwise
  */
@@ -282,7 +284,9 @@ int
 rte_sched_subport_config_v2(struct rte_sched_port *port,
                            uint32_t subport_id,
                            struct rte_sched_subport_params *params,
-                           uint16_t *qsize);
+                           uint16_t *qsize,
+                           struct rte_red_params red_params[]
+                               [e_RTE_METER_COLORS]);
 
 /**
  * Hierarchical scheduler pipe configuration
diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 2b22ebe..bc25d34 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -104,6 +104,32 @@ prepare_pkt(struct rte_mbuf *mbuf, uint32_t subport, uint32_t pipe, uint32_t tc,
        mbuf->data_len = 60;
 }
 
+static int
+pkt_check(struct rte_mbuf **mbufs, uint32_t nb_pkts, uint32_t in_subport,
+         uint32_t in_pipe, uint32_t in_tc, uint32_t in_queue)
+{
+       uint32_t i;
+
+       for (i = 0; i < nb_pkts; i++) {
+               enum rte_meter_color color;
+               uint32_t out_subport, out_pipe, out_tc, out_queue;
+
+               color = rte_sched_port_pkt_read_color(mbufs[i]);
+               TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+
+               rte_sched_port_pkt_read_tree_path(mbufs[i], &out_subport,
+                                                 &out_pipe, &out_tc,
+                                                 &out_queue);
+
+               TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
+               TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
+               TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
+               TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
+               rte_pktmbuf_free(mbufs[i]);
+       }
+
+       return 0;
+}
 
 /**
  * test main entrance for library sched
@@ -143,41 +169,33 @@ test_sched(void)
                prepare_pkt(in_mbufs[i], SUBPORT, PIPE, TC, QUEUE);
        }
 
-
        err = rte_sched_port_enqueue(port, in_mbufs, 10);
        TEST_ASSERT_EQUAL(err, 10, "Wrong enqueue, err=%d\n", err);
 
        err = rte_sched_port_dequeue(port, out_mbufs, 10);
        TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);
 
-       for (i = 0; i < 10; i++) {
-               enum rte_meter_color color;
-               uint32_t subport, traffic_class, queue;
-
-               color = rte_sched_port_pkt_read_color(out_mbufs[i]);
-               TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
-
-               rte_sched_port_pkt_read_tree_path(out_mbufs[i],
-                               &subport, &pipe, &traffic_class, &queue);
-
-               TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
-               TEST_ASSERT_EQUAL(pipe, PIPE, "Wrong pipe\n");
-               TEST_ASSERT_EQUAL(traffic_class, TC, "Wrong traffic_class\n");
-               TEST_ASSERT_EQUAL(queue, QUEUE, "Wrong queue\n");
-
-       }
-
+       err = pkt_check(out_mbufs, err, SUBPORT, PIPE, TC, QUEUE);
+       TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
 
        struct rte_sched_subport_stats subport_stats;
        uint32_t tc_ov;
        rte_sched_subport_read_stats(port, SUBPORT, &subport_stats, &tc_ov);
-#if 0
-       TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc[TC-1], 10, "Wrong subport stats\n");
+#ifdef RTE_SCHED_COLLECT_STATS
+       TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc[TC], 10,
+                         "Wrong subport stats\n");
 #endif
        struct rte_sched_queue_stats queue_stats;
        uint16_t qlen;
        rte_sched_queue_read_stats(port, QUEUE, &queue_stats, &qlen);
 #if 0
+       /*
+        * This assert fails because the wrong queue_id is passed into
+        * rte_sched_queue_read_stats.  To calculate the correct queue_id
+        * we really need to call rte_sched_port_qindex passing in port,
+        * subport, pipe, traffic-class and queue-number.  Unfortunately
+        * rte_sched_port_qindex is a static function.
+        */
        TEST_ASSERT_EQUAL(queue_stats.n_pkts, 10, "Wrong queue stats\n");
 #endif
 
@@ -222,7 +240,7 @@ static struct rte_sched_pipe_params pipe_profile_v2[] = {
        },
 };
 
-static uint16_t subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
+static uint16_t config_subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
        { 16, 32, 64, 128 },
        { 256, 512, 1024, 2048 },
 };
@@ -239,6 +257,64 @@ static struct rte_sched_port_params port_param_v2 = {
        .n_pipe_profiles = 1,
 };
 
+/*
+ * Note that currently all the packets are coloured yellow.
+ */
+struct rte_red_params subport_0_redparams[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+                                               [e_RTE_METER_COLORS] = {
+       { /* TC-0 queue-size 16 */
+               /* min_th  max_th  maxp_inv  wq_log2 */
+               {       0,      0,        0,       0 }, /* Green */
+               {       1,     15,      255,      12 }, /* Yellow */
+               {       1,     15,        1,       1 }  /* Red */
+       },
+       { /* TC-1 queue-size 32 */
+               {      24,     31,        1,       1 }, /* Green */
+               {      16,     31,       10,       1 }, /* Yellow */
+               {       8,     31,      100,       1 }  /* Red */
+       },
+       { /* TC-2 queue-size 64 */
+               {      32,     63,        1,       1 }, /* Green */
+               {      16,     31,        1,       1 }, /* Yellow */
+               {       8,     15,        1,       1 }  /* Red */
+       },
+       { /* TC-3 queue-size 128 */
+               {      64,    127,      255,      12 }, /* Green */
+               {      32,     63,      255,      12 }, /* Yellow */
+               {      16,     31,      255,      12 }  /* Red */
+       }
+};
+
+struct rte_red_params subport_1_redparams[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE]
+                                               [e_RTE_METER_COLORS] = {
+       { /* TC-0 queue-size 256 */
+               /* min_th  max_th  maxp_inv  wq_log2 */
+               {       0,      0,        0,       0 }, /* Green */
+               {     128,    255,      100,       1 }, /* Yellow */
+               {     128,    255,        2,      10 }  /* Red */
+       },
+       { /* TC-1 queue-size 512 */
+               {     256,    511,        2,       1 }, /* Green */
+               {     128,    511,       20,       1 }, /* Yellow */
+               {      64,    511,      200,       1 }  /* Red */
+       },
+       { /* TC-2 queue-size 1024 */
+               {     512,   1023,        6,       4 }, /* Green */
+               {     256,   1023,        6,       4 }, /* Yellow */
+               {     128,   1023,        6,       4 }  /* Red */
+       },
+       { /* TC-3 queue-size 2048 - RTE_RED_MAX_TH_MAX = 1023 */
+               {    1022,   1023,      128,       9 }, /* Green */
+               {     512,   1023,       64,       6 }, /* Yellow */
+               {     256,   1023,       32,       3 }  /* Red */
+       }
+};
+
+struct rte_red_params *config_subport_redparams[] = {
+       &subport_0_redparams[0][0],
+       &subport_1_redparams[0][0]
+};
+
 static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
                                    uint16_t *qsize)
 {
@@ -252,14 +328,81 @@ static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
                pp->n_pipes_per_subport * sizeof(struct rte_mbuf *));
 }
 
-static int fill_queue_to_drop(struct rte_mempool *mp,
-                             struct rte_sched_port *port,
-                             uint32_t in_subport, uint32_t in_pipe,
-                             uint32_t in_tc, uint16_t qsize)
+static int
+test_dequeue_pkts(struct rte_sched_port *port, struct rte_mbuf **mbufs,
+                 uint16_t nb_pkts)
+{
+       uint16_t total_dequeued;
+       int err;
+
+       total_dequeued = 0;
+       err = 1;
+
+       /*
+        * With small queues we should be able to dequeue a full queue's worth
+        * of packets with a single call to rte_sched_port_dequeue.  With
+        * larger queues we will probably need to make multiple calls as we
+        * could run out of credit to dequeue all the packets in one attempt.
+        */
+       while (total_dequeued < nb_pkts && err != 0) {
+               err = rte_sched_port_dequeue(port, mbufs, nb_pkts);
+               total_dequeued += err;
+       }
+       return total_dequeued;
+}
+
+static int
+test_sched_v2_setup(uint16_t qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE],
+                   struct rte_red_params **subport_redparams,
+                   struct rte_sched_port **port)
+{
+       uint32_t queue_array_size;
+       uint32_t subport;
+       uint32_t pipe;
+       int err;
+
+       queue_array_size = 0;
+       for (subport = 0; subport < NB_SUBPORTS; subport++)
+               queue_array_size +=
+                       subport_total_qsize(&port_param_v2, &qsize[subport][0]);
+
+       *port = rte_sched_port_config_v2(&port_param_v2,
+                                        queue_array_size);
+       TEST_ASSERT_NOT_NULL(*port, "Error config sched port\n");
+
+       for (subport = 0; subport < NB_SUBPORTS; subport++) {
+               void *redparams = NULL;
+
+               if (subport_redparams)
+                       redparams = subport_redparams[subport];
+
+               err = rte_sched_subport_config_v2(*port, subport,
+                                                 &subport_param_v2[subport],
+                                                 &qsize[subport][0],
+                                                 redparams);
+               TEST_ASSERT_SUCCESS(err,
+                                   "Error config sched subport %u, err=%d\n",
+                                   subport, err);
+               for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
+                    pipe++) {
+                       err = rte_sched_pipe_config(*port, subport, pipe, 0);
+                       TEST_ASSERT_SUCCESS(err,
+                                           "Error config sched subport %u "
+                                           "pipe %u, err=%d\n",
+                                           subport, pipe, err);
+               }
+       }
+       return 0;
+}
+
+static int
+test_queue_size_drop(struct rte_mempool *mp, struct rte_sched_port *port,
+                    uint32_t subport, uint32_t pipe, uint32_t tc,
+                    uint16_t qsize)
 {
        struct rte_mbuf **in_mbufs;
        struct rte_mbuf **out_mbufs;
-       uint32_t in_queue = 0;
+       uint32_t queue = 0;
        uint32_t i;
        int err;
 
@@ -279,7 +422,7 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
        for (i = 0; i <= qsize; i++) {
                in_mbufs[i] = rte_pktmbuf_alloc(mp);
                TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-               prepare_pkt(in_mbufs[i], in_subport, in_pipe, in_tc, in_queue);
+               prepare_pkt(in_mbufs[i], subport, pipe, tc, queue);
        }
 
        /*
@@ -296,53 +439,117 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
        in_mbufs[qsize] = NULL;
 
        /*
-        * With small queues we should be able to dequeue a full queue's worth
-        * of packets with a single call to rte_sched_port_dequeue.  With
-        * larger queues we will probably need to make multiple calls as we
-        * could run out of credit to dequeue all the packet in one attempt.
+        * Dequeue all the packets off the queue.
         */
-       i = 0;
-       err = 1;
-       while (i < qsize && err != 0) {
-               err = rte_sched_port_dequeue(port, out_mbufs, qsize);
-               i += err;
-       }
-       TEST_ASSERT_EQUAL(i, qsize,
-                         "Wrong dequeue, err=%d, i: %u, qsize: %u\n",
-                         err, i, qsize);
+       i = test_dequeue_pkts(port, out_mbufs, qsize);
+       TEST_ASSERT_EQUAL(i, qsize, "Failed to dequeue all pkts\n");
 
        /*
-        * Check that all the dequeued packets have to right numbers in them.
+        * Check that all the dequeued packets have the right numbers in them.
         */
-       for (i = 0; i < qsize; i++) {
-               enum rte_meter_color color;
-               uint32_t out_subport, out_pipe, out_tc, out_queue;
+       err = pkt_check(out_mbufs, qsize, subport, pipe, tc, queue);
+       TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
 
-               color = rte_sched_port_pkt_read_color(out_mbufs[i]);
-               TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+#ifdef RTE_SCHED_COLLECT_STATS
+       struct rte_sched_subport_stats subport_stats;
+       uint32_t tc_ov;
 
-               rte_sched_port_pkt_read_tree_path(out_mbufs[i],
-                               &out_subport, &out_pipe, &out_tc, &out_queue);
+       /*
+        * Did the subport stats see a packet dropped in this traffic-class?
+        */
+       rte_sched_subport_read_stats(port, subport, &subport_stats, &tc_ov);
+       TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[tc], 1,
+                         "Wrong subport stats\n");
+#endif
 
-               TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
-               TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
-               TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
-               TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
-               rte_pktmbuf_free(out_mbufs[i]);
+       rte_free(in_mbufs);
+       rte_free(out_mbufs);
+
+       return 0;
+}
+
+static int
+test_queue_size(struct rte_mempool *mp, struct rte_sched_port *port,
+               uint32_t subport)
+{
+       uint32_t pipe;
+       uint32_t tc;
+       int err;
+
+       for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
+               for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+                       err = test_queue_size_drop(mp, port, subport, pipe, tc,
+                                       config_subport_qsize[subport][tc]);
+                       TEST_ASSERT_SUCCESS(err, "test_queue_size_drop "
+                                           "failed\n");
+               }
+       }
+       return 0;
+}
+
+static int test_red(struct rte_mempool *mp, struct rte_sched_port *port,
+                   uint32_t subport, uint32_t pipe, uint32_t tc,
+                   uint16_t qsize)
+{
+       struct rte_mbuf **in_mbufs;
+       struct rte_mbuf **out_mbufs;
+       uint32_t queue = 0;
+       uint32_t i;
+       int err;
+       int queued;
+
+       in_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+                             RTE_CACHE_LINE_SIZE);
+       TEST_ASSERT_NOT_NULL(in_mbufs, "Buffer array allocation failed\n");
+
+       out_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+                              RTE_CACHE_LINE_SIZE);
+       TEST_ASSERT_NOT_NULL(out_mbufs, "Buffer array allocation failed\n");
+
+       /*
+        * Allocate qsize buffers so that we can attempt to completely fill the
+        * queue, then check the subport stats to see if any packets were
+        * red-dropped.
+        */
+       for (i = 0; i < qsize; i++) {
+               in_mbufs[i] = rte_pktmbuf_alloc(mp);
+               TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
+               prepare_pkt(in_mbufs[i], subport, pipe, tc, queue);
        }
 
+       /*
+        * Some of these packets might not get queued correctly due to
+        * red-drops.
+        */
+       queued = rte_sched_port_enqueue(port, in_mbufs, qsize);
+
 #ifdef RTE_SCHED_COLLECT_STATS
        struct rte_sched_subport_stats subport_stats;
        uint32_t tc_ov;
+       uint32_t red_drops;
 
        /*
-        * Did the subport stats see a packet dropped in this traffic-class?
+        * Did the subport stats see any packets red-dropped in this
+        * traffic-class?
         */
-       rte_sched_subport_read_stats(port, in_subport, &subport_stats, &tc_ov);
-       TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[in_tc], 1,
-                         "Wrong subport stats\n");
+       rte_sched_subport_read_stats(port, subport, &subport_stats, &tc_ov);
+       red_drops = subport_stats.n_pkts_red_dropped[tc];
+       TEST_ASSERT_EQUAL((qsize - red_drops), (uint32_t)queued,
+                         "Red-drop count doesn't agree queued count\n");
 #endif
 
+       /*
+        * Dequeue all the packets off the queue.
+        */
+       i = test_dequeue_pkts(port, out_mbufs, queued);
+       TEST_ASSERT_EQUAL(i, (uint32_t)queued, "Failed to dequeue all pkts\n");
+
+       /*
+        * Check that all the dequeued packets have the right numbers in them.
+        */
+       err = pkt_check(out_mbufs, queued, subport, pipe, tc, queue);
+       TEST_ASSERT_SUCCESS(err, "Packet checking failed\n");
+
        rte_free(in_mbufs);
        rte_free(out_mbufs);
 
@@ -350,8 +557,8 @@ static int fill_queue_to_drop(struct rte_mempool *mp,
 }
 
 static int
-subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
-                   uint32_t subport)
+test_red_queues(struct rte_mempool *mp, struct rte_sched_port *port,
+               uint32_t subport)
 {
        uint32_t pipe;
        uint32_t tc;
@@ -359,27 +566,24 @@ subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
 
        for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
                for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
-                       err = fill_queue_to_drop(mp, port, subport, pipe, tc,
-                                                subport_qsize[subport][tc]);
-                       TEST_ASSERT_SUCCESS(err, "fill-queue-to-drop failed, "
-                                         "err=%d\n", err);
+                       err = test_red(mp, port, subport, pipe, tc,
+                                      config_subport_qsize[subport][tc]);
+                       TEST_ASSERT_SUCCESS(err, "test_red failed\n");
                }
        }
        return 0;
 }
 
 /**
- * test main entrance for library sched using the v2 APIs that
- * allow queue-size and WRED configurations on a per-subport basis.
+ * test main entrance for library sched using the v2 APIs that allow
+ * queue-size configuration on a per-subport basis.
  */
 static int
-test_sched_v2(void)
+test_sched_v2_qsize(void)
 {
        struct rte_mempool *mp = NULL;
        struct rte_sched_port *port = NULL;
        uint32_t subport;
-       uint32_t pipe;
-       uint32_t queue_array_size;
        int err;
 
        rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
@@ -390,35 +594,45 @@ test_sched_v2(void)
        port_param_v2.socket = 0;
        port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
 
-       queue_array_size = 0;
-       for (subport = 0; subport < NB_SUBPORTS; subport++)
-               queue_array_size +=
-                       subport_total_qsize(&port_param_v2,
-                                           &subport_qsize[subport][0]);
-
-       port = rte_sched_port_config_v2(&port_param_v2,
-                                       queue_array_size);
-       TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");
+       err = test_sched_v2_setup(config_subport_qsize, NULL, &port);
+       TEST_ASSERT_SUCCESS(err, "test_sched_v2_setup failed\n");
 
        for (subport = 0; subport < NB_SUBPORTS; subport++) {
-               err = rte_sched_subport_config_v2(port, subport,
-                                                 &subport_param_v2[subport],
-                                                 &subport_qsize[subport][0]);
-               TEST_ASSERT_SUCCESS(err,
-                                   "Error config sched subport %u, err=%d\n",
-                                   subport, err);
-               for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
-                    pipe++) {
-                       err = rte_sched_pipe_config(port, subport, pipe, 0);
-                       TEST_ASSERT_SUCCESS(err,
-                                           "Error config sched subport %u "
-                                           "pipe %u, err=%d\n",
-                                           subport, pipe, err);
-               }
+               err = test_queue_size(mp, port, subport);
+               TEST_ASSERT_SUCCESS(err, "test_queue_size failed\n");
        }
 
+       rte_sched_port_free(port);
+
+       return 0;
+}
+
+/**
+ * test main entrance for library sched using the v2 APIs that allow WRED
+ * configurations on a per-subport basis.
+ */
+static int
+test_sched_v2_red(void)
+{
+       struct rte_mempool *mp = NULL;
+       struct rte_sched_port *port = NULL;
+       uint32_t subport;
+       int err;
+
+       rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+       mp = create_mempool();
+       TEST_ASSERT_NOT_NULL(mp, "Error creating mempool\n");
+
+       port_param_v2.socket = 0;
+       port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
+
+       err = test_sched_v2_setup(config_subport_qsize,
+                                 config_subport_redparams, &port);
+       TEST_ASSERT_SUCCESS(err, "Test setup failed\n");
+
        for (subport = 0; subport < NB_SUBPORTS; subport++) {
-               err = subport_fill_queues(mp, port, subport);
+               err = test_red_queues(mp, port, subport);
                TEST_ASSERT_SUCCESS(err, "subport-fill-queue failed, err=%d\n",
                                    err);
        }
@@ -428,4 +642,6 @@ test_sched_v2(void)
        return 0;
 }
 
-REGISTER_TEST_COMMAND(sched_autotest_v2, test_sched_v2);
+
+REGISTER_TEST_COMMAND(sched_autotest_v2_qsize, test_sched_v2_qsize);
+REGISTER_TEST_COMMAND(sched_autotest_v2_red, test_sched_v2_red);
-- 
2.7.4
