From: Alan Dewar <alan.de...@att.com>

Added new APIs to allow the maximum queue sizes for each traffic class
to be configured on a per-subport basis, rather than all subports
inheriting their maximum queue sizes from their parent port.

Added a new sched unit test to exercise the new APIs.

Signed-off-by: Alan Dewar <alan.de...@att.com>
---
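For reviewers: a minimal usage sketch of the new v2 APIs introduced by this
patch. The example_port_setup() helper, EXAMPLE_N_SUBPORTS and the per-subport
qsize values below are illustrative assumptions only; they are not part of the
patch itself.

#include <rte_sched.h>

#define EXAMPLE_N_SUBPORTS 2

/* Per-subport, per-traffic-class maximum queue sizes (example values only). */
static uint16_t example_qsize[EXAMPLE_N_SUBPORTS]
                             [RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
        { 64, 64, 64, 64 },
        { 512, 512, 512, 512 },
};

static struct rte_sched_port *
example_port_setup(struct rte_sched_port_params *pp,
                   struct rte_sched_subport_params *sp)
{
        struct rte_sched_port *port;
        uint32_t queue_array_size = 0;
        uint32_t i, tc;

        /* Total bytes of mbuf pointers needed by all subports' queues. */
        for (i = 0; i < EXAMPLE_N_SUBPORTS; i++)
                for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++)
                        queue_array_size += example_qsize[i][tc] *
                                RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS *
                                pp->n_pipes_per_subport *
                                sizeof(struct rte_mbuf *);

        /* Create the port with the pre-calculated queue-array size. */
        port = rte_sched_port_config_v2(pp, queue_array_size);
        if (port == NULL)
                return NULL;

        /* Give each subport its own traffic-class queue sizes. */
        for (i = 0; i < EXAMPLE_N_SUBPORTS; i++) {
                if (rte_sched_subport_config_v2(port, i, &sp[i],
                                                example_qsize[i]) != 0) {
                        rte_sched_port_free(port);
                        return NULL;
                }
        }

        return port;
}

The existing rte_sched_port_config()/rte_sched_subport_config() path is
unchanged: it passes a NULL qsize array internally, so those subports keep
inheriting their queue sizes from the port as before.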
 lib/librte_sched/rte_sched.c           | 243 +++++++++++++++++++++---------
 lib/librte_sched/rte_sched.h           |  48 ++++++
 lib/librte_sched/rte_sched_version.map |   8 +
 test/test/test_sched.c                 | 260 ++++++++++++++++++++++++++++++++-
 4 files changed, 485 insertions(+), 74 deletions(-)

diff --git a/lib/librte_sched/rte_sched.c b/lib/librte_sched/rte_sched.c
index 634486c..9436ba5 100644
--- a/lib/librte_sched/rte_sched.c
+++ b/lib/librte_sched/rte_sched.c
@@ -58,6 +58,7 @@ struct rte_sched_subport {
        uint64_t tc_time; /* time of next update */
        uint32_t tc_credits_per_period[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tc_credits[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
+       uint16_t qsize[RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE];
        uint32_t tc_period;
 
        /* TC oversubscription */
@@ -71,6 +72,11 @@ struct rte_sched_subport {
 
        /* Statistics */
        struct rte_sched_subport_stats stats;
+
+       /* Queue base calculation */
+       uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
+       uint32_t qsize_sum;
+       uint32_t qoffset;
 };
 
 struct rte_sched_pipe_profile {
@@ -215,10 +221,6 @@ struct rte_sched_port {
        struct rte_mbuf **pkts_out;
        uint32_t n_pkts_out;
 
-       /* Queue base calculation */
-       uint32_t qsize_add[RTE_SCHED_QUEUES_PER_PIPE];
-       uint32_t qsize_sum;
-
        /* Large data structures */
        struct rte_sched_subport *subport;
        struct rte_sched_pipe *pipe;
@@ -241,16 +243,12 @@ enum rte_sched_port_array {
        e_RTE_SCHED_PORT_ARRAY_TOTAL,
 };
 
-#ifdef RTE_SCHED_COLLECT_STATS
-
 static inline uint32_t
 rte_sched_port_queues_per_subport(struct rte_sched_port *port)
 {
        return RTE_SCHED_QUEUES_PER_PIPE * port->n_pipes_per_subport;
 }
 
-#endif
-
 static inline uint32_t
 rte_sched_port_queues_per_port(struct rte_sched_port *port)
 {
@@ -260,19 +258,27 @@ rte_sched_port_qbase(struct rte_sched_port *port)
 static inline struct rte_mbuf **
 rte_sched_port_qbase(struct rte_sched_port *port, uint32_t qindex)
 {
-       uint32_t pindex = qindex >> 4;
        uint32_t qpos = qindex & 0xF;
+       uint32_t subport_id = qindex / rte_sched_port_queues_per_subport(port);
+       struct rte_sched_subport *subport = port->subport + subport_id;
+       uint32_t subport_pipe_offset;
+
+       subport_pipe_offset = qindex % rte_sched_port_queues_per_subport(port);
+       subport_pipe_offset /= RTE_SCHED_QUEUES_PER_PIPE;
+       subport_pipe_offset *= RTE_SCHED_QUEUES_PER_PIPE;
 
-       return (port->queue_array + pindex *
-               port->qsize_sum + port->qsize_add[qpos]);
+       return (port->queue_array + subport->qoffset + subport_pipe_offset +
+               subport->qsize_add[qpos]);
 }
 
 static inline uint16_t
 rte_sched_port_qsize(struct rte_sched_port *port, uint32_t qindex)
 {
        uint32_t tc = (qindex >> 2) & 0x3;
+       uint32_t subport_id = qindex / rte_sched_port_queues_per_subport(port);
+       struct rte_sched_subport *subport = port->subport + subport_id;
 
-       return port->qsize[tc];
+       return subport->qsize[tc];
 }
 
 static int
@@ -360,7 +366,9 @@ rte_sched_port_check_params(struct rte_sched_port_params *params)
 }
 
 static uint32_t
-rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sched_port_array array)
+rte_sched_port_get_array_base(struct rte_sched_port_params *params,
+                             enum rte_sched_port_array array,
+                             uint32_t size_queue_array)
 {
        uint32_t n_subports_per_port = params->n_subports_per_port;
        uint32_t n_pipes_per_subport = params->n_pipes_per_subport;
@@ -375,16 +383,19 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
        uint32_t size_pipe_profiles
                = RTE_SCHED_PIPE_PROFILES_PER_PORT * sizeof(struct rte_sched_pipe_profile);
        uint32_t size_bmp_array = rte_bitmap_get_memory_footprint(n_queues_per_port);
-       uint32_t size_per_pipe_queue_array, size_queue_array;
+       uint32_t size_per_pipe_queue_array;
 
        uint32_t base, i;
 
-       size_per_pipe_queue_array = 0;
-       for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
-               size_per_pipe_queue_array += RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
-                       * params->qsize[i] * sizeof(struct rte_mbuf *);
+       if (size_queue_array == 0) {
+               size_per_pipe_queue_array = 0;
+               for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
+                       size_per_pipe_queue_array +=
+                               RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS
+                               * params->qsize[i] * sizeof(struct rte_mbuf *);
+               }
+               size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
        }
-       size_queue_array = n_pipes_per_port * size_per_pipe_queue_array;
 
        base = 0;
 
@@ -419,8 +430,9 @@ rte_sched_port_get_array_base(struct rte_sched_port_params *params, enum rte_sch
        return base;
 }
 
-uint32_t
-rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+static uint32_t
+rte_sched_port_get_memory_footprint_common(struct rte_sched_port_params *params,
+                                          uint32_t size_queue_array)
 {
        uint32_t size0, size1;
        int status;
@@ -434,39 +446,93 @@ rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
        }
 
        size0 = sizeof(struct rte_sched_port);
-       size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
+       size1 = rte_sched_port_get_array_base(params,
+                                             e_RTE_SCHED_PORT_ARRAY_TOTAL,
+                                             size_queue_array);
 
        return size0 + size1;
 }
 
+uint32_t
+rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params)
+{
+       return rte_sched_port_get_memory_footprint_common(params, 0);
+}
+
+uint32_t
+rte_sched_port_get_memory_footprint_v2(struct rte_sched_port_params *params,
+                                      uint32_t size_queue_array)
+{
+       return rte_sched_port_get_memory_footprint_common(params,
+                                                         size_queue_array);
+}
+
 static void
-rte_sched_port_config_qsize(struct rte_sched_port *port)
+rte_sched_subport_config_qsize(struct rte_sched_port *port,
+                              uint32_t subport_id,
+                              uint16_t *qsize)
 {
+       struct rte_sched_subport *subport = port->subport + subport_id;
+       uint32_t tc;
+
+       for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+               if (qsize == NULL)
+                       /* The subport inherits its qsizes from the port */
+                       subport->qsize[tc] = port->qsize[tc];
+               else
+                       /* The subport has explicitly configured qsizes */
+                       subport->qsize[tc] = qsize[tc];
+       }
+
        /* TC 0 */
-       port->qsize_add[0] = 0;
-       port->qsize_add[1] = port->qsize_add[0] + port->qsize[0];
-       port->qsize_add[2] = port->qsize_add[1] + port->qsize[0];
-       port->qsize_add[3] = port->qsize_add[2] + port->qsize[0];
+       subport->qsize_add[0] = 0;
+       subport->qsize_add[1] = subport->qsize_add[0] + subport->qsize[0];
+       subport->qsize_add[2] = subport->qsize_add[1] + subport->qsize[0];
+       subport->qsize_add[3] = subport->qsize_add[2] + subport->qsize[0];
 
        /* TC 1 */
-       port->qsize_add[4] = port->qsize_add[3] + port->qsize[0];
-       port->qsize_add[5] = port->qsize_add[4] + port->qsize[1];
-       port->qsize_add[6] = port->qsize_add[5] + port->qsize[1];
-       port->qsize_add[7] = port->qsize_add[6] + port->qsize[1];
+       subport->qsize_add[4] = subport->qsize_add[3] + subport->qsize[0];
+       subport->qsize_add[5] = subport->qsize_add[4] + subport->qsize[1];
+       subport->qsize_add[6] = subport->qsize_add[5] + subport->qsize[1];
+       subport->qsize_add[7] = subport->qsize_add[6] + subport->qsize[1];
 
        /* TC 2 */
-       port->qsize_add[8] = port->qsize_add[7] + port->qsize[1];
-       port->qsize_add[9] = port->qsize_add[8] + port->qsize[2];
-       port->qsize_add[10] = port->qsize_add[9] + port->qsize[2];
-       port->qsize_add[11] = port->qsize_add[10] + port->qsize[2];
+       subport->qsize_add[8] = subport->qsize_add[7] + subport->qsize[1];
+       subport->qsize_add[9] = subport->qsize_add[8] + subport->qsize[2];
+       subport->qsize_add[10] = subport->qsize_add[9] + subport->qsize[2];
+       subport->qsize_add[11] = subport->qsize_add[10] + subport->qsize[2];
 
        /* TC 3 */
-       port->qsize_add[12] = port->qsize_add[11] + port->qsize[2];
-       port->qsize_add[13] = port->qsize_add[12] + port->qsize[3];
-       port->qsize_add[14] = port->qsize_add[13] + port->qsize[3];
-       port->qsize_add[15] = port->qsize_add[14] + port->qsize[3];
+       subport->qsize_add[12] = subport->qsize_add[11] + subport->qsize[2];
+       subport->qsize_add[13] = subport->qsize_add[12] + subport->qsize[3];
+       subport->qsize_add[14] = subport->qsize_add[13] + subport->qsize[3];
+       subport->qsize_add[15] = subport->qsize_add[14] + subport->qsize[3];
+
+       subport->qsize_sum = subport->qsize_add[15] + subport->qsize[3];
 
-       port->qsize_sum = port->qsize_add[15] + port->qsize[3];
+       if (subport_id != 0) {
+               struct rte_sched_subport *prev = port->subport +
+                       (subport_id - 1);
+
+               subport->qoffset = prev->qoffset + prev->qsize_sum;
+       }
+}
+
+static char *
+rte_sched_build_queue_size_string(uint16_t *qsize, char *output_str)
+{
+       uint32_t tc;
+       int str_len;
+
+       str_len = sprintf(output_str, "[");
+       for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+               str_len += sprintf(output_str + str_len, "%u",
+                                  qsize[tc]);
+               if (tc != RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE - 1)
+                       str_len += sprintf(output_str + str_len, ", ");
+       }
+       sprintf(output_str + str_len, "]");
+       return output_str;
 }
 
 static void
@@ -590,16 +656,12 @@ rte_sched_port_config_pipe_profile_table(struct rte_sched_port *port, struct rte
        }
 }
 
-struct rte_sched_port *
-rte_sched_port_config(struct rte_sched_port_params *params)
+static struct rte_sched_port *
+rte_sched_port_config_common(struct rte_sched_port_params *params,
+                            uint32_t mem_size)
 {
        struct rte_sched_port *port = NULL;
-       uint32_t mem_size, bmp_mem_size, n_queues_per_port, i, cycles_per_byte;
-
-       /* Check user parameters. Determine the amount of memory to allocate */
-       mem_size = rte_sched_port_get_memory_footprint(params);
-       if (mem_size == 0)
-               return NULL;
+       uint32_t bmp_mem_size, n_queues_per_port, i, cycles_per_byte;
 
        /* Allocate memory to store the data structures */
        port = rte_zmalloc("qos_params", mem_size, RTE_CACHE_LINE_SIZE);
@@ -659,30 +721,28 @@ rte_sched_port_config(struct rte_sched_port_params *params)
        port->pkts_out = NULL;
        port->n_pkts_out = 0;
 
-       /* Queue base calculation */
-       rte_sched_port_config_qsize(port);
-
        /* Large data structures */
        port->subport = (struct rte_sched_subport *)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_SUBPORT));
+                       e_RTE_SCHED_PORT_ARRAY_SUBPORT, 0));
        port->pipe = (struct rte_sched_pipe *)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_PIPE));
+                       e_RTE_SCHED_PORT_ARRAY_PIPE, 0));
        port->queue = (struct rte_sched_queue *)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_QUEUE));
+                       e_RTE_SCHED_PORT_ARRAY_QUEUE, 0));
        port->queue_extra = (struct rte_sched_queue_extra *)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA));
+                       e_RTE_SCHED_PORT_ARRAY_QUEUE_EXTRA, 0));
        port->pipe_profiles = (struct rte_sched_pipe_profile *)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES));
+                       e_RTE_SCHED_PORT_ARRAY_PIPE_PROFILES, 0));
        port->bmp_array =  port->memory
-               + rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY);
+               + rte_sched_port_get_array_base(params,
+                       e_RTE_SCHED_PORT_ARRAY_BMP_ARRAY, 0);
        port->queue_array = (struct rte_mbuf **)
                (port->memory + rte_sched_port_get_array_base(params,
-                                                             e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY));
+                       e_RTE_SCHED_PORT_ARRAY_QUEUE_ARRAY, 0));
 
        /* Pipe profile table */
        rte_sched_port_config_pipe_profile_table(port, params);
@@ -704,6 +764,35 @@ rte_sched_port_config(struct rte_sched_port_params *params)
        return port;
 }
 
+
+struct rte_sched_port *
+rte_sched_port_config(struct rte_sched_port_params *params)
+{
+       uint32_t mem_size;
+
+       /* Check user parameters. Determine the amount of memory to allocate */
+       mem_size = rte_sched_port_get_memory_footprint(params);
+       if (mem_size == 0)
+               return NULL;
+
+       return rte_sched_port_config_common(params, mem_size);
+}
+
+struct rte_sched_port *
+rte_sched_port_config_v2(struct rte_sched_port_params *params,
+                        uint32_t queue_array_size)
+{
+       uint32_t mem_size;
+
+       /* Check user parameters. Determine the amount of memory to allocate */
+       mem_size = rte_sched_port_get_memory_footprint_common(params,
+                                                             queue_array_size);
+       if (mem_size == 0)
+               return NULL;
+
+       return rte_sched_port_config_common(params, mem_size);
+}
+
 void
 rte_sched_port_free(struct rte_sched_port *port)
 {
@@ -736,10 +825,13 @@ static void
 rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
 {
        struct rte_sched_subport *s = port->subport + i;
+       char queue_size_str[(7 * RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) + 3];
 
+       rte_sched_build_queue_size_string(s->qsize, queue_size_str);
        RTE_LOG(DEBUG, SCHED, "Low level config for subport %u:\n"
                "    Token bucket: period = %u, credits per period = %u, size = 
%u\n"
                "    Traffic classes: period = %u, credits per period = [%u, 
%u, %u, %u]\n"
+               "    Traffic class queue-sizes: %s\n"
                "    Traffic class 3 oversubscription: wm min = %u, wm max = 
%u\n",
                i,
 
@@ -754,16 +846,18 @@ rte_sched_port_log_subport_config(struct rte_sched_port *port, uint32_t i)
                s->tc_credits_per_period[1],
                s->tc_credits_per_period[2],
                s->tc_credits_per_period[3],
+               queue_size_str,
 
                /* Traffic class 3 oversubscription */
                s->tc_ov_wm_min,
                s->tc_ov_wm_max);
 }
 
-int
-rte_sched_subport_config(struct rte_sched_port *port,
-       uint32_t subport_id,
-       struct rte_sched_subport_params *params)
+static int
+rte_sched_subport_config_common(struct rte_sched_port *port,
+                               uint32_t subport_id,
+                               struct rte_sched_subport_params *params,
+                               uint16_t *qsize)
 {
        struct rte_sched_subport *s;
        uint32_t i;
@@ -808,15 +902,13 @@ rte_sched_subport_config(struct rte_sched_port *port,
 
        /* Traffic Classes (TCs) */
        s->tc_period = rte_sched_time_ms_to_bytes(params->tc_period, port->rate);
+       s->tc_time = port->time + s->tc_period;
        for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) {
                s->tc_credits_per_period[i]
                        = rte_sched_time_ms_to_bytes(params->tc_period,
                                                     params->tc_rate[i]);
-       }
-       s->tc_time = port->time + s->tc_period;
-       for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
                s->tc_credits[i] = s->tc_credits_per_period[i];
-
+       }
 #ifdef RTE_SCHED_SUBPORT_TC_OV
        /* TC oversubscription */
        s->tc_ov_wm_min = port->mtu;
@@ -829,12 +921,31 @@ rte_sched_subport_config(struct rte_sched_port *port,
        s->tc_ov_rate = 0;
 #endif
 
+       rte_sched_subport_config_qsize(port, subport_id, qsize);
        rte_sched_port_log_subport_config(port, subport_id);
 
        return 0;
 }
 
 int
+rte_sched_subport_config(struct rte_sched_port *port,
+                        uint32_t subport_id,
+                        struct rte_sched_subport_params *params)
+{
+       return rte_sched_subport_config_common(port, subport_id, params, NULL);
+}
+
+int
+rte_sched_subport_config_v2(struct rte_sched_port *port,
+                           uint32_t subport_id,
+                           struct rte_sched_subport_params *params,
+                           uint16_t *qsize)
+{
+       return rte_sched_subport_config_common(port, subport_id, params,
+                                              qsize);
+}
+
+int
 rte_sched_pipe_config(struct rte_sched_port *port,
        uint32_t subport_id,
        uint32_t pipe_id,
diff --git a/lib/librte_sched/rte_sched.h b/lib/librte_sched/rte_sched.h
index 5d2a688..1e1d618 100644
--- a/lib/librte_sched/rte_sched.h
+++ b/lib/librte_sched/rte_sched.h
@@ -225,6 +225,20 @@ struct rte_sched_port *
 rte_sched_port_config(struct rte_sched_port_params *params);
 
 /**
+ * Hierarchical scheduler port configuration
+ *
+ * @param params
+ *   Port scheduler configuration parameter structure
+ * @param size_queue_array
+ *   Pre-calculated size of the port's queue-array
+ * @return
+ *   Handle to port scheduler instance upon success or NULL otherwise.
+ */
+struct rte_sched_port *
+rte_sched_port_config_v2(struct rte_sched_port_params *params,
+                        uint32_t size_queue_array);
+
+/**
  * Hierarchical scheduler port free
  *
  * @param port
@@ -251,6 +265,26 @@ rte_sched_subport_config(struct rte_sched_port *port,
        struct rte_sched_subport_params *params);
 
 /**
+ * Hierarchical scheduler subport configuration
+ *
+ * @param port
+ *   Handle to port scheduler instance
+ * @param subport_id
+ *   Subport ID
+ * @param params
+ *   Subport configuration parameters
+ * @param qsize
+ *   Array of traffic-class maximum queue-lengths
+ * @return
+ *   0 upon success, error code otherwise
+ */
+int
+rte_sched_subport_config_v2(struct rte_sched_port *port,
+                           uint32_t subport_id,
+                           struct rte_sched_subport_params *params,
+                           uint16_t *qsize);
+
+/**
  * Hierarchical scheduler pipe configuration
  *
  * @param port
@@ -281,6 +315,20 @@ rte_sched_pipe_config(struct rte_sched_port *port,
 uint32_t
 rte_sched_port_get_memory_footprint(struct rte_sched_port_params *params);
 
+/**
+ * Hierarchical scheduler memory footprint size per port
+ *
+ * @param params
+ *   Port scheduler configuration parameter structure
+ * @param size_queue_array
+ *   The required size of the port's queue-array
+ * @return
+ *   Memory footprint size in bytes upon success, 0 otherwise
+ */
+uint32_t
+rte_sched_port_get_memory_footprint_v2(struct rte_sched_port_params *params,
+                                      uint32_t size_queue_array);
+
 /*
  * Statistics
  *
diff --git a/lib/librte_sched/rte_sched_version.map b/lib/librte_sched/rte_sched_version.map
index 3aa159a..ce92b82 100644
--- a/lib/librte_sched/rte_sched_version.map
+++ b/lib/librte_sched/rte_sched_version.map
@@ -29,3 +29,11 @@ DPDK_2.1 {
        rte_sched_port_pkt_read_color;
 
 } DPDK_2.0;
+
+DPDK_18.05 {
+       global;
+
+       rte_sched_port_config_v2;
+       rte_sched_subport_config_v2;
+       rte_sched_port_get_memory_footprint_v2;
+} DPDK_2.1;
diff --git a/test/test/test_sched.c b/test/test/test_sched.c
index 32e500b..2b22ebe 100644
--- a/test/test/test_sched.c
+++ b/test/test/test_sched.c
@@ -15,7 +15,7 @@
 #include <rte_ip.h>
 #include <rte_byteorder.h>
 #include <rte_sched.h>
-
+#include <rte_malloc.h>
 
 #define SUBPORT         0
 #define PIPE            1
@@ -56,7 +56,7 @@ static struct rte_sched_port_params port_param = {
        .n_pipe_profiles = 1,
 };
 
-#define NB_MBUF          32
+#define NB_MBUF          8192
 #define MBUF_DATA_SZ     (2048 + RTE_PKTMBUF_HEADROOM)
 #define MEMPOOL_CACHE_SZ 0
 #define SOCKET           0
@@ -76,7 +76,8 @@ create_mempool(void)
 }
 
 static void
-prepare_pkt(struct rte_mbuf *mbuf)
+prepare_pkt(struct rte_mbuf *mbuf, uint32_t subport, uint32_t pipe, uint32_t tc,
+           uint32_t queue)
 {
        struct ether_hdr *eth_hdr;
        struct vlan_hdr *vlan1, *vlan2;
@@ -89,13 +90,14 @@ prepare_pkt(struct rte_mbuf *mbuf)
        eth_hdr = (struct ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 *sizeof(struct vlan_hdr));
        ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr +  sizeof(eth_hdr->ether_type));
 
-       vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
-       vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
+       vlan1->vlan_tci = rte_cpu_to_be_16(subport);
+       vlan2->vlan_tci = rte_cpu_to_be_16(pipe);
        eth_hdr->ether_type =  rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-       ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
+       ip_hdr->dst_addr = IPv4(0, 0, tc, queue);
 
 
-       rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);
+       rte_sched_port_pkt_write(mbuf, subport, pipe, tc, queue,
+                                e_RTE_METER_YELLOW);
 
        /* 64 byte packet */
        mbuf->pkt_len  = 60;
@@ -138,7 +140,7 @@ test_sched(void)
        for (i = 0; i < 10; i++) {
                in_mbufs[i] = rte_pktmbuf_alloc(mp);
                TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
-               prepare_pkt(in_mbufs[i]);
+               prepare_pkt(in_mbufs[i], SUBPORT, PIPE, TC, QUEUE);
        }
 
 
@@ -185,3 +187,245 @@ test_sched(void)
 }
 
 REGISTER_TEST_COMMAND(sched_autotest, test_sched);
+
+#define NB_SUBPORTS 2
+
+static struct rte_sched_subport_params subport_param_v2[] = {
+       {
+               .tb_rate = 1250000000,
+               .tb_size = 1000000,
+
+               .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+               .tc_period = 10,
+       },
+       {
+               .tb_rate = 1250000000,
+               .tb_size = 1000000,
+
+               .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+               .tc_period = 10,
+       },
+};
+
+static struct rte_sched_pipe_params pipe_profile_v2[] = {
+       { /* Profile #0 */
+               .tb_rate = 1250000000,
+               .tb_size = 1000000,
+
+               .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
+               .tc_period = 10,
+
+               .wrr_weights = {1, 1, 1, 1,
+                               1, 1, 1, 1,
+                               1, 1, 1, 1,
+                               1, 1, 1, 1},
+       },
+};
+
+static uint16_t subport_qsize[][RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE] = {
+       { 16, 32, 64, 128 },
+       { 256, 512, 1024, 2048 },
+};
+
+static struct rte_sched_port_params port_param_v2 = {
+       .socket = 0, /* computed */
+       .rate = 0, /* computed */
+       .mtu = 1522,
+       .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
+       .n_subports_per_port = 2,
+       .n_pipes_per_subport = 128,
+       .qsize = {32, 32, 32, 32},
+       .pipe_profiles = pipe_profile_v2,
+       .n_pipe_profiles = 1,
+};
+
+static uint32_t subport_total_qsize(struct rte_sched_port_params *pp,
+                                   uint16_t *qsize)
+{
+       uint32_t queue_array_size = 0;
+       uint32_t tc;
+
+       for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++)
+               queue_array_size += qsize[tc];
+
+       return (queue_array_size * RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS *
+               pp->n_pipes_per_subport * sizeof(struct rte_mbuf *));
+}
+
+static int fill_queue_to_drop(struct rte_mempool *mp,
+                             struct rte_sched_port *port,
+                             uint32_t in_subport, uint32_t in_pipe,
+                             uint32_t in_tc, uint16_t qsize)
+{
+       struct rte_mbuf **in_mbufs;
+       struct rte_mbuf **out_mbufs;
+       uint32_t in_queue = 0;
+       uint32_t i;
+       int err;
+
+       in_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+                             RTE_CACHE_LINE_SIZE);
+       TEST_ASSERT_NOT_NULL(in_mbufs, "Buffer array allocation failed\n");
+
+       out_mbufs = rte_malloc(NULL, ((qsize + 1) * sizeof(struct rte_mbuf *)),
+                              RTE_CACHE_LINE_SIZE);
+       TEST_ASSERT_NOT_NULL(out_mbufs, "Buffer array allocation failed\n");
+
+       /*
+        * Allocate qsize + 1 buffers so that we can completely fill the
+        * queue, then try to enqueue one more packet so that it will be tail
+        * dropped.
+        */
+       for (i = 0; i <= qsize; i++) {
+               in_mbufs[i] = rte_pktmbuf_alloc(mp);
+               TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
+               prepare_pkt(in_mbufs[i], in_subport, in_pipe, in_tc, in_queue);
+       }
+
+       /*
+        * All these packets should be queued correctly.
+        */
+       err = rte_sched_port_enqueue(port, in_mbufs, qsize);
+       TEST_ASSERT_EQUAL(err, qsize, "Wrong enqueue, err=%d\n", err);
+
+       /*
+        * This packet should fail to be queued, it will be freed when dropped.
+        */
+       err = rte_sched_port_enqueue(port, &in_mbufs[qsize], 1);
+       TEST_ASSERT_EQUAL(err, 0, "Enqueue didn't fail, but should have\n");
+       in_mbufs[qsize] = NULL;
+
+       /*
+        * With small queues we should be able to dequeue a full queue's worth
+        * of packets with a single call to rte_sched_port_dequeue.  With
+        * larger queues we will probably need to make multiple calls as we
+        * could run out of credits to dequeue all the packets in one attempt.
+        */
+       i = 0;
+       err = 1;
+       while (i < qsize && err != 0) {
+               err = rte_sched_port_dequeue(port, out_mbufs, qsize);
+               i += err;
+       }
+       TEST_ASSERT_EQUAL(i, qsize,
+                         "Wrong dequeue, err=%d, i: %u, qsize: %u\n",
+                         err, i, qsize);
+
+       /*
+        * Check that all the dequeued packets have the right numbers in them.
+        */
+       for (i = 0; i < qsize; i++) {
+               enum rte_meter_color color;
+               uint32_t out_subport, out_pipe, out_tc, out_queue;
+
+               color = rte_sched_port_pkt_read_color(out_mbufs[i]);
+               TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+
+               rte_sched_port_pkt_read_tree_path(out_mbufs[i],
+                               &out_subport, &out_pipe, &out_tc, &out_queue);
+
+               TEST_ASSERT_EQUAL(in_subport, out_subport, "Wrong subport\n");
+               TEST_ASSERT_EQUAL(in_pipe, out_pipe, "Wrong pipe\n");
+               TEST_ASSERT_EQUAL(in_tc, out_tc, "Wrong traffic_class\n");
+               TEST_ASSERT_EQUAL(in_queue, out_queue, "Wrong queue\n");
+               rte_pktmbuf_free(out_mbufs[i]);
+       }
+
+#ifdef RTE_SCHED_COLLECT_STATS
+       struct rte_sched_subport_stats subport_stats;
+       uint32_t tc_ov;
+
+       /*
+        * Did the subport stats see a packet dropped in this traffic-class?
+        */
+       rte_sched_subport_read_stats(port, in_subport, &subport_stats, &tc_ov);
+       TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc_dropped[in_tc], 1,
+                         "Wrong subport stats\n");
+#endif
+
+       rte_free(in_mbufs);
+       rte_free(out_mbufs);
+
+       return 0;
+}
+
+static int
+subport_fill_queues(struct rte_mempool *mp, struct rte_sched_port *port,
+                   uint32_t subport)
+{
+       uint32_t pipe;
+       uint32_t tc;
+       int err;
+
+       for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport; pipe++) {
+               for (tc = 0; tc < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; tc++) {
+                       err = fill_queue_to_drop(mp, port, subport, pipe, tc,
+                                                subport_qsize[subport][tc]);
+                       TEST_ASSERT_SUCCESS(err, "fill-queue-to-drop failed, "
+                                         "err=%d\n", err);
+               }
+       }
+       return 0;
+}
+
+/**
+ * Main test entry point for the sched library, using the v2 APIs that
+ * allow queue-size and WRED configurations on a per-subport basis.
+ */
+static int
+test_sched_v2(void)
+{
+       struct rte_mempool *mp = NULL;
+       struct rte_sched_port *port = NULL;
+       uint32_t subport;
+       uint32_t pipe;
+       uint32_t queue_array_size;
+       int err;
+
+       rte_log_set_level(RTE_LOGTYPE_EAL, RTE_LOG_DEBUG);
+
+       mp = create_mempool();
+       TEST_ASSERT_NOT_NULL(mp, "Error creating mempool\n");
+
+       port_param_v2.socket = 0;
+       port_param_v2.rate = (uint64_t) 10000 * 1000 * 1000 / 8;
+
+       queue_array_size = 0;
+       for (subport = 0; subport < NB_SUBPORTS; subport++)
+               queue_array_size +=
+                       subport_total_qsize(&port_param_v2,
+                                           &subport_qsize[subport][0]);
+
+       port = rte_sched_port_config_v2(&port_param_v2,
+                                       queue_array_size);
+       TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");
+
+       for (subport = 0; subport < NB_SUBPORTS; subport++) {
+               err = rte_sched_subport_config_v2(port, subport,
+                                                 &subport_param_v2[subport],
+                                                 &subport_qsize[subport][0]);
+               TEST_ASSERT_SUCCESS(err,
+                                   "Error config sched subport %u, err=%d\n",
+                                   subport, err);
+               for (pipe = 0; pipe < port_param_v2.n_pipes_per_subport;
+                    pipe++) {
+                       err = rte_sched_pipe_config(port, subport, pipe, 0);
+                       TEST_ASSERT_SUCCESS(err,
+                                           "Error config sched subport %u "
+                                           "pipe %u, err=%d\n",
+                                           subport, pipe, err);
+               }
+       }
+
+       for (subport = 0; subport < NB_SUBPORTS; subport++) {
+               err = subport_fill_queues(mp, port, subport);
+               TEST_ASSERT_SUCCESS(err, "subport-fill-queue failed, err=%d\n",
+                                   err);
+       }
+
+       rte_sched_port_free(port);
+
+       return 0;
+}
+
+REGISTER_TEST_COMMAND(sched_autotest_v2, test_sched_v2);
-- 
2.7.4
