This reverts commit d91c4b1bb5a938734fe8e66da8f965304919f38e. When TC OV is enabled unconditionally, a performance drop of ~20% is observed; hence this commit is reverted.
Fixes: d91c4b1bb5a9 ("sched: enable traffic class oversubscription unconditionally") Signed-off-by: Megha Ajmera <megha.ajm...@intel.com> --- config/rte_config.h | 1 + drivers/net/softnic/rte_eth_softnic_tm.c | 18 +++++ examples/qos_sched/init.c | 2 + lib/sched/rte_sched.c | 90 ++++++++++++++++++++++++ 4 files changed, 111 insertions(+) diff --git a/config/rte_config.h b/config/rte_config.h index 8eb29c1525..de6fea5b67 100644 --- a/config/rte_config.h +++ b/config/rte_config.h @@ -90,6 +90,7 @@ /* rte_sched defines */ #undef RTE_SCHED_CMAN +#undef RTE_SCHED_SUBPORT_TC_OV /* rte_graph defines */ #define RTE_GRAPH_BURST_SIZE 256 diff --git a/drivers/net/softnic/rte_eth_softnic_tm.c b/drivers/net/softnic/rte_eth_softnic_tm.c index 6a7766ba1c..e74092ce7f 100644 --- a/drivers/net/softnic/rte_eth_softnic_tm.c +++ b/drivers/net/softnic/rte_eth_softnic_tm.c @@ -595,9 +595,15 @@ static const struct rte_tm_level_capabilities tm_level_cap[] = { .sched_sp_n_priorities_max = 1, .sched_wfq_n_children_per_group_max = UINT32_MAX, .sched_wfq_n_groups_max = 1, +#ifdef RTE_SCHED_SUBPORT_TC_OV .sched_wfq_weight_max = UINT32_MAX, .sched_wfq_packet_mode_supported = 0, .sched_wfq_byte_mode_supported = 1, +#else + .sched_wfq_weight_max = 1, + .sched_wfq_packet_mode_supported = 0, + .sched_wfq_byte_mode_supported = 1, +#endif .stats_mask = STATS_MASK_DEFAULT, } }, @@ -2822,6 +2828,8 @@ pmd_tm_hierarchy_commit(struct rte_eth_dev *dev, return 0; } +#ifdef RTE_SCHED_SUBPORT_TC_OV + static int update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight) { @@ -2859,6 +2867,8 @@ update_pipe_weight(struct rte_eth_dev *dev, struct tm_node *np, uint32_t weight) return 0; } +#endif + static int update_queue_weight(struct rte_eth_dev *dev, struct tm_node *nq, uint32_t weight) @@ -2973,6 +2983,7 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev, rte_strerror(EINVAL)); /* fall-through */ case TM_NODE_LEVEL_PIPE: +#ifdef RTE_SCHED_SUBPORT_TC_OV if (update_pipe_weight(dev, n, 
weight)) return -rte_tm_error_set(error, EINVAL, @@ -2980,6 +2991,13 @@ pmd_tm_node_parent_update(struct rte_eth_dev *dev, NULL, rte_strerror(EINVAL)); return 0; +#else + return -rte_tm_error_set(error, + EINVAL, + RTE_TM_ERROR_TYPE_NODE_WEIGHT, + NULL, + rte_strerror(EINVAL)); +#endif /* fall-through */ case TM_NODE_LEVEL_TC: return -rte_tm_error_set(error, diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c index 8a0fb8a374..3c1f0bc680 100644 --- a/examples/qos_sched/init.c +++ b/examples/qos_sched/init.c @@ -183,7 +183,9 @@ static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = { .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175, 305175}, .tc_period = 40, +#ifdef RTE_SCHED_SUBPORT_TC_OV .tc_ov_weight = 1, +#endif .wrr_weights = {1, 1, 1, 1}, }, diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index ec74bee939..7298df9e90 100644 --- a/lib/sched/rte_sched.c +++ b/lib/sched/rte_sched.c @@ -1317,12 +1317,14 @@ rte_sched_subport_config(struct rte_sched_port *port, for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) s->grinder_base_bmp_pos[i] = RTE_SCHED_PIPE_INVALID; +#ifdef RTE_SCHED_SUBPORT_TC_OV /* TC oversubscription */ s->tc_ov_wm_min = port->mtu; s->tc_ov_period_id = 0; s->tc_ov = 0; s->tc_ov_n = 0; s->tc_ov_rate = 0; +#endif } { @@ -1342,9 +1344,11 @@ rte_sched_subport_config(struct rte_sched_port *port, else profile->tc_credits_per_period[i] = 0; +#ifdef RTE_SCHED_SUBPORT_TC_OV s->tc_ov_wm_max = rte_sched_time_ms_to_bytes(profile->tc_period, s->pipe_tc_be_rate_max); s->tc_ov_wm = s->tc_ov_wm_max; +#endif s->profile = subport_profile_id; } @@ -2251,6 +2255,50 @@ rte_sched_port_enqueue(struct rte_sched_port *port, struct rte_mbuf **pkts, return result; } +#ifndef RTE_SCHED_SUBPORT_TC_OV + +static inline void +grinder_credits_update(struct rte_sched_port *port, + struct rte_sched_subport *subport, uint32_t pos) +{ + struct rte_sched_grinder *grinder = 
subport->grinder + pos; + struct rte_sched_pipe *pipe = grinder->pipe; + struct rte_sched_pipe_profile *params = grinder->pipe_params; + struct rte_sched_subport_profile *sp = grinder->subport_params; + uint64_t n_periods; + uint32_t i; + + /* Subport TB */ + n_periods = (port->time - subport->tb_time) / sp->tb_period; + subport->tb_credits += n_periods * sp->tb_credits_per_period; + subport->tb_credits = RTE_MIN(subport->tb_credits, sp->tb_size); + subport->tb_time += n_periods * sp->tb_period; + + /* Pipe TB */ + n_periods = (port->time - pipe->tb_time) / params->tb_period; + pipe->tb_credits += n_periods * params->tb_credits_per_period; + pipe->tb_credits = RTE_MIN(pipe->tb_credits, params->tb_size); + pipe->tb_time += n_periods * params->tb_period; + + /* Subport TCs */ + if (unlikely(port->time >= subport->tc_time)) { + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) + subport->tc_credits[i] = sp->tc_credits_per_period[i]; + + subport->tc_time = port->time + sp->tc_period; + } + + /* Pipe TCs */ + if (unlikely(port->time >= pipe->tc_time)) { + for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++) + pipe->tc_credits[i] = params->tc_credits_per_period[i]; + + pipe->tc_time = port->time + params->tc_period; + } +} + +#else + static inline uint64_t grinder_tc_ov_credits_update(struct rte_sched_port *port, struct rte_sched_subport *subport, uint32_t pos) @@ -2345,6 +2393,46 @@ grinder_credits_update(struct rte_sched_port *port, } } +#endif /* RTE_SCHED_TS_CREDITS_UPDATE, RTE_SCHED_SUBPORT_TC_OV */ + + +#ifndef RTE_SCHED_SUBPORT_TC_OV + +static inline int +grinder_credits_check(struct rte_sched_port *port, + struct rte_sched_subport *subport, uint32_t pos) +{ + struct rte_sched_grinder *grinder = subport->grinder + pos; + struct rte_sched_pipe *pipe = grinder->pipe; + struct rte_mbuf *pkt = grinder->pkt; + uint32_t tc_index = grinder->tc_index; + uint64_t pkt_len = pkt->pkt_len + port->frame_overhead; + uint64_t subport_tb_credits = subport->tb_credits; 
+ uint64_t subport_tc_credits = subport->tc_credits[tc_index]; + uint64_t pipe_tb_credits = pipe->tb_credits; + uint64_t pipe_tc_credits = pipe->tc_credits[tc_index]; + int enough_credits; + + /* Check queue credits */ + enough_credits = (pkt_len <= subport_tb_credits) && + (pkt_len <= subport_tc_credits) && + (pkt_len <= pipe_tb_credits) && + (pkt_len <= pipe_tc_credits); + + if (!enough_credits) + return 0; + + /* Update port credits */ + subport->tb_credits -= pkt_len; + subport->tc_credits[tc_index] -= pkt_len; + pipe->tb_credits -= pkt_len; + pipe->tc_credits[tc_index] -= pkt_len; + + return 1; +} + +#else + static inline int grinder_credits_check(struct rte_sched_port *port, struct rte_sched_subport *subport, uint32_t pos) @@ -2391,6 +2479,8 @@ grinder_credits_check(struct rte_sched_port *port, return 1; } +#endif /* RTE_SCHED_SUBPORT_TC_OV */ + static inline int grinder_schedule(struct rte_sched_port *port, -- 2.25.1