On Thu, 28 Jan 2016 16:33:51 +0300 Ilya Maximets <i.maxim...@samsung.com> wrote:
> This command can be used to check the port/rxq assignment to > pmd threads. For each pmd thread of the datapath shows list > of queue-ids with port names. > > Additionally log message from pmd_thread_main() extended with > queue-id, and type of this message changed from INFO to DBG. > > Signed-off-by: Ilya Maximets <i.maxim...@samsung.com> > --- > INSTALL.DPDK.md | 9 ++++-- > lib/dpif-netdev.c | 77 > ++++++++++++++++++++++++++++++++++------------ > lib/netdev.c | 6 ++++ > lib/netdev.h | 1 + > vswitchd/ovs-vswitchd.8.in | 3 ++ > 5 files changed, 74 insertions(+), 22 deletions(-) > > diff --git a/INSTALL.DPDK.md b/INSTALL.DPDK.md > index c601358..316e350 100644 > --- a/INSTALL.DPDK.md > +++ b/INSTALL.DPDK.md > @@ -272,9 +272,12 @@ Performance Tuning: > > NIC port0 <-> OVS <-> VM <-> OVS <-> NIC port 1 > > - The OVS log can be checked to confirm that the port/rxq assignment to > - pmd threads is as required. This can also be checked with the following > - commands: > + The following command can be used to confirm that the port/rxq > assignment > + to pmd threads is as required: > + > + `ovs-appctl dpif-netdev/pmd-show-poll-lists` > + > + This can also be checked with: > > ``` > top -H > diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c > index 833174a..102e6b5 100644 > --- a/lib/dpif-netdev.c > +++ b/lib/dpif-netdev.c > @@ -565,8 +565,9 @@ get_dp_netdev(const struct dpif *dpif) > } > > enum pmd_info_type { > - PMD_INFO_SHOW_STATS, /* show how cpu cycles are spent */ > - PMD_INFO_CLEAR_STATS /* set the cycles count to 0 */ > + PMD_INFO_SHOW_STATS, /* Show how cpu cycles are spent. */ > + PMD_INFO_CLEAR_STATS, /* Set the cycles count to 0. */ > + PMD_INFO_SHOW_POLL /* Show poll-lists of pmd threads. 
*/ > }; > > static void > @@ -675,6 +676,35 @@ pmd_info_clear_stats(struct ds *reply OVS_UNUSED, > } > > static void > +pmd_info_show_poll(struct ds *reply, struct dp_netdev_pmd_thread *pmd) > +{ > + if (pmd->core_id != NON_PMD_CORE_ID) { > + struct rxq_poll *poll; > + const char *prev_name = NULL; > + > + ds_put_format(reply, "pmd thread numa_id %d core_id %u:\n", > + pmd->numa_id, pmd->core_id); > + > + ovs_mutex_lock(&pmd->poll_mutex); > + LIST_FOR_EACH (poll, node, &pmd->poll_list) { > + const char *name = netdev_get_name(poll->port->netdev); > + > + if (!prev_name || strcmp(name, prev_name)) { > + if (prev_name) { > + ds_put_cstr(reply, "\n"); > + } > + ds_put_format(reply, "\tport: %s\tqueue-id:", > + netdev_get_name(poll->port->netdev)); > + } > + ds_put_format(reply, " %d", netdev_rxq_get_queue_id(poll->rx)); > + prev_name = name; > + } > + ovs_mutex_unlock(&pmd->poll_mutex); > + ds_put_cstr(reply, "\n"); > + } > +} > + > +static void > dpif_netdev_pmd_info(struct unixctl_conn *conn, int argc, const char *argv[], > void *aux) > { > @@ -700,22 +730,26 @@ dpif_netdev_pmd_info(struct unixctl_conn *conn, int > argc, const char *argv[], > } > > CMAP_FOR_EACH (pmd, node, &dp->poll_threads) { > - unsigned long long stats[DP_N_STATS]; > - uint64_t cycles[PMD_N_CYCLES]; > - int i; > + if (type == PMD_INFO_SHOW_POLL) { > + pmd_info_show_poll(&reply, pmd); > + } else { > + unsigned long long stats[DP_N_STATS]; > + uint64_t cycles[PMD_N_CYCLES]; > + int i; > > - /* Read current stats and cycle counters */ > - for (i = 0; i < ARRAY_SIZE(stats); i++) { > - atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); > - } > - for (i = 0; i < ARRAY_SIZE(cycles); i++) { > - atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); > - } > + /* Read current stats and cycle counters */ > + for (i = 0; i < ARRAY_SIZE(stats); i++) { > + atomic_read_relaxed(&pmd->stats.n[i], &stats[i]); > + } > + for (i = 0; i < ARRAY_SIZE(cycles); i++) { > + atomic_read_relaxed(&pmd->cycles.n[i], &cycles[i]); 
> + } > > - if (type == PMD_INFO_CLEAR_STATS) { > - pmd_info_clear_stats(&reply, pmd, stats, cycles); > - } else if (type == PMD_INFO_SHOW_STATS) { > - pmd_info_show_stats(&reply, pmd, stats, cycles); > + if (type == PMD_INFO_CLEAR_STATS) { > + pmd_info_clear_stats(&reply, pmd, stats, cycles); > + } else if (type == PMD_INFO_SHOW_STATS) { > + pmd_info_show_stats(&reply, pmd, stats, cycles); > + } > } > } > > @@ -729,7 +763,8 @@ static int > dpif_netdev_init(void) > { > static enum pmd_info_type show_aux = PMD_INFO_SHOW_STATS, > - clear_aux = PMD_INFO_CLEAR_STATS; > + clear_aux = PMD_INFO_CLEAR_STATS, > + poll_aux = PMD_INFO_SHOW_POLL; > > unixctl_command_register("dpif-netdev/pmd-stats-show", "[dp]", > 0, 1, dpif_netdev_pmd_info, > @@ -737,6 +772,9 @@ dpif_netdev_init(void) > unixctl_command_register("dpif-netdev/pmd-stats-clear", "[dp]", > 0, 1, dpif_netdev_pmd_info, > (void *)&clear_aux); > + unixctl_command_register("dpif-netdev/pmd-show-poll-lists", "[dp]", > + 0, 1, dpif_netdev_pmd_info, > + (void *)&poll_aux); > return 0; > } > > @@ -2678,8 +2716,9 @@ reload: > > /* List port/core affinity */ > for (i = 0; i < poll_cnt; i++) { > - VLOG_INFO("Core %d processing port \'%s\'\n", pmd->core_id, > - netdev_get_name(poll_list[i].port->netdev)); > + VLOG_DBG("Core %d processing port \'%s\' with queue-id %d\n", > + pmd->core_id, netdev_get_name(poll_list[i].port->netdev), > + netdev_rxq_get_queue_id(poll_list[i].rx)); > }

If you are demoting an INFO log message, I would ask you to mention that, along with the new command, in the NEWS file. I am saying this because I know people watch for that message to make sure it is working.

Another concern is with the command name. We have pmd-stats-<action>, where the action can be "clear" or "show"; you're proposing the opposite order: pmd-<action>-<something>. I believe we will want to have rx queue management done manually at some point in the future, so pmd-set-<something> seems confusing. Perhaps pmd-rxq-<action>?
pmd-rxq-show        does what you propose: show the rxq for each pmd
pmd-rxq-set         we can get pmd id, port and queue id to pin
pmd-rxq-clear       to reset any fixed mapping
pmd-rxq-stop        to stop polling a specific queue
pmd-rxq-start       to start
pmd-rxq-stats-show  stats per queue?
pmd-rxq-stats-clear ...

Other than that, the patch looks great and works for me.

Thanks,
--
fbl
_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev