The counters use the x86 TSC if available (currently only with DPDK). They will be exposed by subsequent commits.
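
For reference, here is a minimal, standalone sketch of the accounting pattern
this patch introduces (illustrative only, not OVS code and not part of the
patch): each call to a diff helper returns the TSC ticks elapsed since the
previous call, and the caller charges that interval to whatever the thread was
doing up to that point.  __rdtsc() from <x86intrin.h> stands in for DPDK's
rte_get_tsc_cycles(); all names below are hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

enum cycles_type { CYC_POLLING, CYC_PROCESSING, CYC_OTHER, CYC_N };

struct cycles_state {
    uint64_t last;       /* TSC value recorded at the previous call. */
    uint64_t n[CYC_N];   /* Accumulated ticks per activity. */
};

/* Returns the ticks elapsed since the previous call and resets the mark. */
static uint64_t
cycles_counter_diff(struct cycles_state *c)
{
    uint64_t now = __rdtsc();
    uint64_t diff = now - c->last;

    c->last = now;
    return diff;
}

/* Charges the interval that just ended to activity 'type'. */
static void
count_previous_cycles(struct cycles_state *c, enum cycles_type type)
{
    c->n[type] += cycles_counter_diff(c);
}

int
main(void)
{
    struct cycles_state c = { .last = __rdtsc() };

    /* ... poll NICs here ... */
    count_previous_cycles(&c, CYC_POLLING);
    /* ... process the received batch here ... */
    count_previous_cycles(&c, CYC_PROCESSING);

    printf("polling=%llu processing=%llu other=%llu\n",
           (unsigned long long) c.n[CYC_POLLING],
           (unsigned long long) c.n[CYC_PROCESSING],
           (unsigned long long) c.n[CYC_OTHER]);
    return 0;
}

The patch below follows the same pattern: pmd_cycles_counter_diff() is called
once to initialize 'last_cycles', and every later call both returns the
elapsed interval and starts the next one, so each stretch of time is
attributed to exactly one counter.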
Signed-off-by: Daniele Di Proietto <diproiet...@vmware.com>
---
 lib/dpif-netdev.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 66 insertions(+), 1 deletion(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 44e25f4..09e106f 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -227,6 +227,13 @@ enum dp_stat_type {
     DP_N_STATS
 };
 
+enum pmd_cycles_counter_type {
+    PMD_CYCLES_POLLING,     /* Cycles spent polling NICs. */
+    PMD_CYCLES_PROCESSING,  /* Cycles spent processing packets. */
+    PMD_CYCLES_OTHER,       /* Cycles spent doing other tasks. */
+    PMD_N_CYCLES
+};
+
 /* A port in a netdev-based datapath. */
 struct dp_netdev_port {
     struct cmap_node node;      /* Node in dp_netdev's 'ports'. */
@@ -342,6 +349,12 @@ struct dp_netdev_pmd_stats {
     unsigned long long int n[DP_N_STATS];
 };
 
+/* Contained by struct dp_netdev_pmd_thread's 'cycles' member. */
+struct dp_netdev_pmd_cycles {
+    /* Indexed by PMD_CYCLES_*. */
+    uint64_t n[PMD_N_CYCLES];
+};
+
 /* PMD: Poll modes drivers.  PMD accesses devices via polling to eliminate
  * the performance overhead of interrupt processing.  Therefore netdev can
  * not implement rx-wait for these devices.  dpif-netdev needs to poll
@@ -383,6 +396,11 @@ struct dp_netdev_pmd_thread {
     /* Statistics. */
     struct dp_netdev_pmd_stats stats;
 
+    /* Cycles counters. */
+    struct dp_netdev_pmd_cycles cycles;
+
+    /* Used to count cycles.  See 'pmd_cycles_counter_diff()'. */
+    uint64_t last_cycles;
 
     struct latch exit_latch;        /* For terminating the pmd thread. */
     atomic_uint change_seq;         /* For reloading pmd ports. */
@@ -393,6 +411,12 @@ struct dp_netdev_pmd_thread {
     int numa_id;                    /* numa node id of this pmd thread. */
 };
 
+static inline uint64_t
+pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *);
+static inline void
+pmd_count_previous_cycles(struct dp_netdev_pmd_thread *,
+                          enum pmd_cycles_counter_type);
+
 #define PMD_INITIAL_SEQ 1
 
 /* Interface to netdev-based datapath. */
@@ -2072,6 +2096,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
     if (pmd->core_id == NON_PMD_CORE_ID) {
         ovs_mutex_lock(&dp->non_pmd_mutex);
         ovs_mutex_lock(&dp->port_mutex);
+        pmd_cycles_counter_diff(pmd);
     }
 
     dp_netdev_execute_actions(pmd, &pp, 1, false, execute->actions,
@@ -2080,6 +2105,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
         dp_netdev_pmd_unref(pmd);
         ovs_mutex_unlock(&dp->port_mutex);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
     }
 
     /* Even though may_steal is set to false, some actions could modify or
@@ -2227,6 +2253,32 @@ dp_netdev_actions_free(struct dp_netdev_actions *actions)
 }
 
+/* This function returns the length of the interval since the last call
+ * to the function itself (with the same 'pmd' argument). */
+static inline uint64_t
+pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *pmd)
+{
+    uint64_t old_cycles = pmd->last_cycles,
+#ifdef DPDK_NETDEV
+             new_cycles = rte_get_tsc_cycles();
+#else
+             new_cycles = 0;
+#endif
+
+    pmd->last_cycles = new_cycles;
+
+    return new_cycles - old_cycles;
+}
+
+/* Updates the pmd cycles counter, attributing the cycles elapsed since the
+ * previous call to the reason specified in 'type'. */
+static inline void
+pmd_count_previous_cycles(struct dp_netdev_pmd_thread *pmd,
+                          enum pmd_cycles_counter_type type)
+{
+    pmd->cycles.n[type] += pmd_cycles_counter_diff(pmd);
+}
+
 static void
 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                            struct dp_netdev_port *port,
@@ -2239,6 +2291,8 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
     if (!error) {
         int i;
 
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
         *recirc_depth_get() = 0;
 
         /* XXX: initialize md in netdev implementation. */
@@ -2246,6 +2300,7 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
             packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
         }
         dp_netdev_input(pmd, packets, cnt);
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
     } else if (error != EAGAIN && error != EOPNOTSUPP) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -2265,6 +2320,7 @@ dpif_netdev_run(struct dpif *dpif)
     uint64_t new_tnl_seq;
 
     ovs_mutex_lock(&dp->non_pmd_mutex);
+    pmd_count_previous_cycles(non_pmd, PMD_CYCLES_OTHER);
     CMAP_FOR_EACH (port, node, &dp->ports) {
         if (!netdev_is_pmd(port->netdev)) {
             int i;
@@ -2375,6 +2431,10 @@ pmd_thread_main(void *f_)
     /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
     ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
     pmd_thread_setaffinity_cpu(pmd->core_id);
+
+    /* Initialize the cycles counter. */
+    pmd_cycles_counter_diff(pmd);
+
 reload:
     emc_cache_init(&pmd->flow_cache);
     poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
@@ -2383,6 +2443,7 @@ reload:
      * reloading the updated configuration. */
     dp_netdev_pmd_reload_done(pmd);
 
+    pmd_count_previous_cycles(pmd, PMD_CYCLES_OTHER);
     for (;;) {
         int i;
 
@@ -2393,6 +2454,8 @@ reload:
         if (lc++ > 1024) {
             unsigned int seq;
 
+            pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
             lc = 0;
 
             emc_cache_slow_sweep(&pmd->flow_cache);
@@ -2403,6 +2466,7 @@ reload:
                 port_seq = seq;
                 break;
             }
+
         }
     }
 
@@ -2543,10 +2607,11 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     ovs_mutex_init(&pmd->flow_mutex);
     dpcls_init(&pmd->cls);
     cmap_init(&pmd->flow_table);
-    /* init the 'flow_cache' since there is no
+    /* init the 'flow_cache' and cycles counter since there is no
      * actual thread created for NON_PMD_CORE_ID. */
     if (core_id == NON_PMD_CORE_ID) {
         emc_cache_init(&pmd->flow_cache);
+        pmd_cycles_counter_diff(pmd);
    }
     cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                 hash_int(core_id, 0));
-- 
2.1.4