The counters use the x86 TSC if available (currently only with DPDK). They will be exposed by subsequent commits.
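
For reference, the counting scheme is simple delta accounting: pmd_cycles_counter_diff() reads the TSC and returns the cycles elapsed since its previous call for the same pmd thread, and pmd_count_previous_cycles() charges that interval to one of the PMD_CYCLES_* buckets. Below is a standalone sketch of the same pattern, not part of this patch: it assumes GCC's __rdtsc() on x86 as a stand-in for rte_get_tsc_cycles() and uses plain uint64_t counters instead of the per-pmd OVS atomics.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>   /* __rdtsc() */

enum cycles_type { CYCLES_POLLING, CYCLES_PROCESSING, CYCLES_OTHER, N_CYCLES };

struct cycles_counter {
    uint64_t last;          /* TSC value at the previous read. */
    uint64_t n[N_CYCLES];   /* Accumulated cycles per activity. */
};

/* Returns the cycles elapsed since the last call with the same 'c' and
 * remembers the current TSC value for the next call. */
static inline uint64_t
cycles_diff(struct cycles_counter *c)
{
    uint64_t old_cycles = c->last, new_cycles = __rdtsc();

    c->last = new_cycles;
    return new_cycles - old_cycles;
}

/* Charges the interval since the previous read to activity 'type'. */
static inline void
cycles_count_previous(struct cycles_counter *c, enum cycles_type type)
{
    c->n[type] += cycles_diff(c);
}

int
main(void)
{
    struct cycles_counter c = { 0, { 0 } };
    volatile unsigned long sink = 0;
    unsigned long i;

    cycles_diff(&c);                       /* Initialize 'last'. */

    for (i = 0; i < 1000000; i++) {
        sink += i;                         /* Stand-in for polling. */
    }
    cycles_count_previous(&c, CYCLES_POLLING);

    for (i = 0; i < 1000000; i++) {
        sink += i * i;                     /* Stand-in for processing. */
    }
    cycles_count_previous(&c, CYCLES_PROCESSING);

    printf("polling: %"PRIu64" cycles, processing: %"PRIu64" cycles\n",
           c.n[CYCLES_POLLING], c.n[CYCLES_PROCESSING]);
    return 0;
}

In the patch itself the counters live in each struct dp_netdev_pmd_thread and are updated with relaxed atomic reads and writes, so other threads can read them once later commits expose them.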
Signed-off-by: Daniele Di Proietto <diproiet...@vmware.com>
---
 lib/dpif-netdev.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 69 insertions(+), 1 deletion(-)

diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
index 8f267f6..c093d10 100644
--- a/lib/dpif-netdev.c
+++ b/lib/dpif-netdev.c
@@ -227,6 +227,13 @@ enum dp_stat_type {
     DP_N_STATS
 };
 
+enum pmd_cycles_counter_type {
+    PMD_CYCLES_POLLING,    /* Cycles spent polling NICs. */
+    PMD_CYCLES_PROCESSING, /* Cycles spent processing packets. */
+    PMD_CYCLES_OTHER,      /* Cycles spent doing other tasks. */
+    PMD_N_CYCLES
+};
+
 /* A port in a netdev-based datapath. */
 struct dp_netdev_port {
     struct cmap_node node;  /* Node in dp_netdev's 'ports'. */
@@ -342,6 +349,12 @@ struct dp_netdev_pmd_stats {
     ATOMIC(unsigned long long) n[DP_N_STATS];
 };
 
+/* Contained by struct dp_netdev_pmd_thread's 'cycles' member. */
+struct dp_netdev_pmd_cycles {
+    /* Indexed by PMD_CYCLES_*. */
+    ATOMIC(uint64_t) n[PMD_N_CYCLES];
+};
+
 /* PMD: Poll modes drivers. PMD accesses devices via polling to eliminate
  * the performance overhead of interrupt processing. Therefore netdev can
  * not implement rx-wait for these devices. dpif-netdev needs to poll
@@ -384,6 +397,12 @@ struct dp_netdev_pmd_thread {
     /* Statistics. */
     struct dp_netdev_pmd_stats stats;
 
+    /* Cycles counters. */
+    struct dp_netdev_pmd_cycles cycles;
+
+    /* Used to count cycles. See 'pmd_cycles_counter_diff()'. */
+    uint64_t last_cycles;
+
     struct latch exit_latch;        /* For terminating the pmd thread. */
     atomic_uint change_seq;         /* For reloading pmd ports. */
     pthread_t thread;
@@ -393,6 +412,10 @@ struct dp_netdev_pmd_thread {
     int numa_id;                    /* numa node id of this pmd thread. */
 };
 
+static inline uint64_t pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *);
+static inline void pmd_count_previous_cycles(struct dp_netdev_pmd_thread *,
+                                             enum pmd_cycles_counter_type);
+
 #define PMD_INITIAL_SEQ 1
 
 /* Interface to netdev-based datapath. */
@@ -2093,6 +2116,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
     if (pmd->core_id == NON_PMD_CORE_ID) {
         ovs_mutex_lock(&dp->non_pmd_mutex);
         ovs_mutex_lock(&dp->port_mutex);
+        pmd_cycles_counter_diff(pmd);
     }
 
     pp = execute->packet;
@@ -2102,6 +2126,7 @@ dpif_netdev_execute(struct dpif *dpif, struct dpif_execute *execute)
         dp_netdev_pmd_unref(pmd);
         ovs_mutex_unlock(&dp->port_mutex);
         ovs_mutex_unlock(&dp->non_pmd_mutex);
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
     }
 
     return 0;
@@ -2244,6 +2269,36 @@ dp_netdev_actions_free(struct dp_netdev_actions *actions)
 }
 
+/* Returns the length of the interval since the last call to this function
+ * with the same 'pmd' argument. */
+static inline uint64_t
+pmd_cycles_counter_diff(struct dp_netdev_pmd_thread *pmd)
+{
+    uint64_t old_cycles = pmd->last_cycles,
+#ifdef DPDK_NETDEV
+             new_cycles = rte_get_tsc_cycles();
+#else
+             new_cycles = 0;
+#endif
+
+    pmd->last_cycles = new_cycles;
+
+    return new_cycles - old_cycles;
+}
+
+/* Updates the pmd cycles counter, charging the cycles spent since the last
+ * update to the activity specified in 'type'. */
+static inline void
+pmd_count_previous_cycles(struct dp_netdev_pmd_thread *pmd,
+                          enum pmd_cycles_counter_type type)
+{
+    uint64_t c;
+
+    atomic_read_relaxed(&pmd->cycles.n[type], &c);
+    c += pmd_cycles_counter_diff(pmd);
+    atomic_store_relaxed(&pmd->cycles.n[type], c);
+}
+
 static void
 dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
                            struct dp_netdev_port *port,
@@ -2256,6 +2311,8 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
     if (!error) {
         int i;
 
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
         *recirc_depth_get() = 0;
 
         /* XXX: initialize md in netdev implementation. */
@@ -2263,6 +2320,7 @@ dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
             packets[i]->md = PKT_METADATA_INITIALIZER(port->port_no);
         }
         dp_netdev_input(pmd, packets, cnt);
+        pmd_count_previous_cycles(pmd, PMD_CYCLES_PROCESSING);
     } else if (error != EAGAIN && error != EOPNOTSUPP) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
 
@@ -2282,6 +2340,7 @@ dpif_netdev_run(struct dpif *dpif)
     uint64_t new_tnl_seq;
 
     ovs_mutex_lock(&dp->non_pmd_mutex);
+    pmd_count_previous_cycles(non_pmd, PMD_CYCLES_OTHER);
     CMAP_FOR_EACH (port, node, &dp->ports) {
         if (!netdev_is_pmd(port->netdev)) {
             int i;
@@ -2392,6 +2451,10 @@ pmd_thread_main(void *f_)
     /* Stores the pmd thread's 'pmd' to 'per_pmd_key'. */
    ovsthread_setspecific(pmd->dp->per_pmd_key, pmd);
     pmd_thread_setaffinity_cpu(pmd->core_id);
+
+    /* Initialize the cycles counter. */
+    pmd_cycles_counter_diff(pmd);
+
 reload:
     emc_cache_init(&pmd->flow_cache);
     poll_cnt = pmd_load_queues(pmd, &poll_list, poll_cnt);
@@ -2400,6 +2463,7 @@ reload:
      * reloading the updated configuration. */
     dp_netdev_pmd_reload_done(pmd);
 
+    pmd_count_previous_cycles(pmd, PMD_CYCLES_OTHER);
     for (;;) {
         int i;
 
@@ -2410,6 +2474,8 @@ reload:
         if (lc++ > 1024) {
             unsigned int seq;
 
+            pmd_count_previous_cycles(pmd, PMD_CYCLES_POLLING);
+
             lc = 0;
 
             emc_cache_slow_sweep(&pmd->flow_cache);
@@ -2420,6 +2486,7 @@ reload:
                 port_seq = seq;
                 break;
             }
+
         }
     }
 
@@ -2560,10 +2627,11 @@ dp_netdev_configure_pmd(struct dp_netdev_pmd_thread *pmd, struct dp_netdev *dp,
     ovs_mutex_init(&pmd->flow_mutex);
     dpcls_init(&pmd->cls);
     cmap_init(&pmd->flow_table);
-    /* init the 'flow_cache' since there is no
+    /* init the 'flow_cache' and cycles counter since there is no
      * actual thread created for NON_PMD_CORE_ID. */
     if (core_id == NON_PMD_CORE_ID) {
         emc_cache_init(&pmd->flow_cache);
+        pmd_cycles_counter_diff(pmd);
     }
     cmap_insert(&dp->poll_threads, CONST_CAST(struct cmap_node *, &pmd->node),
                 hash_int(core_id, 0));
-- 
2.1.4

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev