Signed-off-by: Ryan Wilson <wr...@nicira.com>
---
v2: Fixed bug with group stats all buckets, cleaned up ofgroup unref code,
    added Andy Zhou's test for more complete test coverage
v3: Split group ref/unref, support for group and bucket stats, and Andy
    Zhou's tests into 3 patches.
---
 lib/ofp-util.h               |   12 ++++----
 ofproto/ofproto-dpif-xlate.c |   51 ++++++++++++++++++++++++-------
 ofproto/ofproto-dpif.c       |   68 ++++++++++++++++++++++++++----------------
 ofproto/ofproto-dpif.h       |    3 ++
 ofproto/ofproto-provider.h   |    2 +-
 ofproto/ofproto.c            |    2 +-
 6 files changed, 96 insertions(+), 42 deletions(-)
diff --git a/lib/ofp-util.h b/lib/ofp-util.h
index ce6045b..552b006 100644
--- a/lib/ofp-util.h
+++ b/lib/ofp-util.h
@@ -1050,6 +1050,11 @@ int ofputil_decode_queue_stats(struct ofputil_queue_stats *qs, struct ofpbuf *ms
 void ofputil_append_queue_stat(struct list *replies,
                                const struct ofputil_queue_stats *oqs);
 
+struct bucket_counter {
+    uint64_t packet_count;   /* Number of packets processed by bucket. */
+    uint64_t byte_count;     /* Number of bytes processed by bucket. */
+};
+
 /* Bucket for use in groups. */
 struct ofputil_bucket {
     struct list list_node;
@@ -1062,6 +1067,8 @@ struct ofputil_bucket {
                              * failover groups. */
     struct ofpact *ofpacts;  /* Series of "struct ofpact"s. */
     size_t ofpacts_len;      /* Length of ofpacts, in bytes. */
+
+    struct bucket_counter stats;
 };
 
 /* Protocol-independent group_mod. */
@@ -1072,11 +1079,6 @@ struct ofputil_group_mod {
     struct list buckets;     /* Contains "struct ofputil_bucket"s. */
 };
 
-struct bucket_counter {
-    uint64_t packet_count;   /* Number of packets processed by bucket. */
-    uint64_t byte_count;     /* Number of bytes processed by bucket. */
-};
-
 /* Group stats reply, independent of protocol. */
 struct ofputil_group_stats {
     uint32_t group_id;    /* Group identifier. */
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index f08d1ec..a9672f6 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -235,6 +235,7 @@ enum xc_type {
     XC_LEARN,
     XC_NORMAL,
     XC_FIN_TIMEOUT,
+    XC_GROUP,
 };
 
 /* xlate_cache entries hold enough information to perform the side effects of
@@ -279,6 +280,10 @@ struct xc_entry {
             uint16_t idle;
             uint16_t hard;
         } fin;
+        struct {
+            struct group_dpif *group;
+            struct ofputil_bucket *bucket;
+        } group;
     } u;
 };
 
@@ -837,7 +842,7 @@ odp_port_is_alive(const struct xlate_ctx *ctx, ofp_port_t ofp_port)
     return true;
 }
 
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
 group_first_live_bucket(const struct xlate_ctx *, const struct group_dpif *,
                         int depth);
 
@@ -862,7 +867,7 @@ group_is_alive(const struct xlate_ctx *ctx, uint32_t group_id, int depth)
 
 static bool
 bucket_is_alive(const struct xlate_ctx *ctx,
-                const struct ofputil_bucket *bucket, int depth)
+                struct ofputil_bucket *bucket, int depth)
 {
     if (depth >= MAX_LIVENESS_RECURSION) {
         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1);
@@ -879,7 +884,7 @@ bucket_is_alive(const struct xlate_ctx *ctx,
             group_is_alive(ctx, bucket->watch_group, depth + 1));
 }
 
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
 group_first_live_bucket(const struct xlate_ctx *ctx,
                         const struct group_dpif *group, int depth)
 {
@@ -896,16 +901,16 @@ group_first_live_bucket(const struct xlate_ctx *ctx,
     return NULL;
 }
 
-static const struct ofputil_bucket *
+static struct ofputil_bucket *
 group_best_live_bucket(const struct xlate_ctx *ctx,
                        const struct group_dpif *group,
                        uint32_t basis)
 {
-    const struct ofputil_bucket *best_bucket = NULL;
+    struct ofputil_bucket *best_bucket = NULL;
     uint32_t best_score = 0;
     int i = 0;
 
-    const struct ofputil_bucket *bucket;
+    struct ofputil_bucket *bucket;
     const struct list *buckets;
 
     group_dpif_get_buckets(group, &buckets);
@@ -2112,7 +2117,23 @@ match:
 }
 
 static void
-xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
+xlate_group_stats(struct xlate_ctx *ctx, struct group_dpif *group,
+                  struct ofputil_bucket *bucket)
+{
+    if (ctx->xin->resubmit_stats) {
+        group_dpif_credit_stats(group, bucket, ctx->xin->resubmit_stats);
+    }
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_GROUP);
+        entry->u.group.group = group_dpif_ref(group);
+        entry->u.group.bucket = bucket;
+    }
+}
+
+static void
+xlate_group_bucket(struct xlate_ctx *ctx, struct ofputil_bucket *bucket)
 {
     uint64_t action_list_stub[1024 / 8];
     struct ofpbuf action_list, action_set;
@@ -2132,7 +2153,7 @@ xlate_group_bucket(struct xlate_ctx *ctx, const struct ofputil_bucket *bucket)
 static void
 xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
 {
-    const struct ofputil_bucket *bucket;
+    struct ofputil_bucket *bucket;
     const struct list *buckets;
     struct flow old_flow = ctx->xin->flow;
 
@@ -2148,16 +2169,18 @@ xlate_all_group(struct xlate_ctx *ctx, struct group_dpif *group)
          * just before applying the all or indirect group. */
         ctx->xin->flow = old_flow;
     }
+    xlate_group_stats(ctx, group, NULL);
 }
 
 static void
 xlate_ff_group(struct xlate_ctx *ctx, struct group_dpif *group)
 {
-    const struct ofputil_bucket *bucket;
+    struct ofputil_bucket *bucket;
 
     bucket = group_first_live_bucket(ctx, group, 0);
     if (bucket) {
         xlate_group_bucket(ctx, bucket);
+        xlate_group_stats(ctx, group, bucket);
     }
 }
 
@@ -2165,7 +2188,7 @@ static void
 xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
 {
     struct flow_wildcards *wc = &ctx->xout->wc;
-    const struct ofputil_bucket *bucket;
+    struct ofputil_bucket *bucket;
     uint32_t basis;
 
     basis = hash_mac(ctx->xin->flow.dl_dst, 0, 0);
@@ -2173,6 +2196,7 @@ xlate_select_group(struct xlate_ctx *ctx, struct group_dpif *group)
     if (bucket) {
         memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
         xlate_group_bucket(ctx, bucket);
+        xlate_group_stats(ctx, group, bucket);
     }
 }
 
@@ -3595,6 +3619,10 @@ xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
             xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
                                 entry->u.fin.idle, entry->u.fin.hard);
             break;
+        case XC_GROUP:
+            group_dpif_credit_stats(entry->u.group.group, entry->u.group.bucket,
+                                    stats);
+            break;
         default:
             OVS_NOT_REACHED();
         }
@@ -3663,6 +3691,9 @@ xlate_cache_clear(struct xlate_cache *xcache)
             /* 'u.fin.rule' is always already held as a XC_RULE, which
              * has already released it's reference above. */
             break;
+        case XC_GROUP:
+            group_dpif_unref(entry->u.group.group);
+            break;
         default:
             OVS_NOT_REACHED();
         }
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 8674881..fab3661 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -107,7 +107,6 @@ struct group_dpif {
     struct ovs_mutex stats_mutex;
    uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
    uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
-    struct bucket_counter *bucket_stats OVS_GUARDED; /* Bucket statistics. */
 };
 
 struct ofbundle {
@@ -3541,17 +3540,42 @@ static void
 group_construct_stats(struct group_dpif *group)
     OVS_REQUIRES(group->stats_mutex)
 {
+    struct ofputil_bucket *bucket;
+    const struct list *buckets;
+
     group->packet_count = 0;
     group->byte_count = 0;
-    if (!group->bucket_stats) {
-        group->bucket_stats = xcalloc(group->up.n_buckets,
-                                      sizeof *group->bucket_stats);
-    } else {
-        memset(group->bucket_stats, 0, group->up.n_buckets *
-               sizeof *group->bucket_stats);
+
+    group_dpif_get_buckets(group, &buckets);
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        bucket->stats.packet_count = 0;
+        bucket->stats.byte_count = 0;
     }
 }
 
+void
+group_dpif_credit_stats(struct group_dpif *group,
+                        struct ofputil_bucket *bucket,
+                        const struct dpif_flow_stats *stats)
+{
+    ovs_mutex_lock(&group->stats_mutex);
+    group->packet_count += stats->n_packets;
+    group->byte_count += stats->n_bytes;
+    if (bucket) {
+        bucket->stats.packet_count += stats->n_packets;
+        bucket->stats.byte_count += stats->n_bytes;
+    } else { /* Credit to all buckets */
+        const struct list *buckets;
+
+        group_dpif_get_buckets(group, &buckets);
+        LIST_FOR_EACH (bucket, list_node, buckets) {
+            bucket->stats.packet_count += stats->n_packets;
+            bucket->stats.byte_count += stats->n_bytes;
+        }
+    }
+    ovs_mutex_unlock(&group->stats_mutex);
+}
+
 static enum ofperr
 group_construct(struct ofgroup *group_)
 {
@@ -3578,34 +3602,19 @@ group_construct(struct ofgroup *group_)
 }
 
 static void
-group_destruct__(struct group_dpif *group)
-    OVS_REQUIRES(group->stats_mutex)
-{
-    free(group->bucket_stats);
-    group->bucket_stats = NULL;
-}
-
-static void
 group_destruct(struct ofgroup *group_)
 {
     struct group_dpif *group = group_dpif_cast(group_);
 
-    ovs_mutex_lock(&group->stats_mutex);
-    group_destruct__(group);
-    ovs_mutex_unlock(&group->stats_mutex);
     ovs_mutex_destroy(&group->stats_mutex);
 }
 
 static enum ofperr
-group_modify(struct ofgroup *group_, struct ofgroup *victim_)
+group_modify(struct ofgroup *group_)
 {
     struct ofproto_dpif *ofproto = ofproto_dpif_cast(group_->ofproto);
     struct group_dpif *group = group_dpif_cast(group_);
-    struct group_dpif *victim = group_dpif_cast(victim_);
 
     ovs_mutex_lock(&group->stats_mutex);
-    if (victim->up.n_buckets < group->up.n_buckets) {
-        group_destruct__(group);
-    }
     group_construct_stats(group);
     ovs_mutex_unlock(&group->stats_mutex);
@@ -3618,12 +3627,21 @@ static enum ofperr
 group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
 {
     struct group_dpif *group = group_dpif_cast(group_);
+    struct ofputil_bucket *bucket;
+    const struct list *buckets;
+    struct bucket_counter *bucket_stats;
 
     ovs_mutex_lock(&group->stats_mutex);
     ogs->packet_count = group->packet_count;
     ogs->byte_count = group->byte_count;
-    memcpy(ogs->bucket_stats, group->bucket_stats,
-           group->up.n_buckets * sizeof *group->bucket_stats);
+
+    group_dpif_get_buckets(group, &buckets);
+    bucket_stats = ogs->bucket_stats;
+    LIST_FOR_EACH (bucket, list_node, buckets) {
+        bucket_stats->packet_count = bucket->stats.packet_count;
+        bucket_stats->byte_count = bucket->stats.byte_count;
+        bucket_stats++;
+    }
     ovs_mutex_unlock(&group->stats_mutex);
 
     return 0;
diff --git a/ofproto/ofproto-dpif.h b/ofproto/ofproto-dpif.h
index 345117a..3380dd1 100644
--- a/ofproto/ofproto-dpif.h
+++ b/ofproto/ofproto-dpif.h
@@ -125,6 +125,9 @@ void choose_miss_rule(enum ofputil_port_config,
                      struct rule_dpif *no_packet_in_rule,
                      struct rule_dpif **rule, bool take_ref);
 
+void group_dpif_credit_stats(struct group_dpif *,
+                             struct ofputil_bucket *,
+                             const struct dpif_flow_stats *);
 bool group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
                        struct group_dpif **group);
 
diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h
index 209f6f9..74a1a3f 100644
--- a/ofproto/ofproto-provider.h
+++ b/ofproto/ofproto-provider.h
@@ -1692,7 +1692,7 @@ struct ofproto_class {
     void (*group_destruct)(struct ofgroup *);
     void (*group_dealloc)(struct ofgroup *);
 
-    enum ofperr (*group_modify)(struct ofgroup *, struct ofgroup *victim);
+    enum ofperr (*group_modify)(struct ofgroup *);
 
     enum ofperr (*group_get_stats)(const struct ofgroup *,
                                    struct ofputil_group_stats *);
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index b5a59f8..39d9b8c 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -5737,7 +5737,7 @@ modify_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
     list_move(&ofgroup->buckets, &gm->buckets);
     ofgroup->n_buckets = list_size(&ofgroup->buckets);
 
-    error = ofproto->ofproto_class->group_modify(ofgroup, victim);
+    error = ofproto->ofproto_class->group_modify(ofgroup);
     if (!error) {
         ofputil_bucket_list_destroy(&victim->buckets);
         ofproto->n_groups[victim->type]--;
-- 
1.7.9.5