With that change,
Acked-by: Ben Pfaff <[email protected]>
On Thu, Jul 03, 2014 at 02:46:20PM +1200, Joe Stringer wrote:
> The handle_missed_revalidation() call is inverted from what I had intended:
>
> diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
> index 9c2a8a4..70f9a43 100644
> --- a/ofproto/ofproto-dpif-upcall.c
> +++ b/ofproto/ofproto-dpif-upcall.c
> @@ -1583,7 +1583,7 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
>              if (purge
>                  || (missed_flow
>                      && revalidator->udpif->need_revalidate
> -                    && handle_missed_revalidation(revalidator, ukey))) {
> +                    && !handle_missed_revalidation(revalidator, ukey))) {
>                  struct dump_op *op = &ops[n_ops++];
>
>                  dump_op_init(op, ukey->key, ukey->key_len, ukey);
>
>
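To spell out the corrected logic: handle_missed_revalidation() returns true when revalidate_ukey() decides the re-fetched flow should be kept, so the sweep must only queue a delete when it returns false. Below is a minimal stand-alone sketch of that decision; should_delete() and its plain bool parameters are hypothetical stand-ins for the variables in the patch, not OVS code:

#include <stdbool.h>
#include <stdio.h>

/* Sketch of the corrected sweep condition: delete the datapath flow when
 * purging, or when the flow was missed by a dump that was supposed to
 * revalidate everything and re-fetching it shows it should not be kept. */
static bool
should_delete(bool purge, bool missed_flow, bool need_revalidate,
              bool flow_still_valid)
{
    return purge
           || (missed_flow && need_revalidate && !flow_still_valid);
}

int
main(void)
{
    /* Missed flow, revalidation pending, still valid: keep it (prints 0). */
    printf("%d\n", should_delete(false, true, true, true));
    /* Missed flow, revalidation pending, no longer valid: delete (prints 1). */
    printf("%d\n", should_delete(false, true, true, false));
    /* A purge deletes regardless of the other conditions (prints 1). */
    printf("%d\n", should_delete(true, false, false, true));
    return 0;
}
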
> On 3 July 2014 12:29, Joe Stringer <[email protected]> wrote:
>
> > If the datapath doesn't dump a flow for some reason, and the current
> > dump is expected to revalidate all flows in the datapath, then perform
> > revalidation for those flows by fetching them during the sweep phase.
> > If revalidation is not required, then leave the flow in the datapath and
> > don't revalidate it.
> >
> > Signed-off-by: Joe Stringer <[email protected]>
> > ---
> > v2: Make the revalidator_sweep__() logic easier to read.
> > Rebase.
> > RFC: First post.
> > ---
> >  ofproto/ofproto-dpif-upcall.c |   55 +++++++++++++++++++++++++++++++----------
> >  1 file changed, 42 insertions(+), 13 deletions(-)
> >
> > diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
> > index 74d9686..9c2a8a4 100644
> > --- a/ofproto/ofproto-dpif-upcall.c
> > +++ b/ofproto/ofproto-dpif-upcall.c
> > @@ -46,6 +46,7 @@
> >  VLOG_DEFINE_THIS_MODULE(ofproto_dpif_upcall);
> >
> >  COVERAGE_DEFINE(upcall_duplicate_flow);
> > +COVERAGE_DEFINE(revalidate_missed_dp_flow);
> >
> >  /* A thread that reads upcalls from dpif, forwards each upcall's packet,
> >   * and possibly sets up a kernel flow as a cache. */
> > @@ -1537,6 +1538,31 @@ revalidate(struct revalidator *revalidator)
> >      dpif_flow_dump_state_uninit(udpif->dpif, state);
> >  }
> >
> > +/* Called with exclusive access to 'revalidator' and 'ukey'. */
> > +static bool
> > +handle_missed_revalidation(struct revalidator *revalidator,
> > +                           struct udpif_key *ukey)
> > +    OVS_NO_THREAD_SAFETY_ANALYSIS
> > +{
> > +    struct udpif *udpif = revalidator->udpif;
> > +    struct nlattr *mask, *actions;
> > +    size_t mask_len, actions_len;
> > +    struct dpif_flow_stats stats;
> > +    struct ofpbuf *buf;
> > +    bool keep = false;
> > +
> > +    COVERAGE_INC(revalidate_missed_dp_flow);
> > +
> > +    if (!dpif_flow_get(udpif->dpif, ukey->key, ukey->key_len, &buf,
> > +                       &mask, &mask_len, &actions, &actions_len, &stats)) {
> > +        keep = revalidate_ukey(udpif, ukey, mask, mask_len, actions,
> > +                               actions_len, &stats);
> > +        ofpbuf_delete(buf);
> > +    }
> > +
> > +    return keep;
> > +}
> > +
> >  static void
> >  revalidator_sweep__(struct revalidator *revalidator, bool purge)
> >      OVS_NO_THREAD_SAFETY_ANALYSIS
> > @@ -1550,21 +1576,24 @@ revalidator_sweep__(struct revalidator *revalidator, bool purge)
> >      /* During garbage collection, this revalidator completely owns its ukeys
> >       * map, and therefore doesn't need to do any locking. */
> >      HMAP_FOR_EACH_SAFE (ukey, next, hmap_node, revalidator->ukeys) {
> > -        if (!purge && ukey->mark) {
> > +        if (ukey->flow_exists) {
> > +            bool missed_flow = !ukey->mark;
> > +
> >              ukey->mark = false;
> > -        } else if (!ukey->flow_exists) {
> > -            ukey_delete(revalidator, ukey);
> > -        } else {
> > -            struct dump_op *op = &ops[n_ops++];
> > -
> > -            /* If we have previously seen a flow in the datapath, but didn't
> > -             * see it during the most recent dump, delete it. This allows us
> > -             * to clean up the ukey and keep the statistics consistent. */
> > -            dump_op_init(op, ukey->key, ukey->key_len, ukey);
> > -            if (n_ops == REVALIDATE_MAX_BATCH) {
> > -                push_dump_ops(revalidator, ops, n_ops);
> > -                n_ops = 0;
> > +            if (purge
> > +                || (missed_flow
> > +                    && revalidator->udpif->need_revalidate
> > +                    && handle_missed_revalidation(revalidator, ukey))) {
> > +                struct dump_op *op = &ops[n_ops++];
> > +
> > +                dump_op_init(op, ukey->key, ukey->key_len, ukey);
> > +                if (n_ops == REVALIDATE_MAX_BATCH) {
> > +                    push_dump_ops(revalidator, ops, n_ops);
> > +                    n_ops = 0;
> > +                }
> >              }
> > +        } else {
> > +            ukey_delete(revalidator, ukey);
> >          }
> > }
> >
> > --
> > 1.7.10.4
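
As an observation tip (not part of the patch): the COVERAGE_INC() above feeds the revalidate_missed_dp_flow counter, so on a running ovs-vswitchd the new path should show up by name in the output of "ovs-appctl coverage/show" once it has been exercised; counters that have never been hit are only summarized there.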
> >
_______________________________________________
dev mailing list
[email protected]
http://openvswitch.org/mailman/listinfo/dev