One suggestion below; otherwise this looks right to me,

Acked-by: Jarno Rajahalme <jrajaha...@nicira.com>

On Aug 5, 2014, at 3:25 PM, Pravin B Shelar <pshe...@nicira.com> wrote:

> In case of a hash collision in the mask cache, OVS does an extra
> flow lookup. The following patch avoids it.
> 
> Suggested-by: Jarno Rajahalme <jrajaha...@nicira.com>
> Signed-off-by: Pravin B Shelar <pshe...@nicira.com>
> ---
> datapath/flow_table.c |   48 +++++++++++++++++++++++-------------------------
> 1 files changed, 23 insertions(+), 25 deletions(-)
> 
> diff --git a/datapath/flow_table.c b/datapath/flow_table.c
> index cfd5a84..2d33fe7 100644
> --- a/datapath/flow_table.c
> +++ b/datapath/flow_table.c
> @@ -555,6 +555,9 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
>       return NULL;
> }
> 
> +/* Flow lookup does a full lookup on the flow table. It starts
> + * with the mask at the index passed in via *index.
> + */
> static struct sw_flow *flow_lookup(struct flow_table *tbl,
>                                  struct table_instance *ti,
>                                  struct mask_array *ma,
> @@ -562,15 +565,27 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
>                                  u32 *n_mask_hit,
>                                  u32 *index)
> {
> +     struct sw_flow_mask *mask;
>       struct sw_flow *flow;
>       int i;
> 
> -     for (i = 0; i < ma->max; i++) {
> -             struct sw_flow_mask *mask;
> +     if (likely(*index < ma->max)) {
> +             mask = rcu_dereference_ovsl(ma->masks[*index]);
> +             if (mask) {
> +                     flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
> +                     if (flow)
> +                             return flow;
> +             }
> +     }
> +
> +     for (i = 0; i < ma->max; i++) {
> +
> +             if (i == *index)
> +                     continue;
> 
>               mask = rcu_dereference_ovsl(ma->masks[i]);
>               if (!mask)
> -                     break;
> +                     return NULL;
> 
>               flow = masked_flow_lookup(ti, key, mask, n_mask_hit);
>               if (flow) { /* Found */
> @@ -603,7 +618,7 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
> 
>       *n_mask_hit = 0;
>       if (unlikely(!skb_hash)) {
> -             u32 __always_unused mask_index;
> +             u32 mask_index = 0;
> 
>               return flow_lookup(tbl, ti, ma, key, n_mask_hit, &mask_index);
>       }
> @@ -617,26 +632,9 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>               struct mask_cache_entry *e;
> 
>               e = &entries[index];
> -             if (e->skb_hash == skb_hash) {
> -                     struct sw_flow_mask *cache;
> -                     int i = e->mask_index;
> -
> -                     if (likely(i < ma->max)) {
> -                             cache = rcu_dereference(ma->masks[i]);
> -                             if (cache) {
> -                                     flow = masked_flow_lookup(ti, key,
> -                                                     cache, n_mask_hit);
> -                                     if (flow)
> -                                             return flow;
> -                             }
> -                     }
> -
> -                     /* Cache miss. This is the best cache
> -                      * replacement candidate.  */
> -                     e->skb_hash = 0;

As discussed offline, this behavior is lost in this patch; maybe set
the skb_hash to zero if the flow_lookup (added below) fails?
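
Roughly like this in the cache-hit branch below; untested, just to
illustrate the idea (all identifiers are from the hunks in this patch,
nothing new assumed):

		if (e->skb_hash == skb_hash) {
			flow = flow_lookup(tbl, ti, ma, key, n_mask_hit,
					   &e->mask_index);
			if (!flow)
				/* Zeroing skb_hash makes this entry the
				 * preferred replacement candidate again,
				 * as the deleted code above did. */
				e->skb_hash = 0;
			return flow;
		}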

> -                     ce = e;
> -                     break;
> -             }
> +             if (e->skb_hash == skb_hash)
> +                     return flow_lookup(tbl, ti, ma, key, n_mask_hit,
> +                                        &e->mask_index);
> 
>               if (!ce || e->skb_hash < ce->skb_hash)
>                       ce = e;  /* A better replacement cache candidate. */
> @@ -658,7 +656,7 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
>       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
>       struct mask_array *ma = rcu_dereference_ovsl(tbl->mask_array);
>       u32 __always_unused n_mask_hit;
> -     u32 __always_unused index;
> +     u32 index = 0;
> 
>       return flow_lookup(tbl, ti, ma, key, &n_mask_hit, &index);
> }
> -- 
> 1.7.1
> 