From: Pravin Shelar <pshe...@nicira.com>

On every packet, OVS needs to look up the flow table with every mask:
the packet flow-key is first masked with each mask in the list, and
then the masked key is looked up in the flow table.  Therefore the
number of masks can affect packet processing performance.

The following patch adds a per-CPU cache that remembers the mask used
by the last packet lookup for the same flow.  The index of that mask is
stored in the cache, which is indexed by the 5-tuple hash (skb rxhash).

Signed-off-by: Pravin B Shelar <pshe...@nicira.com>
---
 datapath/datapath.c   |    3 +-
 datapath/flow_table.c |   80 ++++++++++++++++++++++++++++++++++++++++++++-----
 datapath/flow_table.h |   11 +++++--
 3 files changed, 83 insertions(+), 11 deletions(-)

diff --git a/datapath/datapath.c b/datapath/datapath.c
index f4e415e..c5059e1 100644
--- a/datapath/datapath.c
+++ b/datapath/datapath.c
@@ -262,7 +262,8 @@ void ovs_dp_process_received_packet(struct vport *p, struct 
sk_buff *skb)
        }
 
        /* Look up flow. */
-       flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
+       flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, skb_get_rxhash(skb),
+                                        &n_mask_hit);
        if (unlikely(!flow)) {
                struct dp_upcall_info upcall;
 
diff --git a/datapath/flow_table.c b/datapath/flow_table.c
index 75c1b82..f12dd81 100644
--- a/datapath/flow_table.c
+++ b/datapath/flow_table.c
@@ -49,6 +49,10 @@
 #define TBL_MIN_BUCKETS                1024
 #define REHASH_INTERVAL                (10 * 60 * HZ)
 
+#define MC_HASH_SHIFT          8
+#define MC_HASH_ENTRIES                (1u << MC_HASH_SHIFT)
+#define MC_HASH_SEGS           ((sizeof(uint32_t) * 8) / MC_HASH_SHIFT)
+
 static struct kmem_cache *flow_cache;
 struct kmem_cache *flow_stats_cache __read_mostly;
 
@@ -211,10 +215,16 @@ int ovs_flow_tbl_init(struct flow_table *table)
 {
        struct table_instance *ti;
 
-       ti = table_instance_alloc(TBL_MIN_BUCKETS);
+       table->mask_cache = __alloc_percpu(sizeof(struct mask_cache_entry) *
+                                         MC_HASH_ENTRIES, __alignof__(struct 
mask_cache_entry));
+       if (!table->mask_cache)
+               return -ENOMEM;
 
-       if (!ti)
+       ti = table_instance_alloc(TBL_MIN_BUCKETS);
+       if (!ti) {
+               free_percpu(table->mask_cache);
                return -ENOMEM;
+       }
 
        rcu_assign_pointer(table->ti, ti);
        INIT_LIST_HEAD(&table->mask_list);
@@ -263,6 +273,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
 {
        struct table_instance *ti = (struct table_instance __force *)table->ti;
 
+       free_percpu(table->mask_cache);
        table_instance_destroy(ti, false);
 }
 
@@ -439,11 +450,12 @@ static struct sw_flow *masked_flow_lookup(struct 
table_instance *ti,
        return NULL;
 }
 
-struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
-                                   const struct sw_flow_key *key,
-                                   u32 *n_mask_hit)
+
+static struct sw_flow *flow_lookup(struct flow_table *tbl,
+                                  struct table_instance *ti,
+                                  const struct sw_flow_key *key,
+                                  u32 *n_mask_hit)
 {
-       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        struct sw_flow_mask *mask;
        struct sw_flow *flow;
 
@@ -457,12 +469,64 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct 
flow_table *tbl,
        return NULL;
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
+                                         const struct sw_flow_key *key,
+                                         u32 skb_hash,
+                                         u32 *n_mask_hit)
+{
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+       struct mask_cache_entry  *entries, *ce, *del;
+       struct sw_flow *flow;
+       u32 hash = skb_hash;
+       int seg;
+
+       del = NULL;
+       entries = this_cpu_ptr(tbl->mask_cache);
+
+       for (seg = 0; seg < MC_HASH_SEGS; seg++) {
+               int index;
+
+               index = hash & (MC_HASH_ENTRIES - 1);
+               ce = &entries[index];
+
+               if (ce->skb_hash == skb_hash) {
+                       struct sw_flow_mask *mask;
+                       int i;
+
+                       i = 0;
+                       list_for_each_entry_rcu(mask, &tbl->mask_list, list) {
+                               if (ce->mask_index == i++) {
+                                       flow = masked_flow_lookup(ti, key, 
mask);
+                                       if (flow)  /* Found */
+                                               return flow;
+
+                                       ce->skb_hash = 0;
+                               }
+                       }
+               }
+
+               if (!del || (del->skb_hash && !ce->skb_hash)) {
+                       del = ce;
+               }
+
+               hash >>= MC_HASH_SHIFT;
+       }
+       flow = flow_lookup(tbl, ti, key, n_mask_hit);
+
+       if (flow) {
+               del->skb_hash = skb_hash;
+               del->mask_index = (*n_mask_hit - 1);
+       }
+       return flow;
+}
+
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
                                    const struct sw_flow_key *key)
 {
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
        u32 __always_unused n_mask_hit;
 
-       return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
+       return flow_lookup(tbl, ti, key, &n_mask_hit);
 }
 
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
@@ -563,7 +627,7 @@ static int flow_mask_insert(struct flow_table *tbl, struct 
sw_flow *flow,
                        return -ENOMEM;
                mask->key = new->key;
                mask->range = new->range;
-               list_add_rcu(&mask->list, &tbl->mask_list);
+               list_add_tail_rcu(&mask->list, &tbl->mask_list);
        } else {
                BUG_ON(!mask->ref_count);
                mask->ref_count++;
diff --git a/datapath/flow_table.h b/datapath/flow_table.h
index ddf0c01..281d1da 100644
--- a/datapath/flow_table.h
+++ b/datapath/flow_table.h
@@ -36,6 +36,11 @@
 
 #include "flow.h"
 
+struct mask_cache_entry {
+       u32 skb_hash;
+       u32 mask_index;
+};
+
 struct table_instance {
        struct flex_array *buckets;
        unsigned int n_buckets;
@@ -47,6 +52,7 @@ struct table_instance {
 
 struct flow_table {
        struct table_instance __rcu *ti;
+       struct mask_cache_entry __percpu *mask_cache;
        struct list_head mask_list;
        unsigned long last_rehash;
        unsigned int count;
@@ -72,8 +78,9 @@ int  ovs_flow_tbl_num_masks(const struct flow_table *table);
 struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *table,
                                       u32 *bucket, u32 *idx);
 struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
-                                   const struct sw_flow_key *,
-                                   u32 *n_mask_hit);
+                                         const struct sw_flow_key *,
+                                         u32 skb_hash,
+                                         u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
                                    const struct sw_flow_key *);
 
-- 
1.7.9.5

_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev

Reply via email to