Acked-by: Ethan Jackson <et...@nicira.com>

On Mon, Jan 13, 2014 at 11:25 AM, Ben Pfaff <b...@nicira.com> wrote:
> Jarno Rajahalme reported up to 40% performance gain on netperf TCP_CRR with
> an earlier version of this patch in combination with a kernel NUMA patch,
> together with a reduction in variance:
>     http://openvswitch.org/pipermail/dev/2014-January/035867.html
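
(Inline note from me for anyone skimming the diff: every hunk is the same
mechanical substitution on the classifier's lock.  Below is a minimal sketch
of the pattern, loosely modeled on the dpif_netdev_get_stats() hunk; the
struct and function names "counted", "counted_get", and "counted_bump" are
made up for illustration, while the lock type and calls are the real ones
this patch switches to:

    #include "fat-rwlock.h"          /* new include this patch adds */

    struct counted {
        struct fat_rwlock rwlock;    /* was: struct ovs_rwlock rwlock; */
        int n;
    };

    /* Reader side: take the lock shared, copy the value out, release. */
    static int
    counted_get(struct counted *c)
    {
        int n;

        fat_rwlock_rdlock(&c->rwlock);   /* was: ovs_rwlock_rdlock() */
        n = c->n;
        fat_rwlock_unlock(&c->rwlock);   /* was: ovs_rwlock_unlock() */

        return n;
    }

    /* Writer side: take the lock exclusive while modifying. */
    static void
    counted_bump(struct counted *c)
    {
        fat_rwlock_wrlock(&c->rwlock);   /* was: ovs_rwlock_wrlock() */
        c->n++;
        fat_rwlock_unlock(&c->rwlock);
    }

Setup and teardown change the same way: fat_rwlock_init() and
fat_rwlock_destroy() replace ovs_rwlock_init() and ovs_rwlock_destroy(), as
in the classifier_init()/classifier_destroy() hunks.)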
>
> Signed-off-by: Ben Pfaff <b...@nicira.com>
> ---
>  lib/classifier.c        |    4 +--
>  lib/classifier.h        |    3 +-
>  lib/dpif-netdev.c       |   28 +++++++++---------
>  ofproto/ofproto-dpif.c  |    8 +++---
>  ofproto/ofproto.c       |   72 +++++++++++++++++++++++------------------------
>  tests/test-classifier.c |   28 +++++++++---------
>  utilities/ovs-ofctl.c   |   16 +++++------
>  7 files changed, 80 insertions(+), 79 deletions(-)
>
> diff --git a/lib/classifier.c b/lib/classifier.c
> index 1675283..30a91b7 100644
> --- a/lib/classifier.c
> +++ b/lib/classifier.c
> @@ -176,7 +176,7 @@ classifier_init(struct classifier *cls, const uint8_t *flow_segments)
>      hmap_init(&cls->subtables);
>      list_init(&cls->subtables_priority);
>      hmap_init(&cls->partitions);
> -    ovs_rwlock_init(&cls->rwlock);
> +    fat_rwlock_init(&cls->rwlock);
>      cls->n_flow_segments = 0;
>      if (flow_segments) {
>          while (cls->n_flow_segments < CLS_MAX_INDICES
> @@ -213,7 +213,7 @@ classifier_destroy(struct classifier *cls)
>              free(partition);
>          }
>          hmap_destroy(&cls->partitions);
> -        ovs_rwlock_destroy(&cls->rwlock);
> +        fat_rwlock_destroy(&cls->rwlock);
>      }
>  }
>
> diff --git a/lib/classifier.h b/lib/classifier.h
> index b6b89a0..c3c1c3b 100644
> --- a/lib/classifier.h
> +++ b/lib/classifier.h
> @@ -213,6 +213,7 @@
>   * The classifier may safely be accessed by many reader threads concurrently or
>   * by a single writer. */
>
> +#include "fat-rwlock.h"
>  #include "flow.h"
>  #include "hindex.h"
>  #include "hmap.h"
> @@ -254,7 +255,7 @@ struct classifier {
>      struct list subtables_priority; /* Subtables in descending priority order.
>                                       */
>      struct hmap partitions;     /* Contains "struct cls_partition"s. */
> -    struct ovs_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
> +    struct fat_rwlock rwlock OVS_ACQ_AFTER(ofproto_mutex);
>      struct cls_trie tries[CLS_MAX_TRIES]; /* Prefix tries. */
>      unsigned int n_tries;
>  };
> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
> index fdea0a7..cb64bdc 100644
> --- a/lib/dpif-netdev.c
> +++ b/lib/dpif-netdev.c
> @@ -608,9 +608,9 @@ dpif_netdev_get_stats(const struct dpif *dpif, struct dpif_dp_stats *stats)
>  {
>      struct dp_netdev *dp = get_dp_netdev(dpif);
>
> -    ovs_rwlock_rdlock(&dp->cls.rwlock);
> +    fat_rwlock_rdlock(&dp->cls.rwlock);
>      stats->n_flows = hmap_count(&dp->flow_table);
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>
>      stats->n_hit = ovsthread_counter_read(dp->n_hit);
>      stats->n_missed = ovsthread_counter_read(dp->n_missed);
> @@ -883,11 +883,11 @@ dp_netdev_flow_flush(struct dp_netdev *dp)
>      struct dp_netdev_flow *netdev_flow, *next;
>
>      ovs_mutex_lock(&dp->flow_mutex);
> -    ovs_rwlock_wrlock(&dp->cls.rwlock);
> +    fat_rwlock_wrlock(&dp->cls.rwlock);
>      HMAP_FOR_EACH_SAFE (netdev_flow, next, node, &dp->flow_table) {
>          dp_netdev_remove_flow(dp, netdev_flow);
>      }
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>      ovs_mutex_unlock(&dp->flow_mutex);
>  }
>
> @@ -991,10 +991,10 @@ dp_netdev_lookup_flow(const struct dp_netdev *dp, const struct flow *flow)
>  {
>      struct dp_netdev_flow *netdev_flow;
>
> -    ovs_rwlock_rdlock(&dp->cls.rwlock);
> +    fat_rwlock_rdlock(&dp->cls.rwlock);
>      netdev_flow = dp_netdev_flow_cast(classifier_lookup(&dp->cls, flow, NULL));
>      dp_netdev_flow_ref(netdev_flow);
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>
>      return netdev_flow;
>  }
> @@ -1123,9 +1123,9 @@ dpif_netdev_flow_get(const struct dpif *dpif,
>          return error;
>      }
>
> -    ovs_rwlock_rdlock(&dp->cls.rwlock);
> +    fat_rwlock_rdlock(&dp->cls.rwlock);
>      netdev_flow = dp_netdev_find_flow(dp, &key);
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>
>      if (netdev_flow) {
>          struct dp_netdev_actions *actions = NULL;
> @@ -1174,13 +1174,13 @@ dp_netdev_flow_add(struct dp_netdev *dp, const struct flow *flow,
>      match_init(&match, flow, wc);
>      cls_rule_init(CONST_CAST(struct cls_rule *, &netdev_flow->cr),
>                    &match, NETDEV_RULE_PRIORITY);
> -    ovs_rwlock_wrlock(&dp->cls.rwlock);
> +    fat_rwlock_wrlock(&dp->cls.rwlock);
>      classifier_insert(&dp->cls,
>                        CONST_CAST(struct cls_rule *, &netdev_flow->cr));
>      hmap_insert(&dp->flow_table,
>                  CONST_CAST(struct hmap_node *, &netdev_flow->node),
>                  flow_hash(flow, 0));
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>
>      ovs_mutex_unlock(&netdev_flow->mutex);
>
> @@ -1281,7 +1281,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
>      }
>
>      ovs_mutex_lock(&dp->flow_mutex);
> -    ovs_rwlock_wrlock(&dp->cls.rwlock);
> +    fat_rwlock_wrlock(&dp->cls.rwlock);
>      netdev_flow = dp_netdev_find_flow(dp, &key);
>      if (netdev_flow) {
>          if (del->stats) {
> @@ -1293,7 +1293,7 @@ dpif_netdev_flow_del(struct dpif *dpif, const struct dpif_flow_del *del)
>      } else {
>          error = ENOENT;
>      }
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>      ovs_mutex_unlock(&dp->flow_mutex);
>
>      return error;
> @@ -1332,13 +1332,13 @@ dpif_netdev_flow_dump_next(const struct dpif *dpif, void *state_,
>      struct dp_netdev_flow *netdev_flow;
>      struct hmap_node *node;
>
> -    ovs_rwlock_rdlock(&dp->cls.rwlock);
> +    fat_rwlock_rdlock(&dp->cls.rwlock);
>      node = hmap_at_position(&dp->flow_table, &state->bucket, &state->offset);
>      if (node) {
>          netdev_flow = CONTAINER_OF(node, struct dp_netdev_flow, node);
>          dp_netdev_flow_ref(netdev_flow);
>      }
> -    ovs_rwlock_unlock(&dp->cls.rwlock);
> +    fat_rwlock_unlock(&dp->cls.rwlock);
>      if (!node) {
>          return EOF;
>      }
> diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
> index 62c1e4c..f67de8e 100644
> --- a/ofproto/ofproto-dpif.c
> +++ b/ofproto/ofproto-dpif.c
> @@ -1129,9 +1129,9 @@ destruct(struct ofproto *ofproto_)
>      OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
>          struct cls_cursor cursor;
>
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          cls_cursor_init(&cursor, &table->cls, NULL);
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>          CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
>              ofproto_rule_delete(&ofproto->up, &rule->up);
>          }
> @@ -3012,7 +3012,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
>      }
>
>      cls = &ofproto->up.tables[table_id].cls;
> -    ovs_rwlock_rdlock(&cls->rwlock);
> +    fat_rwlock_rdlock(&cls->rwlock);
>      frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
>      if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
>          /* We must pretend that transport ports are unavailable. */
> @@ -3029,7 +3029,7 @@ rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
>
>      *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
>      rule_dpif_ref(*rule);
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>
>      return *rule != NULL;
>  }
> diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
> index fbcd306..b2cdb10 100644
> --- a/ofproto/ofproto.c
> +++ b/ofproto/ofproto.c
> @@ -1168,7 +1168,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
>      }
>
>      table->max_flows = s->max_flows;
> -    ovs_rwlock_wrlock(&table->cls.rwlock);
> +    fat_rwlock_wrlock(&table->cls.rwlock);
>      if (classifier_count(&table->cls) > table->max_flows
>          && table->eviction_fields) {
>          /* 'table' contains more flows than allowed.  We might not be able to
> @@ -1188,7 +1188,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
>      classifier_set_prefix_fields(&table->cls,
>                                   s->prefix_fields, s->n_prefix_fields);
>
> -    ovs_rwlock_unlock(&table->cls.rwlock);
> +    fat_rwlock_unlock(&table->cls.rwlock);
>  }
>
>  bool
> @@ -1263,9 +1263,9 @@ ofproto_flush__(struct ofproto *ofproto)
>              continue;
>          }
>
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          cls_cursor_init(&cursor, &table->cls, NULL);
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>          CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) {
>              if (!rule->pending) {
>                  ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE);
> @@ -1454,7 +1454,7 @@ ofproto_run(struct ofproto *p)
>                  heap_rebuild(&evg->rules);
>              }
>
> -            ovs_rwlock_rdlock(&table->cls.rwlock);
> +            fat_rwlock_rdlock(&table->cls.rwlock);
>              cls_cursor_init(&cursor, &table->cls, NULL);
>              CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
>                  if (!rule->eviction_group
> @@ -1462,7 +1462,7 @@ ofproto_run(struct ofproto *p)
>                      eviction_group_add_rule(rule);
>                  }
>              }
> -            ovs_rwlock_unlock(&table->cls.rwlock);
> +            fat_rwlock_unlock(&table->cls.rwlock);
>              ovs_mutex_unlock(&ofproto_mutex);
>          }
>      }
> @@ -1612,9 +1612,9 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
>
>      n_rules = 0;
>      OFPROTO_FOR_EACH_TABLE (table, ofproto) {
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          n_rules += classifier_count(&table->cls);
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>      }
>      simap_increase(usage, "rules", n_rules);
>
> @@ -1901,7 +1901,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
>
>      /* First do a cheap check whether the rule we're looking for already exists
>       * with the actions that we want.  If it does, then we're done. */
> -    ovs_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
> +    fat_rwlock_rdlock(&ofproto->tables[0].cls.rwlock);
>      rule = rule_from_cls_rule(classifier_find_match_exactly(
>                                    &ofproto->tables[0].cls, match, priority));
>      if (rule) {
> @@ -1913,7 +1913,7 @@ ofproto_add_flow(struct ofproto *ofproto, const struct match *match,
>      } else {
>          must_add = true;
>      }
> -    ovs_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
> +    fat_rwlock_unlock(&ofproto->tables[0].cls.rwlock);
>
>      /* If there's no such rule or the rule doesn't have the actions we want,
>       * fall back to a executing a full flow mod.  We can't optimize this at
> @@ -1952,10 +1952,10 @@ ofproto_delete_flow(struct ofproto *ofproto,
>
>      /* First do a cheap check whether the rule we're looking for has already
>       * been deleted.  If so, then we're done. */
> -    ovs_rwlock_rdlock(&cls->rwlock);
> +    fat_rwlock_rdlock(&cls->rwlock);
>      rule = rule_from_cls_rule(classifier_find_match_exactly(cls, target,
>                                                              priority));
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>      if (!rule) {
>          return true;
>      }
> @@ -3060,9 +3060,9 @@ handle_table_stats_request(struct ofconn *ofconn,
>          ots[i].instructions = htonl(OFPIT11_ALL);
>          ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK);
>          ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */
> -        ovs_rwlock_rdlock(&p->tables[i].cls.rwlock);
> +        fat_rwlock_rdlock(&p->tables[i].cls.rwlock);
>          ots[i].active_count = htonl(classifier_count(&p->tables[i].cls));
> -        ovs_rwlock_unlock(&p->tables[i].cls.rwlock);
> +        fat_rwlock_unlock(&p->tables[i].cls.rwlock);
>      }
>
>      p->ofproto_class->get_tables(p, ots);
> @@ -3424,7 +3424,7 @@ collect_rules_loose(struct ofproto *ofproto,
>              struct cls_cursor cursor;
>              struct rule *rule;
>
> -            ovs_rwlock_rdlock(&table->cls.rwlock);
> +            fat_rwlock_rdlock(&table->cls.rwlock);
>              cls_cursor_init(&cursor, &table->cls, &criteria->cr);
>              CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
>                  error = collect_rule(rule, criteria, rules);
> @@ -3432,7 +3432,7 @@ collect_rules_loose(struct ofproto *ofproto,
>                      break;
>                  }
>              }
> -            ovs_rwlock_unlock(&table->cls.rwlock);
> +            fat_rwlock_unlock(&table->cls.rwlock);
>          }
>      }
>
> @@ -3484,10 +3484,10 @@ collect_rules_strict(struct ofproto *ofproto,
>          FOR_EACH_MATCHING_TABLE (table, criteria->table_id, ofproto) {
>              struct rule *rule;
>
> -            ovs_rwlock_rdlock(&table->cls.rwlock);
> +            fat_rwlock_rdlock(&table->cls.rwlock);
>              rule = rule_from_cls_rule(classifier_find_rule_exactly(
>                                            &table->cls, &criteria->cr));
> -            ovs_rwlock_unlock(&table->cls.rwlock);
> +            fat_rwlock_unlock(&table->cls.rwlock);
>              if (rule) {
>                  error = collect_rule(rule, criteria, rules);
>                  if (error) {
> @@ -3635,12 +3635,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results)
>          struct cls_cursor cursor;
>          struct rule *rule;
>
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          cls_cursor_init(&cursor, &table->cls, NULL);
>          CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
>              flow_stats_ds(rule, results);
>          }
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>      }
>  }
>
> @@ -3951,9 +3951,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
>      cls_rule_init(&cr, &fm->match, fm->priority);
>
>      /* Transform "add" into "modify" if there's an existing identical flow. */
> -    ovs_rwlock_rdlock(&table->cls.rwlock);
> +    fat_rwlock_rdlock(&table->cls.rwlock);
>      rule = rule_from_cls_rule(classifier_find_rule_exactly(&table->cls, &cr));
> -    ovs_rwlock_unlock(&table->cls.rwlock);
> +    fat_rwlock_unlock(&table->cls.rwlock);
>      if (rule) {
>          cls_rule_destroy(&cr);
>          if (!rule_is_modifiable(rule)) {
> @@ -3983,9 +3983,9 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
>      if (fm->flags & OFPUTIL_FF_CHECK_OVERLAP) {
>          bool overlaps;
>
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          overlaps = classifier_rule_overlaps(&table->cls, &cr);
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>
>          if (overlaps) {
>              cls_rule_destroy(&cr);
> @@ -4806,13 +4806,13 @@ ofproto_collect_ofmonitor_refresh_rules(const struct ofmonitor *m,
>          struct cls_cursor cursor;
>          struct rule *rule;
>
> -        ovs_rwlock_rdlock(&table->cls.rwlock);
> +        fat_rwlock_rdlock(&table->cls.rwlock);
>          cls_cursor_init(&cursor, &table->cls, &target);
>          CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
>              ovs_assert(!rule->pending); /* XXX */
>              ofproto_collect_ofmonitor_refresh_rule(m, rule, seqno, rules);
>          }
> -        ovs_rwlock_unlock(&table->cls.rwlock);
> +        fat_rwlock_unlock(&table->cls.rwlock);
>      }
>
>      HMAP_FOR_EACH (op, hmap_node, &ofproto->deletions) {
> @@ -6641,9 +6641,9 @@ oftable_init(struct oftable *table)
>  static void
>  oftable_destroy(struct oftable *table)
>  {
> -    ovs_rwlock_rdlock(&table->cls.rwlock);
> +    fat_rwlock_rdlock(&table->cls.rwlock);
>      ovs_assert(classifier_is_empty(&table->cls));
> -    ovs_rwlock_unlock(&table->cls.rwlock);
> +    fat_rwlock_unlock(&table->cls.rwlock);
>      oftable_disable_eviction(table);
>      classifier_destroy(&table->cls);
>      free(table->name);
> @@ -6726,12 +6726,12 @@ oftable_enable_eviction(struct oftable *table,
>      hmap_init(&table->eviction_groups_by_id);
>      heap_init(&table->eviction_groups_by_size);
>
> -    ovs_rwlock_rdlock(&table->cls.rwlock);
> +    fat_rwlock_rdlock(&table->cls.rwlock);
>      cls_cursor_init(&cursor, &table->cls, NULL);
>      CLS_CURSOR_FOR_EACH (rule, cr, &cursor) {
>          eviction_group_add_rule(rule);
>      }
> -    ovs_rwlock_unlock(&table->cls.rwlock);
> +    fat_rwlock_unlock(&table->cls.rwlock);
>  }
>
>  /* Removes 'rule' from the oftable that contains it. */
> @@ -6741,9 +6741,9 @@ oftable_remove_rule__(struct ofproto *ofproto, struct rule *rule)
>  {
>      struct classifier *cls = &ofproto->tables[rule->table_id].cls;
>
> -    ovs_rwlock_wrlock(&cls->rwlock);
> +    fat_rwlock_wrlock(&cls->rwlock);
>      classifier_remove(cls, CONST_CAST(struct cls_rule *, &rule->cr));
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>
>      cookies_remove(ofproto, rule);
>
> @@ -6790,9 +6790,9 @@ oftable_insert_rule(struct rule *rule)
>          struct meter *meter = ofproto->meters[meter_id];
>          list_insert(&meter->rules, &rule->meter_list_node);
>      }
> -    ovs_rwlock_wrlock(&table->cls.rwlock);
> +    fat_rwlock_wrlock(&table->cls.rwlock);
>      classifier_insert(&table->cls, CONST_CAST(struct cls_rule *, &rule->cr));
> -    ovs_rwlock_unlock(&table->cls.rwlock);
> +    fat_rwlock_unlock(&table->cls.rwlock);
>      eviction_group_add_rule(rule);
>  }
>
> @@ -6861,7 +6861,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
>      OFPROTO_FOR_EACH_TABLE (oftable, ofproto) {
>          const struct cls_subtable *table;
>
> -        ovs_rwlock_rdlock(&oftable->cls.rwlock);
> +        fat_rwlock_rdlock(&oftable->cls.rwlock);
>          HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) {
>              if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) {
>                  const struct cls_rule *rule;
> @@ -6873,7 +6873,7 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap)
>                  }
>              }
>          }
> -        ovs_rwlock_unlock(&oftable->cls.rwlock);
> +        fat_rwlock_unlock(&oftable->cls.rwlock);
>      }
>  }
>
> diff --git a/tests/test-classifier.c b/tests/test-classifier.c
> index 93a2dc1..4282fd4 100644
> --- a/tests/test-classifier.c
> +++ b/tests/test-classifier.c
> @@ -449,13 +449,13 @@ destroy_classifier(struct classifier *cls)
>      struct test_rule *rule, *next_rule;
>      struct cls_cursor cursor;
>
> -    ovs_rwlock_wrlock(&cls->rwlock);
> +    fat_rwlock_wrlock(&cls->rwlock);
>      cls_cursor_init(&cursor, cls, NULL);
>      CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cls_rule, &cursor) {
>          classifier_remove(cls, &rule->cls_rule);
>          free_rule(rule);
>      }
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>      classifier_destroy(cls);
>  }
>
> @@ -621,13 +621,13 @@ test_empty(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>      struct tcls tcls;
>
>      classifier_init(&cls, flow_segment_u32s);
> -    ovs_rwlock_wrlock(&cls.rwlock);
> +    fat_rwlock_wrlock(&cls.rwlock);
>      classifier_set_prefix_fields(&cls, trie_fields, ARRAY_SIZE(trie_fields));
>      tcls_init(&tcls);
>      assert(classifier_is_empty(&cls));
>      assert(tcls_is_empty(&tcls));
>      compare_classifiers(&cls, &tcls);
> -    ovs_rwlock_unlock(&cls.rwlock);
> +    fat_rwlock_unlock(&cls.rwlock);
>      classifier_destroy(&cls);
>      tcls_destroy(&tcls);
>  }
> @@ -654,7 +654,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>                           hash_bytes(&wc_fields, sizeof wc_fields, 0), 0);
>
>          classifier_init(&cls, flow_segment_u32s);
> -        ovs_rwlock_wrlock(&cls.rwlock);
> +        fat_rwlock_wrlock(&cls.rwlock);
>          classifier_set_prefix_fields(&cls, trie_fields,
>                                       ARRAY_SIZE(trie_fields));
>          tcls_init(&tcls);
> @@ -671,7 +671,7 @@ test_single_rule(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>          compare_classifiers(&cls, &tcls);
>
>          free_rule(rule);
> -        ovs_rwlock_unlock(&cls.rwlock);
> +        fat_rwlock_unlock(&cls.rwlock);
>          classifier_destroy(&cls);
>          tcls_destroy(&tcls);
>      }
> @@ -695,7 +695,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>          rule2->aux += 5;
>
>          classifier_init(&cls, flow_segment_u32s);
> -        ovs_rwlock_wrlock(&cls.rwlock);
> +        fat_rwlock_wrlock(&cls.rwlock);
>          classifier_set_prefix_fields(&cls, trie_fields,
>                                       ARRAY_SIZE(trie_fields));
>          tcls_init(&tcls);
> @@ -713,7 +713,7 @@ test_rule_replacement(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>          check_tables(&cls, 1, 1, 0);
>          compare_classifiers(&cls, &tcls);
>          tcls_destroy(&tcls);
> -        ovs_rwlock_unlock(&cls.rwlock);
> +        fat_rwlock_unlock(&cls.rwlock);
>          destroy_classifier(&cls);
>      }
>  }
> @@ -809,7 +809,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>              }
>
>              classifier_init(&cls, flow_segment_u32s);
> -            ovs_rwlock_wrlock(&cls.rwlock);
> +            fat_rwlock_wrlock(&cls.rwlock);
>              classifier_set_prefix_fields(&cls, trie_fields,
>                                           ARRAY_SIZE(trie_fields));
>              tcls_init(&tcls);
> @@ -850,7 +850,7 @@ test_many_rules_in_one_list (int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>                  compare_classifiers(&cls, &tcls);
>              }
>
> -            ovs_rwlock_unlock(&cls.rwlock);
> +            fat_rwlock_unlock(&cls.rwlock);
>              classifier_destroy(&cls);
>              tcls_destroy(&tcls);
>
> @@ -913,7 +913,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>          } while ((1 << count_ones(value_mask)) < N_RULES);
>
>          classifier_init(&cls, flow_segment_u32s);
> -        ovs_rwlock_wrlock(&cls.rwlock);
> +        fat_rwlock_wrlock(&cls.rwlock);
>          classifier_set_prefix_fields(&cls, trie_fields,
>                                       ARRAY_SIZE(trie_fields));
>          tcls_init(&tcls);
> @@ -942,7 +942,7 @@ test_many_rules_in_one_table(int argc OVS_UNUSED, char *argv[] OVS_UNUSED)
>              compare_classifiers(&cls, &tcls);
>          }
>
> -        ovs_rwlock_unlock(&cls.rwlock);
> +        fat_rwlock_unlock(&cls.rwlock);
>          classifier_destroy(&cls);
>          tcls_destroy(&tcls);
>      }
> @@ -977,7 +977,7 @@ test_many_rules_in_n_tables(int n_tables)
>          shuffle(priorities, ARRAY_SIZE(priorities));
>
>          classifier_init(&cls, flow_segment_u32s);
> -        ovs_rwlock_wrlock(&cls.rwlock);
> +        fat_rwlock_wrlock(&cls.rwlock);
>          classifier_set_prefix_fields(&cls, trie_fields,
>                                       ARRAY_SIZE(trie_fields));
>          tcls_init(&tcls);
> @@ -1012,7 +1012,7 @@ test_many_rules_in_n_tables(int n_tables)
>              free_rule(target);
>          }
>
> -        ovs_rwlock_unlock(&cls.rwlock);
> +        fat_rwlock_unlock(&cls.rwlock);
>          destroy_classifier(&cls);
>          tcls_destroy(&tcls);
>      }
> diff --git a/utilities/ovs-ofctl.c b/utilities/ovs-ofctl.c
> index 9b02b25..e8453f3 100644
> --- a/utilities/ovs-ofctl.c
> +++ b/utilities/ovs-ofctl.c
> @@ -2252,13 +2252,13 @@ fte_free_all(struct classifier *cls)
>      struct cls_cursor cursor;
>      struct fte *fte, *next;
>
> -    ovs_rwlock_wrlock(&cls->rwlock);
> +    fat_rwlock_wrlock(&cls->rwlock);
>      cls_cursor_init(&cursor, cls, NULL);
>      CLS_CURSOR_FOR_EACH_SAFE (fte, next, rule, &cursor) {
>          classifier_remove(cls, &fte->rule);
>          fte_free(fte);
>      }
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>      classifier_destroy(cls);
>  }
>
> @@ -2277,9 +2277,9 @@ fte_insert(struct classifier *cls, const struct match *match,
>      cls_rule_init(&fte->rule, match, priority);
>      fte->versions[index] = version;
>
> -    ovs_rwlock_wrlock(&cls->rwlock);
> +    fat_rwlock_wrlock(&cls->rwlock);
>      old = fte_from_cls_rule(classifier_replace(cls, &fte->rule));
> -    ovs_rwlock_unlock(&cls->rwlock);
> +    fat_rwlock_unlock(&cls->rwlock);
>      if (old) {
>          fte_version_free(old->versions[index]);
>          fte->versions[!index] = old->versions[!index];
> @@ -2490,7 +2490,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
>      list_init(&requests);
>
>      /* Delete flows that exist on the switch but not in the file. */
> -    ovs_rwlock_rdlock(&cls.rwlock);
> +    fat_rwlock_rdlock(&cls.rwlock);
>      cls_cursor_init(&cursor, &cls, NULL);
>      CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
>          struct fte_version *file_ver = fte->versions[FILE_IDX];
> @@ -2514,7 +2514,7 @@ ofctl_replace_flows(int argc OVS_UNUSED, char *argv[])
>              fte_make_flow_mod(fte, FILE_IDX, OFPFC_ADD, protocol, &requests);
>          }
>      }
> -    ovs_rwlock_unlock(&cls.rwlock);
> +    fat_rwlock_unlock(&cls.rwlock);
>      transact_multiple_noreply(vconn, &requests);
>      vconn_close(vconn);
>
> @@ -2556,7 +2556,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
>      ds_init(&a_s);
>      ds_init(&b_s);
>
> -    ovs_rwlock_rdlock(&cls.rwlock);
> +    fat_rwlock_rdlock(&cls.rwlock);
>      cls_cursor_init(&cursor, &cls, NULL);
>      CLS_CURSOR_FOR_EACH (fte, rule, &cursor) {
>          struct fte_version *a = fte->versions[0];
> @@ -2576,7 +2576,7 @@ ofctl_diff_flows(int argc OVS_UNUSED, char *argv[])
>              }
>          }
>      }
> -    ovs_rwlock_unlock(&cls.rwlock);
> +    fat_rwlock_unlock(&cls.rwlock);
>
>      ds_destroy(&a_s);
>      ds_destroy(&b_s);
> --
> 1.7.10.4
>
_______________________________________________
dev mailing list
dev@openvswitch.org
http://openvswitch.org/mailman/listinfo/dev