----- Original Message ----- > From: "Darrell Ball" <dlu...@gmail.com> > To: dlu...@gmail.com, d...@openvswitch.com > Sent: Monday, July 11, 2016 11:07:03 AM > Subject: [ovs-dev] [patch_v8] ovn: Add local router support (RFC). > > This patch adds local router support. The idea is to do openflow rule > calculations and download for logical routers only on HVs where this is > needed. The approach used is to do a flood fill, based local VIF presence > and factoring in logical data path associations recursively. > > The algorithm detects changes in the local and patched datapath sets > and triggers recalculation of the local_routers set. > > The code changes are limited to ovn-controller. > > Signed-off-by: Darrell Ball <dlu...@gmail.com> > --- > ovn/controller/binding.c | 34 ++++++--- > ovn/controller/binding.h | 3 +- > ovn/controller/lflow.c | 4 + > ovn/controller/ovn-controller.c | 157 > +++++++++++++++++++++++++++++++++++++--- > ovn/controller/ovn-controller.h | 7 ++ > ovn/controller/patch.c | 38 +++++++--- > ovn/controller/patch.h | 4 +- > tests/ovn.at | 153 > +++++++++++++++++++++++++++++++++++++++ > 8 files changed, 365 insertions(+), 35 deletions(-) > > diff --git a/ovn/controller/binding.c b/ovn/controller/binding.c > index 4704226..2feed61 100644 > --- a/ovn/controller/binding.c > +++ b/ovn/controller/binding.c > @@ -126,7 +126,8 @@ local_datapath_lookup_by_uuid(struct hmap *hmap_p, const > struct uuid *uuid) > } > > static void > -remove_local_datapath(struct hmap *local_datapaths, struct local_datapath > *ld) > +remove_local_datapath(struct hmap *local_datapaths, struct local_datapath > *ld, > + bool *datapath_assoc_chgd) > { > if (ld->logical_port) { > sset_find_and_delete(&all_lports, ld->logical_port); > @@ -136,17 +137,19 @@ remove_local_datapath(struct hmap *local_datapaths, > struct local_datapath *ld) > hmap_remove(local_datapaths, &ld->hmap_node); > hmap_remove(&local_datapaths_by_uuid, &ld->uuid_hmap_node); > free(ld); > + *datapath_assoc_chgd = true; > } > > static void > remove_local_datapath_by_binding(struct hmap *local_datapaths, > - const struct sbrec_port_binding > *binding_rec) > + const struct sbrec_port_binding > *binding_rec, > + bool *datapath_assoc_chgd) > { > const struct uuid *uuid = &binding_rec->header_.uuid; > struct local_datapath *ld = > local_datapath_lookup_by_uuid(local_datapaths, > uuid); > if (ld) { > - remove_local_datapath(local_datapaths, ld); > + remove_local_datapath(local_datapaths, ld, datapath_assoc_chgd); > } else { > struct local_datapath *ld; > HMAP_FOR_EACH (ld, hmap_node, local_datapaths) { > @@ -160,7 +163,7 @@ remove_local_datapath_by_binding(struct hmap > *local_datapaths, > static void > add_local_datapath(struct hmap *local_datapaths, > const struct sbrec_port_binding *binding_rec, > - const struct uuid *uuid) > + const struct uuid *uuid, bool *datapath_assoc_chgd) > { > if (get_local_datapath(local_datapaths, > binding_rec->datapath->tunnel_key)) { > @@ -174,6 +177,7 @@ add_local_datapath(struct hmap *local_datapaths, > binding_rec->datapath->tunnel_key); > hmap_insert(&local_datapaths_by_uuid, &ld->uuid_hmap_node, > uuid_hash(uuid)); > + *datapath_assoc_chgd = true; > } > > static void > @@ -191,7 +195,8 @@ static void > consider_local_datapath(struct controller_ctx *ctx, struct shash *lports, > const struct sbrec_chassis *chassis_rec, > const struct sbrec_port_binding *binding_rec, > - struct hmap *local_datapaths) > + struct hmap *local_datapaths, > + bool *datapath_assoc_chgd) > { > const struct ovsrec_interface *iface_rec > 
= shash_find_data(lports, binding_rec->logical_port); > @@ -203,7 +208,8 @@ consider_local_datapath(struct controller_ctx *ctx, > struct shash *lports, > sset_add(&all_lports, binding_rec->logical_port); > } > add_local_datapath(local_datapaths, binding_rec, > - &binding_rec->header_.uuid); > + &binding_rec->header_.uuid, > + datapath_assoc_chgd); > if (iface_rec && ctx->ovs_idl_txn) { > update_qos(iface_rec, binding_rec); > } > @@ -244,7 +250,8 @@ consider_local_datapath(struct controller_ctx *ctx, > struct shash *lports, > sbrec_port_binding_set_chassis(binding_rec, chassis_rec); > sset_add(&all_lports, binding_rec->logical_port); > add_local_datapath(local_datapaths, binding_rec, > - &binding_rec->header_.uuid); > + &binding_rec->header_.uuid, > + datapath_assoc_chgd); > } > } else if (chassis_rec && binding_rec->chassis == chassis_rec > && strcmp(binding_rec->type, "gateway")) { > @@ -269,7 +276,8 @@ static struct shash lports = SHASH_INITIALIZER(&lports); > > void > binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, > - const char *chassis_id, struct hmap *local_datapaths) > + const char *chassis_id, struct hmap *local_datapaths, > + bool *datapath_assoc_chgd) > { > const struct sbrec_chassis *chassis_rec; > const struct sbrec_port_binding *binding_rec; > @@ -297,7 +305,7 @@ binding_run(struct controller_ctx *ctx, const struct > ovsrec_bridge *br_int, > HMAP_INITIALIZER(&keep_local_datapath_by_uuid); > SBREC_PORT_BINDING_FOR_EACH(binding_rec, ctx->ovnsb_idl) { > consider_local_datapath(ctx, &lports, chassis_rec, binding_rec, > - local_datapaths); > + local_datapaths, datapath_assoc_chgd); > struct local_datapath *ld = xzalloc(sizeof *ld); > memcpy(&ld->uuid, &binding_rec->header_.uuid, sizeof ld->uuid); > hmap_insert(&keep_local_datapath_by_uuid, &ld->uuid_hmap_node, > @@ -307,7 +315,8 @@ binding_run(struct controller_ctx *ctx, const struct > ovsrec_bridge *br_int, > HMAP_FOR_EACH_SAFE (old_ld, next, hmap_node, local_datapaths) { > if (!local_datapath_lookup_by_uuid(&keep_local_datapath_by_uuid, > &old_ld->uuid)) { > - remove_local_datapath(local_datapaths, old_ld); > + remove_local_datapath(local_datapaths, old_ld, > + datapath_assoc_chgd); > } > } > hmap_destroy(&keep_local_datapath_by_uuid); > @@ -315,10 +324,11 @@ binding_run(struct controller_ctx *ctx, const struct > ovsrec_bridge *br_int, > } else { > SBREC_PORT_BINDING_FOR_EACH_TRACKED(binding_rec, ctx->ovnsb_idl) { > if (sbrec_port_binding_is_deleted(binding_rec)) { > - remove_local_datapath_by_binding(local_datapaths, > binding_rec); > + remove_local_datapath_by_binding(local_datapaths, > binding_rec, > + datapath_assoc_chgd); > } else { > consider_local_datapath(ctx, &lports, chassis_rec, > binding_rec, > - local_datapaths); > + local_datapaths, > datapath_assoc_chgd); > } > } > } > diff --git a/ovn/controller/binding.h b/ovn/controller/binding.h > index 8753d44..5558591 100644 > --- a/ovn/controller/binding.h > +++ b/ovn/controller/binding.h > @@ -29,7 +29,8 @@ struct sset; > void binding_register_ovs_idl(struct ovsdb_idl *); > void binding_reset_processing(void); > void binding_run(struct controller_ctx *, const struct ovsrec_bridge > *br_int, > - const char *chassis_id, struct hmap *local_datapaths); > + const char *chassis_id, struct hmap *local_datapaths, > + bool *datapath_assoc_chgd); > bool binding_cleanup(struct controller_ctx *, const char *chassis_id); > > #endif /* ovn/binding.h */ > diff --git a/ovn/controller/lflow.c b/ovn/controller/lflow.c > index 00d1d6e..ba0c89f 100644 > --- 
a/ovn/controller/lflow.c > +++ b/ovn/controller/lflow.c > @@ -437,6 +437,10 @@ consider_logical_flow(const struct lport_index *lports, > return; > } > } > + } else { > + if (!get_local_router(ldp->tunnel_key)) { > + return; > + } > } > > /* Determine translation of logical table IDs to physical table IDs. */ > diff --git a/ovn/controller/ovn-controller.c > b/ovn/controller/ovn-controller.c > index 28ee13e..e0dbb35 100644 > --- a/ovn/controller/ovn-controller.c > +++ b/ovn/controller/ovn-controller.c > @@ -69,6 +69,15 @@ OVS_NO_RETURN static void usage(void); > > static char *ovs_remote; > > +/* Contains "struct local_datapath" nodes whose hash values are the > + * tunnel_key of datapaths with at least one local port binding. */ > +static struct hmap local_datapaths = HMAP_INITIALIZER(&local_datapaths); > +static struct hmap patched_datapaths = HMAP_INITIALIZER(&patched_datapaths); > + > +static struct lport_index lports; > +static struct mcgroup_index mcgroups; > +static struct hmap local_routers; > + > struct local_datapath * > get_local_datapath(const struct hmap *local_datapaths, uint32_t tunnel_key) > { > @@ -128,6 +137,131 @@ get_bridge(struct ovsdb_idl *ovs_idl, const char > *br_name) > return NULL; > } > > +static void > +add_local_router(uint32_t tunnel_key) > +{ > + struct local_router *lr = get_local_router(tunnel_key); > + if (lr) { > + lr->stale = false; > + return; > + } > + lr = xzalloc(sizeof *lr); > + hmap_insert(&local_routers, &lr->hmap_node, tunnel_key); > +} > + > +static void > +build_local_routers(struct controller_ctx *ctx, > + struct lport_index *lports) > +{ > + /* Mark all local routers as stale for later cleanup check */ > + struct local_router *lr; > + HMAP_FOR_EACH (lr, hmap_node, &local_routers) { > + lr->stale = true; > + } > + > + /* Hopefully, no one is using more than 2 or 3 levels of > + * datapath recursion */ > +#define MAX_DATAPATH_DEPTH_SUPPORTED 10 > + bool flood_fill_end = false; > + for (int i = 0; i < MAX_DATAPATH_DEPTH_SUPPORTED; i++) { > + if (flood_fill_end) { > + break; > + } > + flood_fill_end = true; > + const struct sbrec_port_binding *binding_rec; > + SBREC_PORT_BINDING_FOR_EACH(binding_rec, ctx->ovnsb_idl) { > + if ((!strcmp(binding_rec->type, "patch") || > + !strcmp(binding_rec->type, "gateway")) && > + binding_rec->datapath && get_patched_datapath( > + &patched_datapaths, > + binding_rec->datapath->tunnel_key)) { > + > + const char *plp = smap_get(&binding_rec->options, "peer"); > + const struct lport_index *lports_ = lports; > + const struct sbrec_port_binding *peer_binding_rec = > + lport_lookup_by_name(lports_, plp); > + > + struct uuid key; > + if (peer_binding_rec && peer_binding_rec->datapath) { > + if (get_local_datapath(&local_datapaths, > + binding_rec->datapath->tunnel_key) && > + smap_get_uuid( > + &peer_binding_rec->datapath->external_ids, > + "logical-router", &key)) { > + add_local_router( > + peer_binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } else if (get_local_datapath(&local_datapaths, > + peer_binding_rec->datapath->tunnel_key) > && > + smap_get_uuid( > + &binding_rec->datapath->external_ids, > + "logical-router", &key)) { > + add_local_router(binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } else if (get_local_router( > + binding_rec->datapath->tunnel_key) && > + smap_get_uuid( > + > &peer_binding_rec->datapath->external_ids, > + "logical-router", &key) ) { > + add_local_router( > + peer_binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } else if 
(get_local_router( > + peer_binding_rec->datapath->tunnel_key) > && > + smap_get_uuid( > + &binding_rec->datapath->external_ids, > + "logical-router", &key)) { > + add_local_router(binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } else if (get_local_router( > + peer_binding_rec->datapath->tunnel_key) > && > + smap_get_uuid( > + &binding_rec->datapath->external_ids, > + "logical-switch", &key)) { > + /* Add the logical switch to the local routers map, > + * as it may be a Transit LS and we need to continue > + * to follow the patch ports. */ > + add_local_router(binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } else if (get_local_router( > + binding_rec->datapath->tunnel_key) && > + smap_get_uuid( > + > &peer_binding_rec->datapath->external_ids, > + "logical-switch", &key)) { > + /* Add the logical switch to the local routers map, > + * as it may be a Transit LS and we need to continue > + * to follow the patch ports. */ > + add_local_router( > + peer_binding_rec->datapath->tunnel_key); > + flood_fill_end = false; > + } > + } > + } > + } > + } > + > + /* Clean up stale local_routers. */ > + struct local_router *lr_cur_node, *lr_next_node; > + HMAP_FOR_EACH_SAFE (lr_cur_node, lr_next_node, hmap_node, > &local_routers) { > + if (lr_cur_node->stale == true) { > + hmap_remove(&local_routers, &lr_cur_node->hmap_node); > + free(lr_cur_node); > + } > + } > +} > + > +const struct local_router * > +get_local_router(uint32_t tunnel_key) > +{ > + struct hmap_node *node = hmap_first_with_hash(&local_routers, > + tunnel_key); > + > + const struct local_router *local_router = node ? > + CONTAINER_OF(node, struct local_router, hmap_node) : NULL; > + return (local_router ? (!local_router->stale ? local_router : NULL) > + : NULL); > +} > + > static const struct ovsrec_bridge * > create_br_int(struct controller_ctx *ctx, > const struct ovsrec_open_vswitch *cfg, > @@ -299,14 +433,6 @@ update_ct_zones(struct sset *lports, struct hmap > *patched_datapaths, > sset_destroy(&all_users); > } > > -/* Contains "struct local_datapath" nodes whose hash values are the > - * tunnel_key of datapaths with at least one local port binding. */ > -static struct hmap local_datapaths = HMAP_INITIALIZER(&local_datapaths); > -static struct hmap patched_datapaths = HMAP_INITIALIZER(&patched_datapaths); > - > -static struct lport_index lports; > -static struct mcgroup_index mcgroups; > - > int > main(int argc, char *argv[]) > { > @@ -392,9 +518,15 @@ main(int argc, char *argv[]) > unixctl_command_register("ct-zone-list", "", 0, 0, > ct_zone_list, &ct_zones); > > + hmap_init(&local_routers); > + static bool datapath_assoc_chgd; > + > /* Main loop. */ > exiting = false; > while (!exiting) { > + > + datapath_assoc_chgd = false; > + > /* Check OVN SB database. 
*/ > char *new_ovnsb_remote = get_ovnsb_remote(ovs_idl_loop.idl); > if (strcmp(ovnsb_remote, new_ovnsb_remote)) { > @@ -425,16 +557,21 @@ main(int argc, char *argv[]) > if (chassis_id) { > chassis_run(&ctx, chassis_id); > encaps_run(&ctx, br_int, chassis_id); > - binding_run(&ctx, br_int, chassis_id, &local_datapaths); > + binding_run(&ctx, br_int, chassis_id, &local_datapaths, > + &datapath_assoc_chgd); > } > > if (br_int && chassis_id) { > patch_run(&ctx, br_int, chassis_id, &local_datapaths, > - &patched_datapaths); > + &patched_datapaths, &datapath_assoc_chgd); > > lport_index_fill(&lports, ctx.ovnsb_idl); > mcgroup_index_fill(&mcgroups, ctx.ovnsb_idl); > > + if (datapath_assoc_chgd) { > + build_local_routers(&ctx, &lports); > + } > + > enum mf_field_id mff_ovn_geneve = ofctrl_run(br_int); > > pinctrl_run(&ctx, &lports, br_int, chassis_id, > &local_datapaths); > diff --git a/ovn/controller/ovn-controller.h > b/ovn/controller/ovn-controller.h > index 470b1f5..a71d2bf 100644 > --- a/ovn/controller/ovn-controller.h > +++ b/ovn/controller/ovn-controller.h > @@ -57,6 +57,11 @@ struct patched_datapath { > * port. */ > }; > > +struct local_router { > + struct hmap_node hmap_node; > + bool stale; > +}; > + > struct patched_datapath *get_patched_datapath(const struct hmap *, > uint32_t tunnel_key); > > @@ -66,6 +71,8 @@ const struct ovsrec_bridge *get_bridge(struct ovsdb_idl *, > const struct sbrec_chassis *get_chassis(struct ovsdb_idl *, > const char *chassis_id); > > +const struct local_router *get_local_router(uint32_t); > + > /* Must be a bit-field ordered from most-preferred (higher number) to > * least-preferred (lower number). */ > enum chassis_tunnel_type { > diff --git a/ovn/controller/patch.c b/ovn/controller/patch.c > index 52d9e8d..b0519fa 100644 > --- a/ovn/controller/patch.c > +++ b/ovn/controller/patch.c > @@ -58,7 +58,8 @@ create_patch_port(struct controller_ctx *ctx, > const char *key, const char *value, > const struct ovsrec_bridge *src, const char *src_name, > const struct ovsrec_bridge *dst, const char *dst_name, > - struct shash *existing_ports) > + struct shash *existing_ports, > + bool *datapath_assoc_chgd) > { > for (size_t i = 0; i < src->n_ports; i++) { > if (match_patch_port(src->ports[i], dst_name)) { > @@ -68,6 +69,10 @@ create_patch_port(struct controller_ctx *ctx, > } > } > > + if (datapath_assoc_chgd) { > + *datapath_assoc_chgd = true; > + } > + > ovsdb_idl_txn_add_comment(ctx->ovs_idl_txn, > "ovn-controller: creating patch port '%s' from '%s' to '%s'", > src_name, src->name, dst->name); > @@ -237,10 +242,12 @@ add_bridge_mappings(struct controller_ctx *ctx, > > char *name1 = patch_port_name(br_int->name, binding->logical_port); > char *name2 = patch_port_name(binding->logical_port, br_int->name); > + > create_patch_port(ctx, patch_port_id, binding->logical_port, > - br_int, name1, br_ln, name2, existing_ports); > + br_int, name1, br_ln, name2, existing_ports, > NULL); > create_patch_port(ctx, patch_port_id, binding->logical_port, > - br_ln, name2, br_int, name1, existing_ports); > + br_ln, name2, br_int, name1, existing_ports, > NULL); > + > free(name1); > free(name2); > } > @@ -250,7 +257,8 @@ add_bridge_mappings(struct controller_ctx *ctx, > > static void > add_patched_datapath(struct hmap *patched_datapaths, > - const struct sbrec_port_binding *binding_rec, bool > local) > + const struct sbrec_port_binding *binding_rec, bool > local, > + bool *datapath_assoc_chgd) > { > struct patched_datapath *pd = get_patched_datapath(patched_datapaths, > 
binding_rec->datapath->tunnel_key); > @@ -268,6 +276,7 @@ add_patched_datapath(struct hmap *patched_datapaths, > /* stale is set to false. */ > hmap_insert(patched_datapaths, &pd->hmap_node, > binding_rec->datapath->tunnel_key); > + *datapath_assoc_chgd = true; > } > > static void > @@ -284,7 +293,8 @@ add_logical_patch_ports_preprocess(struct hmap > *patched_datapaths) > /* This function should cleanup stale patched datapaths and any memory > * allocated for fields within a stale patched datapath. */ > static void > -add_logical_patch_ports_postprocess(struct hmap *patched_datapaths) > +add_logical_patch_ports_postprocess(struct hmap *patched_datapaths, > + bool *datapath_assoc_chgd) > { > /* Clean up stale patched datapaths. */ > struct patched_datapath *pd_cur_node, *pd_next_node; > @@ -294,6 +304,7 @@ add_logical_patch_ports_postprocess(struct hmap > *patched_datapaths) > hmap_remove(patched_datapaths, &pd_cur_node->hmap_node); > free(pd_cur_node->key); > free(pd_cur_node); > + datapath_assoc_chgd = true; > } > } > } > @@ -325,7 +336,8 @@ add_logical_patch_ports(struct controller_ctx *ctx, > const struct ovsrec_bridge *br_int, > const char *local_chassis_id, > struct shash *existing_ports, > - struct hmap *patched_datapaths) > + struct hmap *patched_datapaths, > + bool *datapath_assoc_chgd) > { > const struct sbrec_chassis *chassis_rec; > chassis_rec = get_chassis(ctx->ovnsb_idl, local_chassis_id); > @@ -357,10 +369,11 @@ add_logical_patch_ports(struct controller_ctx *ctx, > char *dst_name = patch_port_name(peer, local); > create_patch_port(ctx, "ovn-logical-patch-port", local, > br_int, src_name, br_int, dst_name, > - existing_ports); > + existing_ports, datapath_assoc_chgd); > free(dst_name); > free(src_name); > - add_patched_datapath(patched_datapaths, binding, local_port); > + add_patched_datapath(patched_datapaths, binding, local_port, > + datapath_assoc_chgd); > if (local_port) { > if (binding->chassis != chassis_rec && ctx->ovnsb_idl_txn) { > sbrec_port_binding_set_chassis(binding, chassis_rec); > @@ -368,13 +381,14 @@ add_logical_patch_ports(struct controller_ctx *ctx, > } > } > } > - add_logical_patch_ports_postprocess(patched_datapaths); > + add_logical_patch_ports_postprocess(patched_datapaths, > + datapath_assoc_chgd); > } > > void > patch_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, > const char *chassis_id, struct hmap *local_datapaths, > - struct hmap *patched_datapaths) > + struct hmap *patched_datapaths, bool *datapath_assoc_chgd) > { > if (!ctx->ovs_idl_txn) { > return; > @@ -396,7 +410,7 @@ patch_run(struct controller_ctx *ctx, const struct > ovsrec_bridge *br_int, > * should be there. */ > add_bridge_mappings(ctx, br_int, &existing_ports, local_datapaths, > chassis_id); > add_logical_patch_ports(ctx, br_int, chassis_id, &existing_ports, > - patched_datapaths); > + patched_datapaths, datapath_assoc_chgd); > > /* Now 'existing_ports' only still contains patch ports that exist in > the > * database but shouldn't. Delete them from the database. */ > @@ -405,6 +419,8 @@ patch_run(struct controller_ctx *ctx, const struct > ovsrec_bridge *br_int, > struct ovsrec_port *port = port_node->data; > shash_delete(&existing_ports, port_node); > remove_port(ctx, port); > + /* This may rarely lead to false positives. 
*/ > + *datapath_assoc_chgd = true; > } > shash_destroy(&existing_ports); > } > diff --git a/ovn/controller/patch.h b/ovn/controller/patch.h > index 7920a48..76d141c 100644 > --- a/ovn/controller/patch.h > +++ b/ovn/controller/patch.h > @@ -22,12 +22,14 @@ > * This module adds and removes patch ports between the integration bridge > and > * physical bridges, as directed by other-config:ovn-bridge-mappings. */ > > +#include <stdbool.h> > + > struct controller_ctx; > struct hmap; > struct ovsrec_bridge; > > void patch_run(struct controller_ctx *, const struct ovsrec_bridge *br_int, > const char *chassis_id, struct hmap *local_datapaths, > - struct hmap *patched_datapaths); > + struct hmap *patched_datapaths, bool *datapath_assoc_chgd); > > #endif /* ovn/patch.h */ > diff --git a/tests/ovn.at b/tests/ovn.at > index 16262cd..8ea7e93 100644 > --- a/tests/ovn.at > +++ b/tests/ovn.at > @@ -3395,3 +3395,156 @@ AT_CHECK([ovs-appctl -t ovn-controller version], [0], > [ignore]) > OVN_CLEANUP([hv1]) > > AT_CLEANUP > + > +AT_SETUP([ovn -- 2 HVs, 3 LS, 1 lport/LS, 2 peer LRs, local router]) > +AT_KEYWORDS([ovnlocalrouter]) > +AT_SKIP_IF([test $HAVE_PYTHON = no]) > +ovn_start > + > +# This simple testcase is used to demonstrate that logical router > +# openflow rules are not calculated and downloaded on HV1; this is > +# because HV1 has no VIFs attached. Conversely, logical router flows > +# are downloaded to HV2 following a flood fill algorithm as HV2 has > +# at least one VIF attached and logical associations alice/R2, bob/R2 > +# and R2/R1. > + > +# Logical network: > +# Two LRs - R1 and R2 that are connected to each other as peers in > 20.0.0.0/24 > +# network. R1 has switch foo (192.168.1.0/24) connected to it. > +# R2 has alice (172.16.1.0/24) and bob (172.16.2.0/24) connected to it. > + > +ovn-nbctl ls-add foo > +ovn-nbctl ls-add alice > +ovn-nbctl ls-add bob > + > +ovn-nbctl lr-add R1 > +ovn-nbctl lr-add R2 > + > +# Connect foo to R1 > +ovn-nbctl lrp-add R1 foo 00:00:00:01:02:03 192.168.1.1/24 > +ovn-nbctl lsp-add foo rp-foo -- set Logical_Switch_Port rp-foo type=router \ > + options:router-port=foo addresses=\"00:00:00:01:02:03\" > + > +# Connect alice to R2 > +ovn-nbctl lrp-add R2 alice 00:00:00:01:02:04 172.16.1.1/24 > +ovn-nbctl lsp-add alice rp-alice -- set Logical_Switch_Port rp-alice > type=router \ > + options:router-port=alice addresses=\"00:00:00:01:02:04\" > + > +# Connect bob to R2 > +ovn-nbctl lrp-add R2 bob 00:00:00:01:02:05 172.16.2.1/24 > +ovn-nbctl lsp-add bob rp-bob -- set Logical_Switch_Port rp-bob type=router \ > + options:router-port=bob addresses=\"00:00:00:01:02:05\" > + > +# Create logical port alice1 in alice > +ovn-nbctl lsp-add alice alice1 \ > +-- lsp-set-addresses alice1 "f0:00:00:01:02:04 172.16.1.2" > + > +# Create logical port bob1 in bob > +ovn-nbctl lsp-add bob bob1 \ > +-- lsp-set-addresses bob1 "f0:00:00:01:02:05 172.16.2.2" > + > +# Create two hypervisor and create OVS ports corresponding to logical ports. 
> +net_add n1 > + > +sim_add hv1 > +as hv1 > +ovs-vsctl add-br br-phys > +ovn_attach n1 br-phys 192.168.0.1 > + > +sim_add hv2 > +as hv2 > +ovs-vsctl add-br br-phys > +ovn_attach n1 br-phys 192.168.0.2 > +ovs-vsctl -- add-port br-int hv2-vif1 -- \ > + set interface hv2-vif1 external-ids:iface-id=bob1 \ > + options:tx_pcap=hv2/vif1-tx.pcap \ > + options:rxq_pcap=hv2/vif1-rx.pcap \ > + ofport-request=1 > + > +ovs-vsctl -- add-port br-int hv2-vif2 -- \ > + set interface hv2-vif2 external-ids:iface-id=alice1 \ > + options:tx_pcap=hv2/vif2-tx.pcap \ > + options:rxq_pcap=hv2/vif2-rx.pcap \ > + ofport-request=2 > + > + > +# Pre-populate the hypervisors' ARP tables so that we don't lose any > +# packets for ARP resolution (native tunneling doesn't queue packets > +# for ARP resolution). > +ovn_populate_arp > + > +# Allow some time for ovn-northd and ovn-controller to catch up. > +# XXX This should be more systematic. > +sleep 1 > + > +ip_to_hex() { > + printf "%02x%02x%02x%02x" "$@" > +} > +trim_zeros() { > + sed 's/\(00\)\{1,\}$//' > +} > + > +# Send ip packets between bob1 and alice1 > +src_mac="f00000010205" > +dst_mac="000000010205" > +src_ip=`ip_to_hex 172 16 2 2` > +dst_ip=`ip_to_hex 172 16 1 2` > +packet=${dst_mac}${src_mac}08004500001c0000000040110000${src_ip}${dst_ip}0035111100080000 > +as hv2 ovs-appctl netdev-dummy/receive hv2-vif1 $packet > + > +echo "---------NB dump-----" > +ovn-nbctl show > +echo "---------------------" > +ovn-nbctl list logical_router > +echo "---------------------" > +ovn-nbctl list logical_router_port > +echo "---------------------" > + > +echo "---------SB dump-----" > +ovn-sbctl list datapath_binding > +echo "---------------------" > +ovn-sbctl list port_binding > +echo "---------------------" > +ovn-sbctl dump-flows > + > +echo "------ hv1 dump ----------" > +as hv1 ovs-vsctl show > +as hv1 ovs-ofctl show br-int > +as hv1 ovs-ofctl dump-flows br-int > +echo "------ hv2 dump ----------" > +as hv2 ovs-vsctl show > +as hv2 ovs-ofctl show br-int > +as hv2 ovs-ofctl dump-flows br-int > + > +# Packet to Expect at alice1 > +src_mac="000000010204" > +dst_mac="f00000010204" > +src_ip=`ip_to_hex 172 16 2 2` > +dst_ip=`ip_to_hex 172 16 1 2` > +expected=${dst_mac}${src_mac}08004500001c000000003f110100${src_ip}${dst_ip}0035111100080000 > + > +$PYTHON "$top_srcdir/utilities/ovs-pcap.in" hv2/vif2-tx.pcap | trim_zeros > > received.packets > +echo $expected | trim_zeros > expout > +AT_CHECK([cat received.packets], [0], [expout]) > +

Hi Darrell,

The following lines can be replaced with:

    OVN_CLEANUP([hv1],[hv2])

> +for sim in hv1 hv2; do
> +    as $sim
> +    OVS_APP_EXIT_AND_WAIT([ovn-controller])
> +    OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
> +    OVS_APP_EXIT_AND_WAIT([ovsdb-server])
> +done
> +
> +as ovn-sb
> +OVS_APP_EXIT_AND_WAIT([ovsdb-server])
> +
> +as ovn-nb
> +OVS_APP_EXIT_AND_WAIT([ovsdb-server])
> +
> +as northd
> +OVS_APP_EXIT_AND_WAIT([ovn-northd])
> +
> +as main
> +OVS_APP_EXIT_AND_WAIT([ovs-vswitchd])
> +OVS_APP_EXIT_AND_WAIT([ovsdb-server])
> +
> +AT_CLEANUP
> --
> 1.9.1
>
> _______________________________________________
> dev mailing list
> dev@openvswitch.org
> http://openvswitch.org/mailman/listinfo/dev
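
Since the commit message above only sketches the flood-fill idea, here is a minimal standalone illustration of the mark/flood-fill/sweep pattern that build_local_routers() follows: mark the current set stale, re-add everything reachable from datapaths with a local VIF by following patch-port peer links until a pass adds nothing new, then sweep whatever stayed stale. This is not code from the patch: the struct dp, build_set() and in_set names are invented for illustration, a plain array stands in for ovn-controller's hmap, each datapath gets at most one peer link, and the real code's extra checks (the "logical-router"/"logical-switch" external_ids, the transit-LS cases, and only recomputing when datapath_assoc_chgd is set) are omitted.

/* Standalone sketch only -- not part of the patch. */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dp {
    uint32_t tunnel_key;    /* datapath identity */
    bool local;             /* has a local VIF binding (seed of the fill) */
    int peer;               /* index of the peered datapath, or -1 */
    bool in_set;            /* member of the computed "needs flows" set */
    bool stale;             /* mark-and-sweep flag, like local_router->stale */
};

static void
build_set(struct dp dps[], int n)
{
    /* Mark phase: everything currently in the set is provisionally stale;
     * datapaths with a local VIF seed the fill. */
    for (int i = 0; i < n; i++) {
        dps[i].stale = dps[i].in_set;
        if (dps[i].local) {
            dps[i].in_set = true;
            dps[i].stale = false;
        }
    }

    /* Flood fill: follow peer links until a full pass adds nothing new. */
    bool changed = true;
    while (changed) {
        changed = false;
        for (int i = 0; i < n; i++) {
            if (dps[i].in_set && !dps[i].stale && dps[i].peer >= 0) {
                struct dp *peer = &dps[dps[i].peer];
                if (!peer->in_set || peer->stale) {
                    peer->in_set = true;
                    peer->stale = false;
                    changed = true;
                }
            }
        }
    }

    /* Sweep phase: entries not reconfirmed by this run drop out, mirroring
     * the "Clean up stale local_routers" loop in the patch. */
    for (int i = 0; i < n; i++) {
        if (dps[i].stale) {
            dps[i].in_set = false;
            dps[i].stale = false;
        }
    }
}

int
main(void)
{
    /* Chain loosely modeled on the test topology: a switch with a local
     * VIF peers to R2, R2 peers to R1, R1 peers to a remote-only switch. */
    struct dp dps[] = {
        { .tunnel_key = 1, .local = true,  .peer = 1 },
        { .tunnel_key = 2, .local = false, .peer = 2 },
        { .tunnel_key = 3, .local = false, .peer = 3 },
        { .tunnel_key = 4, .local = false, .peer = -1 },
    };
    int n = sizeof dps / sizeof dps[0];

    build_set(dps, n);
    for (int i = 0; i < n; i++) {
        printf("datapath %" PRIu32 ": %s\n", dps[i].tunnel_key,
               dps[i].in_set ? "program flows" : "skip");
    }
    return 0;
}

Built with gcc -std=c99, this prints "program flows" for every datapath in the chain on the hypervisor that hosts the VIF, and would print "skip" for all of them on a hypervisor with no local VIF -- loosely the HV1/HV2 split the test at the end of the patch demonstrates.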