From: Babu Shanmugam <bscha...@redhat.com>

ovn-northd processes the list of Port_Bindings and hashes the list of queues per chassis. When it finds a port that has qos_parameters but no queue_id, it allocates a free queue for the chassis that the port belongs to. The queue_id information is stored in the options field of the Port_Binding table. A new set_queue action is added to ingress table 0 of the logical flows, which ovn-controller translates to the OpenFlow set_queue action.
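As an illustration of the intended workflow (a hedged sketch: the port name and the rate/burst numbers below are made up, and the northbound table is assumed to be Logical_Switch_Port), setting the new qos_* options on a logical port whose Port_Binding is already bound to a chassis makes ovn-northd pick a free per-chassis queue id and publish it southbound:

    # Hypothetical values; only the option names come from this patch.
    ovn-nbctl set Logical_Switch_Port lp1 options:qos_max_rate=100000000 \
                                          options:qos_burst=25000000

    # ovn-northd copies the options into the SB Port_Binding and adds the
    # allocated queue id, e.g.
    #   options: {qos_burst="25000000", qos_max_rate="100000000",
    #             qdisc_queue_id="1"}
    # and the port's ingress table 0 flow gains the queue action, roughly
    #   match=(inport == "lp1"), action=(set_queue(1); next;)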
ovn-controller opens the netdev corresponding to the tunnel interface's status:tunnel_egress_iface value and configures a HTB qdisc on it. Then for each SB port_binding that has queue_id set, it allocates a queue with the qos_parameters of that port. It also frees up unused queues. This patch replaces the older approach of policing Signed-off-by: Babu Shanmugam <bscha...@redhat.com> --- include/ovn/actions.h | 7 ++ ovn/controller/binding.c | 239 ++++++++++++++++++++++++++++++++++++++++++----- ovn/lib/actions.c | 29 ++++++ ovn/northd/ovn-northd.c | 131 ++++++++++++++++++++++++-- ovn/ovn-nb.xml | 8 +- ovn/ovn-sb.xml | 37 +++++++- tests/ovn.at | 5 + 7 files changed, 417 insertions(+), 39 deletions(-) diff --git a/include/ovn/actions.h b/include/ovn/actions.h index a395ce9..c064275 100644 --- a/include/ovn/actions.h +++ b/include/ovn/actions.h @@ -24,6 +24,13 @@ #include "openvswitch/dynamic-string.h" #include "util.h" +/* Valid arguments to set_queue() action. + * + * QDISC_MIN_QUEUE_ID is the default queue, so user-defined queues should + * start at QDISC_MIN_QUEUE_ID+1. */ +#define QDISC_MIN_QUEUE_ID 0 +#define QDISC_MAX_QUEUE_ID 0xf000 + struct expr; struct lexer; struct ofpbuf; diff --git a/ovn/controller/binding.c b/ovn/controller/binding.c index 3073727..02f55ae 100644 --- a/ovn/controller/binding.c +++ b/ovn/controller/binding.c @@ -22,6 +22,7 @@ #include "lib/poll-loop.h" #include "lib/sset.h" #include "lib/util.h" +#include "lib/netdev.h" #include "lib/vswitch-idl.h" #include "openvswitch/hmap.h" #include "openvswitch/vlog.h" @@ -30,6 +31,8 @@ VLOG_DEFINE_THIS_MODULE(binding); +#define OVN_QOS_TYPE "linux-htb" + /* A set of the iface-id values of local interfaces on this chassis. */ static struct sset local_ids = SSET_INITIALIZER(&local_ids); @@ -42,6 +45,13 @@ binding_reset_processing(void) process_full_binding = true; } +struct qos_queue { + struct hmap_node node; + uint32_t queue_id; + uint32_t max_rate; + uint32_t burst; +}; + void binding_register_ovs_idl(struct ovsdb_idl *ovs_idl) { @@ -55,19 +65,22 @@ binding_register_ovs_idl(struct ovsdb_idl *ovs_idl) ovsdb_idl_add_table(ovs_idl, &ovsrec_table_port); ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_name); ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_interfaces); + ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_qos); ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface); ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_name); ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_external_ids); - ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_ingress_policing_rate); - ovsdb_idl_add_column(ovs_idl, - &ovsrec_interface_col_ingress_policing_burst); + ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_status); + + ovsdb_idl_add_table(ovs_idl, &ovsrec_table_qos); + ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_type); } static bool get_local_iface_ids(const struct ovsrec_bridge *br_int, struct shash *lport_to_iface, - struct sset *all_lports) + struct sset *all_lports, + struct sset *egress_ifaces) { int i; bool changed = false; @@ -89,14 +102,24 @@ get_local_iface_ids(const struct ovsrec_bridge *br_int, iface_rec = port_rec->interfaces[j]; iface_id = smap_get(&iface_rec->external_ids, "iface-id"); - if (!iface_id) { - continue; + + if (iface_id) { + shash_add(lport_to_iface, iface_id, iface_rec); + if (!sset_find_and_delete(&old_local_ids, iface_id)) { + sset_add(&local_ids, iface_id); + sset_add(all_lports, iface_id); + changed = true; + } } - shash_add(lport_to_iface, iface_id, iface_rec); - if 
(!sset_find_and_delete(&old_local_ids, iface_id)) { - sset_add(&local_ids, iface_id); - sset_add(all_lports, iface_id); - changed = true; + + /* Check if this is a tunnel interface. */ + if (smap_get(&iface_rec->options, "remote_ip")) { + const char *tunnel_iface + = smap_get(&iface_rec->status, "tunnel_egress_iface"); + if (tunnel_iface) { + sset_add(egress_ifaces, + tunnel_iface); + } } } } @@ -162,20 +185,166 @@ add_local_datapath(struct hmap *local_datapaths, } static void -update_qos(const struct ovsrec_interface *iface_rec, - const struct sbrec_port_binding *pb) +get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map) +{ + uint32_t max_rate = smap_get_int(&pb->options, "qos_max_rate", 0); + uint32_t burst = smap_get_int(&pb->options, "qos_burst", 0); + uint32_t queue_id = smap_get_int(&pb->options, "qdisc_queue_id", 0); + + if ((!max_rate && !burst) || !queue_id) { + /* Qos is not configured for this port. */ + return; + } + + struct qos_queue *node = xzalloc(sizeof *node); + hmap_insert(queue_map, &node->node, hash_int(queue_id, 0)); + node->max_rate = max_rate; + node->burst = burst; + node->queue_id = queue_id; +} + +static const struct ovsrec_qos * +get_noop_qos(struct controller_ctx *ctx) +{ + const struct ovsrec_qos *qos; + OVSREC_QOS_FOR_EACH (qos, ctx->ovs_idl) { + if (!strcmp(qos->type, "linux-noop")) { + return qos; + } + } + + if (!ctx->ovs_idl_txn) { + return NULL; + } + qos = ovsrec_qos_insert(ctx->ovs_idl_txn); + ovsrec_qos_set_type(qos, "linux-noop"); + return qos; +} + +static bool +set_noop_qos(struct controller_ctx *ctx, struct sset *egress_ifaces) { - int rate = smap_get_int(&pb->options, "policing_rate", 0); - int burst = smap_get_int(&pb->options, "policing_burst", 0); + if (!ctx->ovs_idl_txn) { + return false; + } - ovsrec_interface_set_ingress_policing_rate(iface_rec, MAX(0, rate)); - ovsrec_interface_set_ingress_policing_burst(iface_rec, MAX(0, burst)); + const struct ovsrec_qos *noop_qos = get_noop_qos(ctx); + if (!noop_qos) { + return false; + } + + const struct ovsrec_port *port; + size_t count = 0; + + OVSREC_PORT_FOR_EACH (port, ctx->ovs_idl) { + if (sset_contains(egress_ifaces, port->name)) { + ovsrec_port_set_qos(port, noop_qos); + count++; + } + if (sset_count(egress_ifaces) == count) { + break; + } + } + return true; +} + +static void +setup_qos(const char *egress_iface, struct hmap *queue_map) +{ + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5); + struct netdev *netdev_phy; + + if (!egress_iface) { + /* Queues cannot be configured. */ + return; + } + + int error = netdev_open(egress_iface, NULL, &netdev_phy); + if (error) { + VLOG_WARN_RL(&rl, "%s: could not open netdev (%s)", + egress_iface, ovs_strerror(error)); + return; + } + + /* Check and configure qdisc. */ + const char *qdisc_type; + struct smap qdisc_details; + + smap_init(&qdisc_details); + if (netdev_get_qos(netdev_phy, &qdisc_type, &qdisc_details) != 0 || + qdisc_type[0] == '\0') { + /* Qos is not supported. */ + return; + } + if (strcmp(qdisc_type, OVN_QOS_TYPE)) { + error = netdev_set_qos(netdev_phy, OVN_QOS_TYPE, &qdisc_details); + if (error) { + VLOG_WARN_RL(&rl, "%s: could not configure QoS (%s)", + egress_iface, ovs_strerror(error)); + } + } + + /* Check and delete if needed. 
*/ + struct netdev_queue_dump dump; + unsigned int queue_id; + struct smap queue_details; + struct qos_queue *sb_info; + struct hmap consistent_queues; + + smap_init(&queue_details); + hmap_init(&consistent_queues); + NETDEV_QUEUE_FOR_EACH (&queue_id, &queue_details, &dump, netdev_phy) { + bool is_queue_needed = false; + + HMAP_FOR_EACH_WITH_HASH (sb_info, node, hash_int(queue_id, 0), + queue_map) { + is_queue_needed = true; + if (sb_info->max_rate == + smap_get_int(&queue_details, "max-rate", 0) + && sb_info->burst == smap_get_int(&queue_details, "burst", 0)) { + /* This queue is consistent. */ + hmap_insert(&consistent_queues, &sb_info->node, + hash_int(queue_id, 0)); + break; + } + } + + if (!is_queue_needed) { + error = netdev_delete_queue(netdev_phy, queue_id); + if (error) { + VLOG_WARN_RL(&rl, "%s: could not delete queue %u (%s)", + egress_iface, queue_id, ovs_strerror(error)); + } + } + } + + /* Create/Update queues. */ + HMAP_FOR_EACH (sb_info, node, queue_map) { + if (hmap_contains(&consistent_queues, &sb_info->node)) { + hmap_remove(&consistent_queues, &sb_info->node); + continue; + } + + smap_clear(&queue_details); + smap_add_format(&queue_details, "max-rate", "%d", sb_info->max_rate); + smap_add_format(&queue_details, "burst", "%d", sb_info->burst); + error = netdev_set_queue(netdev_phy, sb_info->queue_id, + &queue_details); + if (error) { + VLOG_WARN_RL(&rl, "%s: could not configure queue %u (%s)", + egress_iface, sb_info->queue_id, ovs_strerror(error)); + } + } + smap_destroy(&queue_details); + hmap_destroy(&consistent_queues); + netdev_close(netdev_phy); } static void consider_local_datapath(struct controller_ctx *ctx, const struct sbrec_chassis *chassis_rec, const struct sbrec_port_binding *binding_rec, + struct hmap *qos_map, struct hmap *local_datapaths, struct shash *lport_to_iface, struct sset *all_lports) @@ -187,8 +356,8 @@ consider_local_datapath(struct controller_ctx *ctx, || (binding_rec->parent_port && binding_rec->parent_port[0] && sset_contains(&local_ids, binding_rec->parent_port))) { add_local_datapath(local_datapaths, binding_rec); - if (iface_rec && ctx->ovs_idl_txn) { - update_qos(iface_rec, binding_rec); + if (iface_rec && qos_map && ctx->ovs_idl_txn) { + get_qos_params(binding_rec, qos_map); } if (binding_rec->chassis == chassis_rec) { return; @@ -256,15 +425,20 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, const struct sbrec_chassis *chassis_rec; const struct sbrec_port_binding *binding_rec; struct shash lport_to_iface = SHASH_INITIALIZER(&lport_to_iface); + struct sset egress_ifaces = SSET_INITIALIZER(&egress_ifaces); + struct hmap qos_map; + bool is_qos_map_used = false; chassis_rec = get_chassis(ctx->ovnsb_idl, chassis_id); if (!chassis_rec) { return; } + hmap_init(&qos_map); if (br_int) { - if (ctx->ovnsb_idl_txn && get_local_iface_ids(br_int, &lport_to_iface, - all_lports)) { + if (ctx->ovnsb_idl_txn + && get_local_iface_ids(br_int, &lport_to_iface, + all_lports, &egress_ifaces)) { process_full_binding = true; } } else { @@ -288,8 +462,9 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, SBREC_PORT_BINDING_FOR_EACH(binding_rec, ctx->ovnsb_idl) { sset_find_and_delete(&removed_lports, binding_rec->logical_port); consider_local_datapath(ctx, chassis_rec, binding_rec, - local_datapaths, &lport_to_iface, - all_lports); + sset_is_empty(&egress_ifaces) ? 
NULL : + &qos_map, local_datapaths, + &lport_to_iface, all_lports); struct local_datapath *ld = xzalloc(sizeof *ld); memcpy(&ld->uuid, &binding_rec->header_.uuid, sizeof ld->uuid); hmap_insert(&keep_local_datapath_by_uuid, &ld->uuid_hmap_node, @@ -311,6 +486,7 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, sset_find_and_delete(all_lports, cur_id); } + is_qos_map_used = true; process_full_binding = false; } else { SBREC_PORT_BINDING_FOR_EACH_TRACKED(binding_rec, ctx->ovnsb_idl) { @@ -325,13 +501,26 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int, } } else { consider_local_datapath(ctx, chassis_rec, binding_rec, - local_datapaths, &lport_to_iface, - all_lports); + sset_is_empty(&egress_ifaces) ? NULL : + &qos_map, local_datapaths, + &lport_to_iface, all_lports); + is_qos_map_used = true; } } } + if (is_qos_map_used + && !sset_is_empty(&egress_ifaces) + && set_noop_qos(ctx, &egress_ifaces)) { + const char *entry; + SSET_FOR_EACH (entry, &egress_ifaces) { + setup_qos(entry, &qos_map); + } + } + shash_destroy(&lport_to_iface); + sset_destroy(&egress_ifaces); + hmap_destroy(&qos_map); } /* Returns true if the database is all cleaned up, false if more work is diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c index b9d1205..60c72e3 100644 --- a/ovn/lib/actions.c +++ b/ovn/lib/actions.c @@ -826,6 +826,33 @@ parse_put_nd_action(struct action_context *ctx) } static void +parse_set_queue_action(struct action_context *ctx) +{ + int queue_id; + + if (!lexer_match(ctx->lexer, LEX_T_LPAREN)) { + action_syntax_error(ctx, "expecting `('"); + return; + } + if (!action_get_int(ctx, &queue_id)) { + return; + } + if (!lexer_match(ctx->lexer, LEX_T_RPAREN)) { + action_syntax_error(ctx, "expecting `)'"); + return; + } + if (queue_id < QDISC_MIN_QUEUE_ID || queue_id > QDISC_MAX_QUEUE_ID) { + action_error(ctx, "Queue ID %d for set_queue is " + "not in valid range %d to %d.", + queue_id, QDISC_MIN_QUEUE_ID, QDISC_MAX_QUEUE_ID); + return; + } + + struct ofpact_queue *set_queue = ofpact_put_SET_QUEUE(ctx->ofpacts); + set_queue->queue_id = queue_id; +} + +static void emit_ct(struct action_context *ctx, bool recirc_next, bool commit, int *ct_mark, int *ct_mark_mask, ovs_be128 *ct_label, ovs_be128 *ct_label_mask) @@ -1125,6 +1152,8 @@ parse_action(struct action_context *ctx) parse_get_nd_action(ctx); } else if (lexer_match_id(ctx->lexer, "put_nd")) { parse_put_nd_action(ctx); + } else if (lexer_match_id(ctx->lexer, "set_queue")) { + parse_set_queue_action(ctx); } else { action_syntax_error(ctx, "expecting action"); } diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c index d6c14cf..b23cb4d 100644 --- a/ovn/northd/ovn-northd.c +++ b/ovn/northd/ovn-northd.c @@ -31,6 +31,7 @@ #include "ovn/lib/ovn-nb-idl.h" #include "ovn/lib/ovn-sb-idl.h" #include "ovn/lib/ovn-util.h" +#include "ovn/actions.h" #include "packets.h" #include "poll-loop.h" #include "smap.h" @@ -275,6 +276,79 @@ allocate_tnlid(struct hmap *set, const char *name, uint32_t max, return 0; } +struct ovn_chassis_qdisc_queues { + struct hmap_node key_node; + uint32_t queue_id; +}; + +static void +destroy_chassis_queues(struct hmap *set) +{ + struct ovn_chassis_qdisc_queues *node; + HMAP_FOR_EACH_POP (node, key_node, set) { + free(node); + } + hmap_destroy(set); +} + +static void +add_chassis_queue(struct hmap *set, const char *chassis_name, + uint32_t queue_id) +{ + struct ovn_chassis_qdisc_queues *node = xmalloc(sizeof *node); + node->queue_id = queue_id; + hmap_insert(set, &node->key_node, 
hash_string(chassis_name, 0)); +} + +static bool +chassis_queueid_in_use(const struct hmap *set, const char *chassis, + uint32_t queue_id) +{ + const struct ovn_chassis_qdisc_queues *node; + HMAP_FOR_EACH_WITH_HASH (node, key_node, hash_string(chassis, 0), set) { + if (node->queue_id == queue_id) { + return true; + } + } + return false; +} + +static uint32_t +allocate_chassis_queueid(struct hmap *set, const char *chassis) +{ + for (uint32_t queue_id = QDISC_MIN_QUEUE_ID + 1; + queue_id <= QDISC_MAX_QUEUE_ID; + queue_id++) { + if (!chassis_queueid_in_use(set, chassis, queue_id)) { + add_chassis_queue(set, chassis, queue_id); + return queue_id; + } + } + + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); + VLOG_WARN_RL(&rl, "all %s queue ids exhausted", chassis); + return 0; +} + +static void +free_chassis_queueid(struct hmap *set, const char * chassis, uint32_t queue_id) +{ + struct ovn_chassis_qdisc_queues *node; + HMAP_FOR_EACH_WITH_HASH (node, key_node, hash_string(chassis, 0), set) { + if (node->queue_id == queue_id) { + hmap_remove(set, &node->key_node); + break; + } + } +} + +static inline bool +port_has_qos_params(const struct smap *opts) +{ + return (smap_get(opts, "qos_max_rate") || + smap_get(opts, "qos_burst")); +} + /* The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or * sb->external_ids:logical-switch. */ struct ovn_datapath { @@ -950,6 +1024,7 @@ build_ipam(struct northd_context *ctx, struct hmap *datapaths, static void join_logical_ports(struct northd_context *ctx, struct hmap *datapaths, struct hmap *ports, + struct hmap *chassis_qdisc_queues, struct ovs_list *sb_only, struct ovs_list *nb_only, struct ovs_list *both) { @@ -982,6 +1057,15 @@ join_logical_ports(struct northd_context *ctx, } op->nbsp = nbsp; ovs_list_remove(&op->list); + + uint32_t queue_id = smap_get_int(&op->sb->options, + "qdisc_queue_id", 0); + if (queue_id && op->sb->chassis) { + add_chassis_queue( + chassis_qdisc_queues, op->sb->chassis->name, + queue_id); + } + ovs_list_push_back(both, &op->list); /* This port exists due to a SB binding, but should @@ -1134,7 +1218,8 @@ join_logical_ports(struct northd_context *ctx, } static void -ovn_port_update_sbrec(const struct ovn_port *op) +ovn_port_update_sbrec(const struct ovn_port *op, + struct hmap *chassis_qdisc_queues) { sbrec_port_binding_set_datapath(op->sb, op->od->sb); if (op->nbrp) { @@ -1162,8 +1247,29 @@ ovn_port_update_sbrec(const struct ovn_port *op) sbrec_port_binding_set_mac(op->sb, NULL, 0); } else { if (strcmp(op->nbsp->type, "router")) { + uint32_t queue_id = smap_get_int( + &op->sb->options, "qdisc_queue_id", 0); + bool has_qos = port_has_qos_params(&op->nbsp->options); + struct smap options; + + if (op->sb->chassis && has_qos && !queue_id) { + queue_id = allocate_chassis_queueid(chassis_qdisc_queues, + op->sb->chassis->name); + } else if (!has_qos && queue_id) { + free_chassis_queueid(chassis_qdisc_queues, + op->sb->chassis->name, + queue_id); + queue_id = 0; + } + + smap_clone(&options, &op->nbsp->options); + if (queue_id) { + smap_add_format(&options, + "qdisc_queue_id", "%d", queue_id); + } + sbrec_port_binding_set_options(op->sb, &options); + smap_destroy(&options); sbrec_port_binding_set_type(op->sb, op->nbsp->type); - sbrec_port_binding_set_options(op->sb, &op->nbsp->options); } else { const char *chassis = NULL; if (op->peer && op->peer->od && op->peer->od->nbr) { @@ -1210,14 +1316,18 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, struct hmap *ports) { struct ovs_list sb_only, nb_only, both; 
+ struct hmap chassis_qdisc_queues; + + hmap_init(&chassis_qdisc_queues); - join_logical_ports(ctx, datapaths, ports, &sb_only, &nb_only, &both); + join_logical_ports(ctx, datapaths, ports, &chassis_qdisc_queues, + &sb_only, &nb_only, &both); /* For logical ports that are in both databases, update the southbound * record based on northbound data. Also index the in-use tunnel_keys. */ struct ovn_port *op, *next; LIST_FOR_EACH_SAFE (op, next, list, &both) { - ovn_port_update_sbrec(op); + ovn_port_update_sbrec(op, &chassis_qdisc_queues); add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key); if (op->sb->tunnel_key > op->od->port_key_hint) { @@ -1233,7 +1343,7 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, } op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn); - ovn_port_update_sbrec(op); + ovn_port_update_sbrec(op, &chassis_qdisc_queues); sbrec_port_binding_set_logical_port(op->sb, op->key); sbrec_port_binding_set_tunnel_key(op->sb, tunnel_key); @@ -1245,6 +1355,8 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, sbrec_port_binding_delete(op->sb); ovn_port_destroy(ports, op); } + + destroy_chassis_queues(&chassis_qdisc_queues); } #define OVN_MIN_MULTICAST 32768 @@ -2401,11 +2513,18 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports, } ds_clear(&match); + ds_clear(&actions); ds_put_format(&match, "inport == %s", op->json_key); build_port_security_l2("eth.src", op->ps_addrs, op->n_ps_addrs, &match); + + const char *queue_id = smap_get(&op->sb->options, "qdisc_queue_id"); + if (queue_id) { + ds_put_format(&actions, "set_queue(%s); ", queue_id); + } + ds_put_cstr(&actions, "next;"); ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_L2, 50, - ds_cstr(&match), "next;"); + ds_cstr(&match), ds_cstr(&actions)); if (op->nbsp->n_port_security) { build_port_security_ip(P_IN, op, lflows); diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml index 4ce295a..03842e8 100644 --- a/ovn/ovn-nb.xml +++ b/ovn/ovn-nb.xml @@ -282,14 +282,14 @@ (empty string) </p> - <column name="options" key="policing_rate"> + <column name="options" key="qos_max_rate"> If set, indicates the maximum rate for data sent from this interface, - in kbps. Data exceeding this rate is dropped. + in bit/s. The traffic will be shaped according to this limit. </column> - <column name="options" key="policing_burst"> + <column name="options" key="qos_burst"> If set, indicates the maximum burst size for data sent from this - interface, in kb. + interface, in bits. </column> </group> </group> diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml index 13c9526..c297c84 100644 --- a/ovn/ovn-sb.xml +++ b/ovn/ovn-sb.xml @@ -1190,6 +1190,29 @@ </dd> <dt> + <code>set_queue(<var>queue_number</var>);</code> + </dt> + + <dd> + <p> + <b>Parameters</b>: Queue number <var>queue_number</var>, in the range 0 to 61440. + </p> + + <p> + This is a logical equivalent of the OpenFlow <code>set_queue</code> + action. It affects packets that egress a hypervisor through a + physical interface. For nonzero <var>queue_number</var>, it + configures packet queuing to match the settings configured for the + <ref table="Port_Binding"/> with + <code>options:qdisc_queue_id</code> matching + <var>queue_number</var>. When <var>queue_number</var> is zero, it + resets queuing to the default strategy. 
+ </p> + + <p><b>Example:</b> <code>set_queue(10);</code></p> + </dd> + + <dt> <code><var>R</var> = put_dhcp_opts(<code>offerip</code> = <var>IP</var>, <var>D1</var> = <var>V1</var>, <var>D2</var> = <var>V2</var>, ..., <var>Dn</var> = <var>Vn</var>);</code> </dt> @@ -1740,14 +1763,20 @@ tcp.flags = RST; (empty string) </p> - <column name="options" key="policing_rate"> + <column name="options" key="qos_max_rate"> If set, indicates the maximum rate for data sent from this interface, - in kbps. Data exceeding this rate is dropped. + in bit/s. The traffic will be shaped according to this limit. </column> - <column name="options" key="policing_burst"> + <column name="options" key="qos_burst"> If set, indicates the maximum burst size for data sent from this - interface, in kb. + interface, in bits. + </column> + + <column name="options" key="qdisc_queue_id" + type='{"type": "integer", "minInteger": 1, "maxInteger": 61440}'> + Indicates the queue number on the physical device. This is same as the + queue_id used in OpenFlow in struct ofp_action_enqueue. </column> </group> diff --git a/tests/ovn.at b/tests/ovn.at index 54fa8c5..a7c597f 100644 --- a/tests/ovn.at +++ b/tests/ovn.at @@ -703,6 +703,11 @@ get_nd(xxreg0, ip6.dst); => Cannot use numeric field xxreg0 where string field i # put_nd put_nd(inport, nd.target, nd.sll); => actions=push:NXM_NX_XXREG0[],push:NXM_OF_ETH_SRC[],push:NXM_NX_ND_SLL[],push:NXM_NX_ND_TARGET[],pop:NXM_NX_XXREG0[],pop:NXM_OF_ETH_SRC[],controller(userdata=00.00.00.04.00.00.00.00),pop:NXM_OF_ETH_SRC[],pop:NXM_NX_XXREG0[], prereqs=((icmp6.type == 0x87 && eth.type == 0x86dd && ip.proto == 0x3a && (eth.type == 0x800 || eth.type == 0x86dd)) || (icmp6.type == 0x88 && eth.type == 0x86dd && ip.proto == 0x3a && (eth.type == 0x800 || eth.type == 0x86dd))) && icmp6.code == 0 && eth.type == 0x86dd && ip.proto == 0x3a && (eth.type == 0x800 || eth.type == 0x86dd) && ip.ttl == 0xff && (eth.type == 0x800 || eth.type == 0x86dd) && icmp6.type == 0x87 && eth.type == 0x86dd && ip.proto == 0x3a && (eth.type == 0x800 || eth.type == 0x86dd) && icmp6.code == 0 && eth.type == 0x86dd && ip.proto == 0x3a && (eth.type == 0x800 || eth.type == 0x86dd) && ip.ttl == 0xff && (eth.type == 0x800 || eth.type == 0x86dd) +# set_queue +set_queue(0); => actions=set_queue:0, prereqs=1 +set_queue(61440); => actions=set_queue:61440, prereqs=1 +set_queue(65535); => Queue ID 65535 for set_queue is not in valid range 0 to 61440. + # Contradictionary prerequisites (allowed but not useful): ip4.src = ip6.src[0..31]; => actions=move:NXM_NX_IPV6_SRC[0..31]->NXM_OF_IP_SRC[], prereqs=eth.type == 0x800 && eth.type == 0x86dd ip4.src <-> ip6.src[0..31]; => actions=push:NXM_NX_IPV6_SRC[0..31],push:NXM_OF_IP_SRC[],pop:NXM_NX_IPV6_SRC[0..31],pop:NXM_OF_IP_SRC[], prereqs=eth.type == 0x800 && eth.type == 0x86dd -- 1.9.1 _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev