From: Babu Shanmugam <bscha...@redhat.com>

ovn-northd processes the list of Port_Bindings and builds a per-chassis hash of
the queue ids in use. When it finds a port that has qos_parameters but no
queue_id, it allocates a free queue for the chassis that the port belongs to.
The queue_id is stored in the options field of the Port_Binding table. This
patch also adds a set_queue action to ingress table 0 of the logical flows,
which ovn-controller translates to the OpenFlow set_queue action.
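
(Usage sketch, not part of this patch: the port name "lsp0" and the generic
"ovn-nbctl set" invocation below are illustrative assumptions. The northbound
knobs that trigger queue allocation are plain options on the switch port, so a
CMS or admin would request shaping along these lines:

    # qos_max_rate is in bit/s, qos_burst in bits.
    ovn-nbctl set Logical_Switch_Port lsp0 options:qos_max_rate=100000000
    ovn-nbctl set Logical_Switch_Port lsp0 options:qos_burst=1000000

ovn-northd copies these options into the southbound Port_Binding and, once the
port is bound to a chassis, adds options:qdisc_queue_id for it.)
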
ovn-controller opens the netdev that corresponds to the tunnel interface's
status:tunnel_egress_iface value and configures an HTB qdisc on it. Then, for
each SB Port_Binding that has a queue_id set, it sets up a queue with the
qos_parameters of that port. It also frees up unused queues. This patch
replaces the older approach of ingress policing.

Signed-off-by: Babu Shanmugam <bscha...@redhat.com>
---
 include/ovn/actions.h    |  17 +++-
 ovn/controller/binding.c | 220 +++++++++++++++++++++++++++++++++++++++++++----
 ovn/lib/actions.c        |  42 +++++++++
 ovn/northd/ovn-northd.c  | 138 +++++++++++++++++++++++++++--
 ovn/ovn-nb.xml           |   8 +-
 ovn/ovn-sb.xml           |  38 +++++++-
 tests/ovn.at             |   8 ++
 7 files changed, 438 insertions(+), 33 deletions(-)

diff --git a/include/ovn/actions.h b/include/ovn/actions.h
index d1942b3..95967f9 100644
--- a/include/ovn/actions.h
+++ b/include/ovn/actions.h
@@ -26,6 +26,14 @@
 #include "openvswitch/uuid.h"
 #include "util.h"
 
+/* Valid arguments to SET_QUEUE action.
+ *
+ * QDISC_MIN_QUEUE_ID is the default queue, so user-defined queues should
+ * start at QDISC_MIN_QUEUE_ID+1. */
+#define QDISC_MIN_QUEUE_ID 0
+#define QDISC_MAX_QUEUE_ID 0xf000
+
+struct expr;
 struct lexer;
 struct ofpbuf;
 struct shash;
@@ -66,7 +74,8 @@ struct simap;
     OVNACT(GET_ND, ovnact_get_mac_bind) \
     OVNACT(PUT_ND, ovnact_put_mac_bind) \
     OVNACT(PUT_DHCPV4_OPTS, ovnact_put_dhcp_opts) \
-    OVNACT(PUT_DHCPV6_OPTS, ovnact_put_dhcp_opts)
+    OVNACT(PUT_DHCPV6_OPTS, ovnact_put_dhcp_opts) \
+    OVNACT(SET_QUEUE, ovnact_set_queue)
 
 /* enum ovnact_type, with a member OVNACT_<ENUM> for each action. */
 enum OVS_PACKED_ENUM ovnact_type {
@@ -219,6 +228,12 @@ struct ovnact_put_dhcp_opts {
     size_t n_options;
 };
 
+/* OVNACT_SET_QUEUE. */
+struct ovnact_set_queue {
+    struct ovnact ovnact;
+    uint16_t queue_id;
+};
+
 /* Internal use by the helpers below. */
 void ovnact_init(struct ovnact *, enum ovnact_type, size_t len);
 void *ovnact_put(struct ofpbuf *, enum ovnact_type, size_t len);
diff --git a/ovn/controller/binding.c b/ovn/controller/binding.c
index 0353a7b..73f6fe9 100644
--- a/ovn/controller/binding.c
+++ b/ovn/controller/binding.c
@@ -22,6 +22,7 @@
 #include "lib/poll-loop.h"
 #include "lib/sset.h"
 #include "lib/util.h"
+#include "lib/netdev.h"
 #include "lib/vswitch-idl.h"
 #include "openvswitch/hmap.h"
 #include "openvswitch/vlog.h"
@@ -30,6 +31,15 @@
 
 VLOG_DEFINE_THIS_MODULE(binding);
 
+#define OVN_QOS_TYPE "linux-htb"
+
+struct qos_queue {
+    struct hmap_node node;
+    uint32_t queue_id;
+    uint32_t max_rate;
+    uint32_t burst;
+};
+
 void
 binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
 {
@@ -43,19 +53,22 @@ binding_register_ovs_idl(struct ovsdb_idl *ovs_idl)
     ovsdb_idl_add_table(ovs_idl, &ovsrec_table_port);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_name);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_interfaces);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_port_col_qos);
 
     ovsdb_idl_add_table(ovs_idl, &ovsrec_table_interface);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_name);
     ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_external_ids);
-    ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_ingress_policing_rate);
-    ovsdb_idl_add_column(ovs_idl,
-                         &ovsrec_interface_col_ingress_policing_burst);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_interface_col_status);
+
+    ovsdb_idl_add_table(ovs_idl, &ovsrec_table_qos);
+    ovsdb_idl_add_column(ovs_idl, &ovsrec_qos_col_type);
 }
 
 static void
 get_local_iface_ids(const struct ovsrec_bridge *br_int,
                     struct shash *lport_to_iface,
-                    struct sset *all_lports)
+                    struct sset *all_lports,
+                    struct sset *egress_ifaces)
 {
     int i;
 
@@ -73,11 +86,20 @@ get_local_iface_ids(const struct ovsrec_bridge *br_int,
 
             iface_rec = port_rec->interfaces[j];
             iface_id = smap_get(&iface_rec->external_ids, "iface-id");
-            if (!iface_id) {
-                continue;
+
+            if (iface_id) {
+                shash_add(lport_to_iface, iface_id, iface_rec);
+                sset_add(all_lports, iface_id);
+            }
+
+            /* Check if this is a tunnel interface. */
+            if (smap_get(&iface_rec->options, "remote_ip")) {
+                const char *tunnel_iface
+                    = smap_get(&iface_rec->status, "tunnel_egress_iface");
+                if (tunnel_iface) {
+                    sset_add(egress_ifaces, tunnel_iface);
+                }
             }
-            shash_add(lport_to_iface, iface_id, iface_rec);
-            sset_add(all_lports, iface_id);
         }
     }
 }
@@ -99,20 +121,166 @@ add_local_datapath(struct hmap *local_datapaths,
 }
 
 static void
-update_qos(const struct ovsrec_interface *iface_rec,
-           const struct sbrec_port_binding *pb)
+get_qos_params(const struct sbrec_port_binding *pb, struct hmap *queue_map)
+{
+    uint32_t max_rate = smap_get_int(&pb->options, "qos_max_rate", 0);
+    uint32_t burst = smap_get_int(&pb->options, "qos_burst", 0);
+    uint32_t queue_id = smap_get_int(&pb->options, "qdisc_queue_id", 0);
+
+    if ((!max_rate && !burst) || !queue_id) {
+        /* Qos is not configured for this port. */
+        return;
+    }
+
+    struct qos_queue *node = xzalloc(sizeof *node);
+    hmap_insert(queue_map, &node->node, hash_int(queue_id, 0));
+    node->max_rate = max_rate;
+    node->burst = burst;
+    node->queue_id = queue_id;
+}
+
+static const struct ovsrec_qos *
+get_noop_qos(struct controller_ctx *ctx)
+{
+    const struct ovsrec_qos *qos;
+    OVSREC_QOS_FOR_EACH (qos, ctx->ovs_idl) {
+        if (!strcmp(qos->type, "linux-noop")) {
+            return qos;
+        }
+    }
+
+    if (!ctx->ovs_idl_txn) {
+        return NULL;
+    }
+    qos = ovsrec_qos_insert(ctx->ovs_idl_txn);
+    ovsrec_qos_set_type(qos, "linux-noop");
+    return qos;
+}
+
+static bool
+set_noop_qos(struct controller_ctx *ctx, struct sset *egress_ifaces)
+{
+    if (!ctx->ovs_idl_txn) {
+        return false;
+    }
+
+    const struct ovsrec_qos *noop_qos = get_noop_qos(ctx);
+    if (!noop_qos) {
+        return false;
+    }
+
+    const struct ovsrec_port *port;
+    size_t count = 0;
+
+    OVSREC_PORT_FOR_EACH (port, ctx->ovs_idl) {
+        if (sset_contains(egress_ifaces, port->name)) {
+            ovsrec_port_set_qos(port, noop_qos);
+            count++;
+        }
+        if (sset_count(egress_ifaces) == count) {
+            break;
+        }
+    }
+    return true;
+}
+
+static void
+setup_qos(const char *egress_iface, struct hmap *queue_map)
 {
-    int rate = smap_get_int(&pb->options, "policing_rate", 0);
-    int burst = smap_get_int(&pb->options, "policing_burst", 0);
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+    struct netdev *netdev_phy;
+
+    if (!egress_iface) {
+        /* Queues cannot be configured. */
+        return;
+    }
+
+    int error = netdev_open(egress_iface, NULL, &netdev_phy);
+    if (error) {
+        VLOG_WARN_RL(&rl, "%s: could not open netdev (%s)",
+                     egress_iface, ovs_strerror(error));
+        return;
+    }
+
+    /* Check and configure qdisc. */
+    const char *qdisc_type;
+    struct smap qdisc_details;
+
+    smap_init(&qdisc_details);
+    if (netdev_get_qos(netdev_phy, &qdisc_type, &qdisc_details) != 0 ||
+        qdisc_type[0] == '\0') {
+        /* Qos is not supported. */
+        return;
+    }
+    if (strcmp(qdisc_type, OVN_QOS_TYPE)) {
+        error = netdev_set_qos(netdev_phy, OVN_QOS_TYPE, &qdisc_details);
+        if (error) {
+            VLOG_WARN_RL(&rl, "%s: could not configure QoS (%s)",
+                         egress_iface, ovs_strerror(error));
+        }
+    }
+
+    /* Check and delete if needed. */
+    struct netdev_queue_dump dump;
+    unsigned int queue_id;
+    struct smap queue_details;
+    struct qos_queue *sb_info;
+    struct hmap consistent_queues;
+
+    smap_init(&queue_details);
+    hmap_init(&consistent_queues);
+    NETDEV_QUEUE_FOR_EACH (&queue_id, &queue_details, &dump, netdev_phy) {
+        bool is_queue_needed = false;
 
-    ovsrec_interface_set_ingress_policing_rate(iface_rec, MAX(0, rate));
-    ovsrec_interface_set_ingress_policing_burst(iface_rec, MAX(0, burst));
+        HMAP_FOR_EACH_WITH_HASH (sb_info, node, hash_int(queue_id, 0),
+                                 queue_map) {
+            is_queue_needed = true;
+            if (sb_info->max_rate ==
+                smap_get_int(&queue_details, "max-rate", 0)
+                && sb_info->burst == smap_get_int(&queue_details, "burst", 0)) {
+                /* This queue is consistent. */
+                hmap_insert(&consistent_queues, &sb_info->node,
+                            hash_int(queue_id, 0));
+                break;
+            }
+        }
+
+        if (!is_queue_needed) {
+            error = netdev_delete_queue(netdev_phy, queue_id);
+            if (error) {
+                VLOG_WARN_RL(&rl, "%s: could not delete queue %u (%s)",
+                             egress_iface, queue_id, ovs_strerror(error));
+            }
+        }
+    }
+
+    /* Create/Update queues. */
+    HMAP_FOR_EACH (sb_info, node, queue_map) {
+        if (hmap_contains(&consistent_queues, &sb_info->node)) {
+            hmap_remove(&consistent_queues, &sb_info->node);
+            continue;
+        }
+
+        smap_clear(&queue_details);
+        smap_add_format(&queue_details, "max-rate", "%d", sb_info->max_rate);
+        smap_add_format(&queue_details, "burst", "%d", sb_info->burst);
+        error = netdev_set_queue(netdev_phy, sb_info->queue_id,
+                                 &queue_details);
+        if (error) {
+            VLOG_WARN_RL(&rl, "%s: could not configure queue %u (%s)",
+                         egress_iface, sb_info->queue_id, ovs_strerror(error));
+        }
+    }
+    smap_destroy(&queue_details);
+    hmap_destroy(&consistent_queues);
+    netdev_close(netdev_phy);
 }
 
 static void
 consider_local_datapath(struct controller_ctx *ctx,
                         const struct sbrec_chassis *chassis_rec,
                         const struct sbrec_port_binding *binding_rec,
+                        struct hmap *qos_map,
                         struct hmap *local_datapaths,
                         struct shash *lport_to_iface,
                         struct sset *all_lports)
@@ -128,8 +296,8 @@ consider_local_datapath(struct controller_ctx *ctx,
             sset_add(all_lports, binding_rec->logical_port);
         }
         add_local_datapath(local_datapaths, binding_rec);
-        if (iface_rec && ctx->ovs_idl_txn) {
-            update_qos(iface_rec, binding_rec);
+        if (iface_rec && qos_map && ctx->ovs_idl_txn) {
+            get_qos_params(binding_rec, qos_map);
         }
         if (binding_rec->chassis == chassis_rec) {
             return;
@@ -198,14 +366,18 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
     const struct sbrec_chassis *chassis_rec;
     const struct sbrec_port_binding *binding_rec;
     struct shash lport_to_iface = SHASH_INITIALIZER(&lport_to_iface);
+    struct sset egress_ifaces = SSET_INITIALIZER(&egress_ifaces);
+    struct hmap qos_map;
 
     chassis_rec = get_chassis(ctx->ovnsb_idl, chassis_id);
     if (!chassis_rec) {
         return;
    }
 
+    hmap_init(&qos_map);
     if (br_int) {
-        get_local_iface_ids(br_int, &lport_to_iface, all_lports);
+        get_local_iface_ids(br_int, &lport_to_iface, all_lports,
+                            &egress_ifaces);
     }
 
     /* Run through each binding record to see if it is resident on this
@@ -213,11 +385,23 @@ binding_run(struct controller_ctx *ctx, const struct ovsrec_bridge *br_int,
      * directly connected logical ports and children of those ports. */
     SBREC_PORT_BINDING_FOR_EACH(binding_rec, ctx->ovnsb_idl) {
         consider_local_datapath(ctx, chassis_rec, binding_rec,
-                                local_datapaths, &lport_to_iface,
+                                sset_is_empty(&egress_ifaces) ? NULL :
+                                &qos_map, local_datapaths, &lport_to_iface,
                                 all_lports);
+
+    }
+
+    if (!sset_is_empty(&egress_ifaces)
+        && set_noop_qos(ctx, &egress_ifaces)) {
+        const char *entry;
+        SSET_FOR_EACH (entry, &egress_ifaces) {
+            setup_qos(entry, &qos_map);
+        }
     }
 
     shash_destroy(&lport_to_iface);
+    sset_destroy(&egress_ifaces);
+    hmap_destroy(&qos_map);
 }
 
 /* Returns true if the database is all cleaned up, false if more work is
diff --git a/ovn/lib/actions.c b/ovn/lib/actions.c
index 28b66ed..5e263f8 100644
--- a/ovn/lib/actions.c
+++ b/ovn/lib/actions.c
@@ -1606,6 +1606,46 @@ free_PUT_DHCPV6_OPTS(struct ovnact_put_dhcp_opts *pdo)
 {
     free_put_dhcp_opts(pdo);
 }
+
+static void
+parse_SET_QUEUE(struct action_context *ctx)
+{
+    int queue_id;
+
+    if (!lexer_force_match(ctx->lexer, LEX_T_LPAREN)
+        || !lexer_get_int(ctx->lexer, &queue_id)
+        || !lexer_force_match(ctx->lexer, LEX_T_RPAREN)) {
+        return;
+    }
+
+    if (queue_id < QDISC_MIN_QUEUE_ID || queue_id > QDISC_MAX_QUEUE_ID) {
+        lexer_error(ctx->lexer, "Queue ID %d for set_queue is "
+                    "not in valid range %d to %d.",
+                    queue_id, QDISC_MIN_QUEUE_ID, QDISC_MAX_QUEUE_ID);
+        return;
+    }
+
+    ovnact_put_SET_QUEUE(ctx->ovnacts)->queue_id = queue_id;
+}
+
+static void
+format_SET_QUEUE(const struct ovnact_set_queue *set_queue, struct ds *s)
+{
+    ds_put_format(s, "set_queue(%d);", set_queue->queue_id);
+}
+
+static void
+encode_SET_QUEUE(const struct ovnact_set_queue *set_queue,
+                 const struct ovnact_encode_params *ep OVS_UNUSED,
+                 struct ofpbuf *ofpacts)
+{
+    ofpact_put_SET_QUEUE(ofpacts)->queue_id = set_queue->queue_id;
+}
+
+static void
+free_SET_QUEUE(struct ovnact_set_queue *a OVS_UNUSED)
+{
+}
 
 /* Parses an assignment or exchange or put_dhcp_opts action. */
 static void
@@ -1677,6 +1717,8 @@ parse_action(struct action_context *ctx)
         parse_get_mac_bind(ctx, 128, ovnact_put_GET_ND(ctx->ovnacts));
     } else if (lexer_match_id(ctx->lexer, "put_nd")) {
         parse_put_mac_bind(ctx, 128, ovnact_put_PUT_ND(ctx->ovnacts));
+    } else if (lexer_match_id(ctx->lexer, "set_queue")) {
+        parse_SET_QUEUE(ctx);
     } else {
         lexer_syntax_error(ctx->lexer, "expecting action");
     }
diff --git a/ovn/northd/ovn-northd.c b/ovn/northd/ovn-northd.c
index 0ad9190..9ce2af9 100644
--- a/ovn/northd/ovn-northd.c
+++ b/ovn/northd/ovn-northd.c
@@ -31,6 +31,7 @@
 #include "ovn/lib/ovn-nb-idl.h"
 #include "ovn/lib/ovn-sb-idl.h"
 #include "ovn/lib/ovn-util.h"
+#include "ovn/actions.h"
 #include "packets.h"
 #include "poll-loop.h"
 #include "smap.h"
@@ -272,6 +273,86 @@ allocate_tnlid(struct hmap *set, const char *name, uint32_t max,
     return 0;
 }
 
+struct ovn_chassis_qdisc_queues {
+    struct hmap_node key_node;
+    uint32_t queue_id;
+    struct uuid chassis_uuid;
+};
+
+static void
+destroy_chassis_queues(struct hmap *set)
+{
+    struct ovn_chassis_qdisc_queues *node;
+    HMAP_FOR_EACH_POP (node, key_node, set) {
+        free(node);
+    }
+    hmap_destroy(set);
+}
+
+static void
+add_chassis_queue(struct hmap *set, struct uuid *chassis_uuid,
+                  uint32_t queue_id)
+{
+    struct ovn_chassis_qdisc_queues *node = xmalloc(sizeof *node);
+    node->queue_id = queue_id;
+    memcpy(&node->chassis_uuid, chassis_uuid, sizeof node->chassis_uuid);
+    hmap_insert(set, &node->key_node, uuid_hash(chassis_uuid));
+}
+
+static bool
+chassis_queueid_in_use(const struct hmap *set, struct uuid *chassis_uuid,
+                       uint32_t queue_id)
+{
+    const struct ovn_chassis_qdisc_queues *node;
+    HMAP_FOR_EACH_WITH_HASH (node, key_node, uuid_hash(chassis_uuid), set) {
+        if (uuid_equals(chassis_uuid, &node->chassis_uuid)
+            && node->queue_id == queue_id) {
+            return true;
+        }
+    }
+    return false;
false; +} + +static uint32_t +allocate_chassis_queueid(struct hmap *set, struct sbrec_chassis *chassis) +{ + for (uint32_t queue_id = QDISC_MIN_QUEUE_ID + 1; + queue_id <= QDISC_MAX_QUEUE_ID; + queue_id++) { + if (!chassis_queueid_in_use(set, &chassis->header_.uuid, queue_id)) { + add_chassis_queue(set, &chassis->header_.uuid, queue_id); + return queue_id; + } + } + + static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); + VLOG_WARN_RL(&rl, "all %s queue ids exhausted", chassis->name); + return 0; +} + +static void +free_chassis_queueid(struct hmap *set, struct sbrec_chassis *chassis, + uint32_t queue_id) +{ + struct ovn_chassis_qdisc_queues *node; + HMAP_FOR_EACH_WITH_HASH (node, key_node, + uuid_hash(&chassis->header_.uuid), + set) { + if (uuid_equals(&chassis->header_.uuid, &node->chassis_uuid) + && node->queue_id == queue_id) { + hmap_remove(set, &node->key_node); + break; + } + } +} + +static inline bool +port_has_qos_params(const struct smap *opts) +{ + return (smap_get(opts, "qos_max_rate") || + smap_get(opts, "qos_burst")); +} + /* The 'key' comes from nbs->header_.uuid or nbr->header_.uuid or * sb->external_ids:logical-switch. */ struct ovn_datapath { @@ -959,6 +1040,7 @@ build_ipam(struct northd_context *ctx, struct hmap *datapaths, static void join_logical_ports(struct northd_context *ctx, struct hmap *datapaths, struct hmap *ports, + struct hmap *chassis_qdisc_queues, struct ovs_list *sb_only, struct ovs_list *nb_only, struct ovs_list *both) { @@ -991,6 +1073,15 @@ join_logical_ports(struct northd_context *ctx, } op->nbsp = nbsp; ovs_list_remove(&op->list); + + uint32_t queue_id = smap_get_int(&op->sb->options, + "qdisc_queue_id", 0); + if (queue_id && op->sb->chassis) { + add_chassis_queue( + chassis_qdisc_queues, &op->sb->chassis->header_.uuid, + queue_id); + } + ovs_list_push_back(both, &op->list); /* This port exists due to a SB binding, but should @@ -1143,7 +1234,8 @@ join_logical_ports(struct northd_context *ctx, } static void -ovn_port_update_sbrec(const struct ovn_port *op) +ovn_port_update_sbrec(const struct ovn_port *op, + struct hmap *chassis_qdisc_queues) { sbrec_port_binding_set_datapath(op->sb, op->od->sb); if (op->nbrp) { @@ -1171,8 +1263,29 @@ ovn_port_update_sbrec(const struct ovn_port *op) sbrec_port_binding_set_mac(op->sb, NULL, 0); } else { if (strcmp(op->nbsp->type, "router")) { + uint32_t queue_id = smap_get_int( + &op->sb->options, "qdisc_queue_id", 0); + bool has_qos = port_has_qos_params(&op->nbsp->options); + struct smap options; + + if (op->sb->chassis && has_qos && !queue_id) { + queue_id = allocate_chassis_queueid(chassis_qdisc_queues, + op->sb->chassis); + } else if (!has_qos && queue_id) { + free_chassis_queueid(chassis_qdisc_queues, + op->sb->chassis, + queue_id); + queue_id = 0; + } + + smap_clone(&options, &op->nbsp->options); + if (queue_id) { + smap_add_format(&options, + "qdisc_queue_id", "%d", queue_id); + } + sbrec_port_binding_set_options(op->sb, &options); + smap_destroy(&options); sbrec_port_binding_set_type(op->sb, op->nbsp->type); - sbrec_port_binding_set_options(op->sb, &op->nbsp->options); } else { const char *chassis = NULL; if (op->peer && op->peer->od && op->peer->od->nbr) { @@ -1243,14 +1356,18 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, struct hmap *ports) { struct ovs_list sb_only, nb_only, both; + struct hmap chassis_qdisc_queues; + + hmap_init(&chassis_qdisc_queues); - join_logical_ports(ctx, datapaths, ports, &sb_only, &nb_only, &both); + join_logical_ports(ctx, datapaths, ports, 
&chassis_qdisc_queues, + &sb_only, &nb_only, &both); /* For logical ports that are in both databases, update the southbound * record based on northbound data. Also index the in-use tunnel_keys. */ struct ovn_port *op, *next; LIST_FOR_EACH_SAFE (op, next, list, &both) { - ovn_port_update_sbrec(op); + ovn_port_update_sbrec(op, &chassis_qdisc_queues); add_tnlid(&op->od->port_tnlids, op->sb->tunnel_key); if (op->sb->tunnel_key > op->od->port_key_hint) { @@ -1266,7 +1383,7 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, } op->sb = sbrec_port_binding_insert(ctx->ovnsb_txn); - ovn_port_update_sbrec(op); + ovn_port_update_sbrec(op, &chassis_qdisc_queues); sbrec_port_binding_set_logical_port(op->sb, op->key); sbrec_port_binding_set_tunnel_key(op->sb, tunnel_key); @@ -1286,6 +1403,8 @@ build_ports(struct northd_context *ctx, struct hmap *datapaths, if (remove_mac_bindings) { cleanup_mac_bindings(ctx, ports); } + + destroy_chassis_queues(&chassis_qdisc_queues); } #define OVN_MIN_MULTICAST 32768 @@ -2529,11 +2648,18 @@ build_lswitch_flows(struct hmap *datapaths, struct hmap *ports, } ds_clear(&match); + ds_clear(&actions); ds_put_format(&match, "inport == %s", op->json_key); build_port_security_l2("eth.src", op->ps_addrs, op->n_ps_addrs, &match); + + const char *queue_id = smap_get(&op->sb->options, "qdisc_queue_id"); + if (queue_id) { + ds_put_format(&actions, "set_queue(%s); ", queue_id); + } + ds_put_cstr(&actions, "next;"); ovn_lflow_add(lflows, op->od, S_SWITCH_IN_PORT_SEC_L2, 50, - ds_cstr(&match), "next;"); + ds_cstr(&match), ds_cstr(&actions)); if (op->nbsp->n_port_security) { build_port_security_ip(P_IN, op, lflows); diff --git a/ovn/ovn-nb.xml b/ovn/ovn-nb.xml index 5719e74..42dfa4f 100644 --- a/ovn/ovn-nb.xml +++ b/ovn/ovn-nb.xml @@ -292,14 +292,14 @@ (empty string) </p> - <column name="options" key="policing_rate"> + <column name="options" key="qos_max_rate"> If set, indicates the maximum rate for data sent from this interface, - in kbps. Data exceeding this rate is dropped. + in bit/s. The traffic will be shaped according to this limit. </column> - <column name="options" key="policing_burst"> + <column name="options" key="qos_burst"> If set, indicates the maximum burst size for data sent from this - interface, in kb. + interface, in bits. </column> </group> </group> diff --git a/ovn/ovn-sb.xml b/ovn/ovn-sb.xml index e119249..6c7e60b 100644 --- a/ovn/ovn-sb.xml +++ b/ovn/ovn-sb.xml @@ -1333,6 +1333,29 @@ </p> </dd> + <dt> + <code>set_queue(<var>queue_number</var>);</code> + </dt> + + <dd> + <p> + <b>Parameters</b>: Queue number <var>queue_number</var>, in the range 0 to 61440. + </p> + + <p> + This is a logical equivalent of the OpenFlow <code>set_queue</code> + action. It affects packets that egress a hypervisor through a + physical interface. For nonzero <var>queue_number</var>, it + configures packet queuing to match the settings configured for the + <ref table="Port_Binding"/> with + <code>options:qdisc_queue_id</code> matching + <var>queue_number</var>. When <var>queue_number</var> is zero, it + resets queuing to the default strategy. + </p> + + <p><b>Example:</b> <code>set_queue(10);</code></p> + </dd> + <dt><code>ct_lb;</code></dt> <dt><code>ct_lb(</code><var>ip</var>[<code>:</code><var>port</var>]...<code>);</code></dt> <dd> @@ -1856,14 +1879,21 @@ tcp.flags = RST; (empty string) </p> - <column name="options" key="policing_rate"> + <column name="options" key="qos_max_rate"> If set, indicates the maximum rate for data sent from this interface, - in kbps. 
+        in bit/s. The traffic will be shaped according to this limit.
       </column>
 
-      <column name="options" key="policing_burst">
+      <column name="options" key="qos_burst">
         If set, indicates the maximum burst size for data sent from this
-        interface, in kb.
+        interface, in bits.
+      </column>
+
+      <column name="options" key="qdisc_queue_id"
+              type='{"type": "integer", "minInteger": 1, "maxInteger": 61440}'>
+        Indicates the queue number on the physical device. This is same as the
+        <code>queue_id</code> used in OpenFlow in <code>struct
+        ofp_action_enqueue</code>.
       </column>
     </group>
diff --git a/tests/ovn.at b/tests/ovn.at
index a23b422..2fd432b 100644
--- a/tests/ovn.at
+++ b/tests/ovn.at
@@ -934,6 +934,14 @@ reg1[0] = put_dhcpv6_opts(ia_addr="ae70::4");
 reg1[0] = put_dhcpv6_opts(ia_addr=ae70::4, domain_search=ae70::1);
     DHCPv6 option domain_search requires string value.
 
+# set_queue
+set_queue(0);
+    encodes as set_queue:0
+set_queue(61440);
+    encodes as set_queue:61440
+set_queue(65535);
+    Queue ID 65535 for set_queue is not in valid range 0 to 61440.
+
 # Contradictionary prerequisites (allowed but not useful):
 ip4.src = ip6.src[0..31];
     encodes as move:NXM_NX_IPV6_SRC[0..31]->NXM_OF_IP_SRC[]
-- 
1.9.1
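
(Not part of the patch: a quick way to sanity-check the result on a hypervisor
is to look at the qdisc and classes that ovn-controller creates on the tunnel
egress interface once a port with qos options is bound on that chassis. The
interface name eth0 below is only an example:

    tc qdisc show dev eth0    # should show an htb qdisc managed by OVS
    tc class show dev eth0    # should list classes for the configured queues
)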