To reduce flow migration overhead, replace the array-based representation of the set of ports bound to a particular queue with a bitmask-based one.
The maximum number of DSW event ports remains 64, but after this change can no longer easily be increased by modifying DSW_MAX_PORTS and recompiling. RFC v2: * supply mandatory message to static_assert(). Signed-off-by: Mattias Rönnblom <mattias.ronnb...@ericsson.com> --- drivers/event/dsw/dsw_evdev.c | 39 +++++++++++++++++++++-------------- drivers/event/dsw/dsw_evdev.h | 5 ++++- drivers/event/dsw/dsw_event.c | 10 +++------ 3 files changed, 31 insertions(+), 23 deletions(-) diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c index 1209e73a9d..629c929cb2 100644 --- a/drivers/event/dsw/dsw_evdev.c +++ b/drivers/event/dsw/dsw_evdev.c @@ -144,24 +144,23 @@ dsw_queue_release(struct rte_eventdev *dev __rte_unused, static void queue_add_port(struct dsw_queue *queue, uint16_t port_id) { - queue->serving_ports[queue->num_serving_ports] = port_id; + uint64_t port_mask = UINT64_C(1) << port_id; + + queue->serving_ports |= port_mask; queue->num_serving_ports++; } static bool queue_remove_port(struct dsw_queue *queue, uint16_t port_id) { - uint16_t i; + uint64_t port_mask = UINT64_C(1) << port_id; + + if (queue->serving_ports & port_mask) { + queue->num_serving_ports--; + queue->serving_ports ^= port_mask; + return true; + } - for (i = 0; i < queue->num_serving_ports; i++) - if (queue->serving_ports[i] == port_id) { - uint16_t last_idx = queue->num_serving_ports - 1; - if (i != last_idx) - queue->serving_ports[i] = - queue->serving_ports[last_idx]; - queue->num_serving_ports--; - return true; - } return false; } @@ -256,10 +255,20 @@ initial_flow_to_port_assignment(struct dsw_evdev *dsw) struct dsw_queue *queue = &dsw->queues[queue_id]; uint16_t flow_hash; for (flow_hash = 0; flow_hash < DSW_MAX_FLOWS; flow_hash++) { - uint8_t port_idx = - rte_rand() % queue->num_serving_ports; - uint8_t port_id = - queue->serving_ports[port_idx]; + uint8_t skip = + rte_rand_max(queue->num_serving_ports); + uint8_t port_id; + + for (port_id = 0;; port_id++) { + uint64_t port_mask = UINT64_C(1) << port_id; + + if (queue->serving_ports & port_mask) { + if (skip == 0) + break; + skip--; + } + } + dsw->queues[queue_id].flow_to_port_map[flow_hash] = port_id; } diff --git a/drivers/event/dsw/dsw_evdev.h b/drivers/event/dsw/dsw_evdev.h index 6416a8a898..8166340e1e 100644 --- a/drivers/event/dsw/dsw_evdev.h +++ b/drivers/event/dsw/dsw_evdev.h @@ -234,12 +234,15 @@ struct dsw_port { struct dsw_queue { uint8_t schedule_type; - uint8_t serving_ports[DSW_MAX_PORTS]; + uint64_t serving_ports; uint16_t num_serving_ports; uint8_t flow_to_port_map[DSW_MAX_FLOWS] __rte_cache_aligned; }; +/* Limited by the size of the 'serving_ports' bitmask */ +static_assert(DSW_MAX_PORTS <= 64, "Maximum port count exceeds bitmask size"); + struct dsw_evdev { struct rte_eventdev_data *data; diff --git a/drivers/event/dsw/dsw_event.c b/drivers/event/dsw/dsw_event.c index 93bbeead2e..23488d9030 100644 --- a/drivers/event/dsw/dsw_event.c +++ b/drivers/event/dsw/dsw_event.c @@ -447,13 +447,9 @@ static bool dsw_is_serving_port(struct dsw_evdev *dsw, uint8_t port_id, uint8_t queue_id) { struct dsw_queue *queue = &dsw->queues[queue_id]; - uint16_t i; - - for (i = 0; i < queue->num_serving_ports; i++) - if (queue->serving_ports[i] == port_id) - return true; + uint64_t port_mask = UINT64_C(1) << port_id; - return false; + return queue->serving_ports & port_mask; } static bool @@ -575,7 +571,7 @@ dsw_schedule(struct dsw_evdev *dsw, uint8_t queue_id, uint16_t flow_hash) /* A single-link queue, or atomic/ordered/parallel but * with just a single serving port. */ - port_id = queue->serving_ports[0]; + port_id = rte_bsf64(queue->serving_ports); DSW_LOG_DP(DEBUG, "Event with queue_id %d flow_hash %d is scheduled " "to port %d.\n", queue_id, flow_hash, port_id); -- 2.34.1