> -----Original Message-----
> From: McDaniel, Timothy <timothy.mcdan...@intel.com>
> Sent: Friday, September 11, 2020 2:18 PM
> Cc: dev@dpdk.org; Carrillo, Erik G <erik.g.carri...@intel.com>; Eads, Gage
> <gage.e...@intel.com>; Van Haaren, Harry <harry.van.haa...@intel.com>;
> jer...@marvell.com
> Subject: [PATCH v4 08/22] event/dlb: add infos get and configure
>
> Add support for configuring the DLB hardware.

Please expand the commit message; one sentence is too little detail for a patch of this size.

> Signed-off-by: Timothy McDaniel <timothy.mcdan...@intel.com>
> ---
>  drivers/event/dlb/dlb.c                  |  402 +++
>  drivers/event/dlb/dlb_iface.c            |   11 +
>  drivers/event/dlb/dlb_iface.h            |   11 +
>  drivers/event/dlb/pf/base/dlb_resource.c | 4098 +++++++++++++++++++++++++++++-
>  drivers/event/dlb/pf/dlb_pf.c            |   88 +
>  5 files changed, 4517 insertions(+), 93 deletions(-)
>
> diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
> index f89edf2..2dba396 100644
> --- a/drivers/event/dlb/dlb.c
> +++ b/drivers/event/dlb/dlb.c
> @@ -140,6 +140,23 @@ dlb_hw_query_resources(struct dlb_eventdev *dlb)
>  	return 0;
>  }
>
> +void
> +dlb_free_qe_mem(struct dlb_port *qm_port)
> +{
> +	if (qm_port == NULL)
> +		return;
> +
> +	if (qm_port->qe4) {
> +		rte_free(qm_port->qe4);
> +		qm_port->qe4 = NULL;
> +	}
> +
> +	if (qm_port->consume_qe) {
> +		rte_free(qm_port->consume_qe);
> +		qm_port->consume_qe = NULL;
> +	}
> +}

Checking the pointers for NULL is not strictly required; rte_free() will simply return when passed a NULL pointer.
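For example, something like this sketch (it assumes qe4 and consume_qe are the only dynamically allocated members, and keeps the NULL assignments so a later reconfigure can't double-free):

void
dlb_free_qe_mem(struct dlb_port *qm_port)
{
	if (qm_port == NULL)
		return;

	/* rte_free() is a no-op on NULL, so no per-pointer checks needed */
	rte_free(qm_port->qe4);
	qm_port->qe4 = NULL;

	rte_free(qm_port->consume_qe);
	qm_port->consume_qe = NULL;
}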
> +
> +/* Wrapper for string to int conversion. Substituted for atoi(...), which is
> + * unsafe.
> + */
> @@ -231,6 +248,389 @@ set_num_dir_credits(const char *key __rte_unused,
>  			    DLB_MAX_NUM_DIR_CREDITS);
>  		return -EINVAL;
>  	}
> +	return 0;
> +}
> +
> +/* VDEV-only notes:
> + * This function first unmaps all memory mappings and closes the
> + * domain's file descriptor, which causes the driver to reset the
> + * scheduling domain. Once that completes (when close() returns), we
> + * can safely free the dynamically allocated memory used by the
> + * scheduling domain.
> + *
> + * PF-only notes:
> + * We will maintain a use count and use that to determine when
> + * a reset is required. In PF mode, we never mmap, or munmap
> + * device memory, and we own the entire physical PCI device.
> + */
> +
> +static void
> +dlb_hw_reset_sched_domain(const struct rte_eventdev *dev, bool reconfig)
> +{
> +	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> +	enum dlb_configuration_state config_state;
> +	int i, j;
> +
> +	/* Close and reset the domain */
> +	dlb_iface_domain_close(dlb);
> +
> +	/* Free all dynamically allocated port memory */
> +	for (i = 0; i < dlb->num_ports; i++)
> +		dlb_free_qe_mem(&dlb->ev_ports[i].qm_port);
> +
> +	/* If reconfiguring, mark the device's queues and ports as "previously
> +	 * configured." If the user does not reconfigure them, the PMD will
> +	 * reapply their previous configuration when the device is started.
> +	 */
> +	config_state = (reconfig) ? DLB_PREV_CONFIGURED : DLB_NOT_CONFIGURED;
> +
> +	for (i = 0; i < dlb->num_ports; i++) {
> +		dlb->ev_ports[i].qm_port.config_state = config_state;
> +		/* Reset setup_done so ports can be reconfigured */
> +		dlb->ev_ports[i].setup_done = false;
> +		for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++)
> +			dlb->ev_ports[i].link[j].mapped = false;
> +	}
> +
> +	for (i = 0; i < dlb->num_queues; i++)
> +		dlb->ev_queues[i].qm_queue.config_state = config_state;
> +
> +	for (i = 0; i < DLB_MAX_NUM_QUEUES; i++)
> +		dlb->ev_queues[i].setup_done = false;
> +
> +	dlb->num_ports = 0;
> +	dlb->num_ldb_ports = 0;
> +	dlb->num_dir_ports = 0;
> +	dlb->num_queues = 0;
> +	dlb->num_ldb_queues = 0;
> +	dlb->num_dir_queues = 0;
> +	dlb->configured = false;
> +}
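As an aside for other reviewers: the reconfig == true path above is reached when an application calls rte_event_dev_configure() on an already-configured device. Roughly (hypothetical application code, not part of this patch; dev_id and the field values are placeholders):

	struct rte_event_dev_config cfg = { 0 };

	cfg.nb_event_queues = 2;
	cfg.nb_event_ports = 2;
	/* ... remaining fields populated from rte_event_dev_info_get() ... */

	rte_event_dev_configure(dev_id, &cfg);	/* first-time configure */

	cfg.nb_event_queues = 4;		/* change resource counts */
	rte_event_dev_configure(dev_id, &cfg);	/* dlb->configured == true, so the
						 * PMD resets the sched domain with
						 * reconfig == true before continuing
						 */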
> +
> +static int
> +dlb_ldb_credit_pool_create(struct dlb_hw_dev *handle)
> +{
> +	struct dlb_create_ldb_pool_args cfg;
> +	struct dlb_cmd_response response;
> +	int ret;
> +
> +	if (handle == NULL)
> +		return -EINVAL;
> +
> +	if (!handle->cfg.resources.num_ldb_credits) {
> +		handle->cfg.ldb_credit_pool_id = 0;
> +		handle->cfg.num_ldb_credits = 0;
> +		return 0;
> +	}
> +
> +	cfg.response = (uintptr_t)&response;
> +	cfg.num_ldb_credits = handle->cfg.resources.num_ldb_credits;
> +
> +	ret = dlb_iface_ldb_credit_pool_create(handle,
> +					       &cfg);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb: ldb_credit_pool_create ret=%d (driver status: %s)\n",
> +			    ret, dlb_error_strings[response.status]);
> +	}
> +
> +	handle->cfg.ldb_credit_pool_id = response.id;
> +	handle->cfg.num_ldb_credits = cfg.num_ldb_credits;
> +
> +	return ret;
> +}
> +
> +static int
> +dlb_dir_credit_pool_create(struct dlb_hw_dev *handle)
> +{
> +	struct dlb_create_dir_pool_args cfg;
> +	struct dlb_cmd_response response;
> +	int ret;
> +
> +	if (handle == NULL)
> +		return -EINVAL;
> +
> +	if (!handle->cfg.resources.num_dir_credits) {
> +		handle->cfg.dir_credit_pool_id = 0;
> +		handle->cfg.num_dir_credits = 0;
> +		return 0;
> +	}
> +
> +	cfg.response = (uintptr_t)&response;
> +	cfg.num_dir_credits = handle->cfg.resources.num_dir_credits;
> +
> +	ret = dlb_iface_dir_credit_pool_create(handle, &cfg);
> +	if (ret < 0)
> +		DLB_LOG_ERR("dlb: dir_credit_pool_create ret=%d (driver status: %s)\n",
> +			    ret, dlb_error_strings[response.status]);
> +
> +	handle->cfg.dir_credit_pool_id = response.id;
> +	handle->cfg.num_dir_credits = cfg.num_dir_credits;
> +
> +	return ret;
> +}
> +
> +static int
> +dlb_hw_create_sched_domain(struct dlb_hw_dev *handle,
> +			   struct dlb_eventdev *dlb,
> +			   const struct dlb_hw_rsrcs *resources_asked)
> +{
> +	int ret = 0;
> +	struct dlb_create_sched_domain_args *config_params;
> +	struct dlb_cmd_response response;
> +
> +	if (resources_asked == NULL) {
> +		DLB_LOG_ERR("dlb: dlb_create NULL parameter\n");
> +		ret = EINVAL;
> +		goto error_exit;
> +	}
> +
> +	/* Map generic qm resources to dlb resources */
> +	config_params = &handle->cfg.resources;
> +
> +	config_params->response = (uintptr_t)&response;
> +
> +	/* DIR ports and queues */
> +
> +	config_params->num_dir_ports =
> +		resources_asked->num_dir_ports;
> +
> +	config_params->num_dir_credits =
> +		resources_asked->num_dir_credits;
> +
> +	/* LDB ports and queues */
> +
> +	config_params->num_ldb_queues =
> +		resources_asked->num_ldb_queues;
> +
> +	config_params->num_ldb_ports =
> +		resources_asked->num_ldb_ports;
> +
> +	config_params->num_ldb_credits =
> +		resources_asked->num_ldb_credits;
> +
> +	config_params->num_atomic_inflights =
> +		dlb->num_atm_inflights_per_queue *
> +		config_params->num_ldb_queues;
> +
> +	config_params->num_hist_list_entries = config_params->num_ldb_ports *
> +		DLB_NUM_HIST_LIST_ENTRIES_PER_LDB_PORT;
> +
> +	/* dlb limited to 1 credit pool per queue type */
> +	config_params->num_ldb_credit_pools = 1;
> +	config_params->num_dir_credit_pools = 1;
> +
> +	DLB_LOG_DBG("sched domain create - ldb_qs=%d, ldb_ports=%d, dir_ports=%d, atomic_inflights=%d, hist_list_entries=%d, ldb_credits=%d, dir_credits=%d, ldb_cred_pools=%d, dir-credit_pools=%d\n",
> +		    config_params->num_ldb_queues,
> +		    config_params->num_ldb_ports,
> +		    config_params->num_dir_ports,
> +		    config_params->num_atomic_inflights,
> +		    config_params->num_hist_list_entries,
> +		    config_params->num_ldb_credits,
> +		    config_params->num_dir_credits,
> +		    config_params->num_ldb_credit_pools,
> +		    config_params->num_dir_credit_pools);
> +
> +	/* Configure the QM */
> +
> +	ret = dlb_iface_sched_domain_create(handle, config_params);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb: domain create failed, device_id = %d, (driver ret = %d, extra status: %s)\n",
> +			    handle->device_id,
> +			    ret,
> +			    dlb_error_strings[response.status]);
> +		goto error_exit;
> +	}
> +
> +	handle->domain_id = response.id;
> +	handle->domain_id_valid = 1;
> +
> +	config_params->response = 0;
> +
> +	ret = dlb_ldb_credit_pool_create(handle);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb: create ldb credit pool failed\n");
> +		goto error_exit2;
> +	}
> +
> +	ret = dlb_dir_credit_pool_create(handle);
> +	if (ret < 0) {
> +		DLB_LOG_ERR("dlb: create dir credit pool failed\n");
> +		goto error_exit2;
> +	}
> +
> +	handle->cfg.configured = true;
> +
> +	return 0;
> +
> +error_exit2:
> +	dlb_iface_domain_close(dlb);
> +
> +error_exit:
> +	return ret;
> +}
> +
> +/* End HW specific */
> +static void
> +dlb_eventdev_info_get(struct rte_eventdev *dev,
> +		      struct rte_event_dev_info *dev_info)
> +{
> +	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> +	int ret;
> +
> +	ret = dlb_hw_query_resources(dlb);
> +	if (ret) {
> +		const struct rte_eventdev_data *data = dev->data;
> +
> +		DLB_LOG_ERR("get resources err=%d, devid=%d\n",
> +			    ret, data->dev_id);
> +		/* fn is void, so fall through and return values set up in
> +		 * probe
> +		 */
> +	}
> +
> +	/* Add num resources currently owned by this domain.
> +	 * These would become available if the scheduling domain were reset due
> +	 * to the application recalling eventdev_configure to *reconfigure* the
> +	 * domain.
> +	 */
> +	evdev_dlb_default_info.max_event_ports += dlb->num_ldb_ports;
> +	evdev_dlb_default_info.max_event_queues += dlb->num_ldb_queues;
> +	evdev_dlb_default_info.max_num_events += dlb->num_ldb_credits;
> +
> +

Nit: extra blank line here.

> +	/* In DLB A-stepping hardware, applications are limited to 128
> +	 * configured ports (load-balanced or directed). The reported number of
> +	 * available ports must reflect this.
> +	 */
> +	if (dlb->revision < DLB_REV_B0) {
> +		int used_ports;
> +
> +		used_ports = DLB_MAX_NUM_LDB_PORTS + DLB_MAX_NUM_DIR_PORTS -
> +			dlb->hw_rsrc_query_results.num_ldb_ports -
> +			dlb->hw_rsrc_query_results.num_dir_ports;
> +
> +		evdev_dlb_default_info.max_event_ports =
> +			RTE_MIN(evdev_dlb_default_info.max_event_ports,
> +				128 - used_ports);
> +	}
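Just to double-check my reading of the math here (assuming the limits are DLB_MAX_NUM_LDB_PORTS == 64 and DLB_MAX_NUM_DIR_PORTS == 128; please correct me if those differ):

	/* E.g., if 10 LDB and 20 DIR ports are already owned elsewhere, the
	 * resource query reports num_ldb_ports == 54 and num_dir_ports == 108:
	 *   used_ports = 64 + 128 - 54 - 108 = 30
	 *   max_event_ports is then capped at 128 - 30 = 98
	 */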
> +
> +	evdev_dlb_default_info.max_event_queues =
> +		RTE_MIN(evdev_dlb_default_info.max_event_queues,
> +			RTE_EVENT_MAX_QUEUES_PER_DEV);
> +
> +	evdev_dlb_default_info.max_num_events =
> +		RTE_MIN(evdev_dlb_default_info.max_num_events,
> +			dlb->max_num_events_override);
> +
> +	*dev_info = evdev_dlb_default_info;
> +}
> +
> +/* Note: 1 QM instance per QM device, QM instance/device == event device */
> +static int
> +dlb_eventdev_configure(const struct rte_eventdev *dev)
> +{
> +	struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
> +	struct dlb_hw_dev *handle = &dlb->qm_instance;
> +	struct dlb_hw_rsrcs *rsrcs = &handle->info.hw_rsrc_max;
> +	const struct rte_eventdev_data *data = dev->data;
> +	const struct rte_event_dev_config *config = &data->dev_conf;
> +	int ret;
> +
> +	/* If this eventdev is already configured, we must release the current
> +	 * scheduling domain before attempting to configure a new one.
> +	 */
> +	if (dlb->configured) {
> +		dlb_hw_reset_sched_domain(dev, true);
> +
> +		ret = dlb_hw_query_resources(dlb);
> +		if (ret) {
> +			DLB_LOG_ERR("get resources err=%d, devid=%d\n",
> +				    ret, data->dev_id);
> +			return ret;
> +		}
> +	}
> +
> +	if (config->nb_event_queues > rsrcs->num_queues) {
> +		DLB_LOG_ERR("nb_event_queues parameter (%d) exceeds the QM device's capabilities (%d).\n",
> +			    config->nb_event_queues,
> +			    rsrcs->num_queues);
> +		return -EINVAL;
> +	}
> +	if (config->nb_event_ports > (rsrcs->num_ldb_ports
> +				      + rsrcs->num_dir_ports)) {
> +		DLB_LOG_ERR("nb_event_ports parameter (%d) exceeds the QM device's capabilities (%d).\n",
> +			    config->nb_event_ports,
> +			    (rsrcs->num_ldb_ports + rsrcs->num_dir_ports));
> +		return -EINVAL;
> +	}
> +	if (config->nb_events_limit > rsrcs->nb_events_limit) {
> +		DLB_LOG_ERR("nb_events_limit parameter (%d) exceeds the QM device's capabilities (%d).\n",
> +			    config->nb_events_limit,
> +			    rsrcs->nb_events_limit);
> +		return -EINVAL;
> +	}
> +
> +	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
> +		dlb->global_dequeue_wait = false;

Nit: don't use braces on single-statement conditionals.
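I.e., assuming the body stays a single statement:

	if (config->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		dlb->global_dequeue_wait = false;

(If the full conditional has an else branch that needs braces, keeping them on both branches is fine per the DPDK coding style.)

Thanks,
Gage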