Add a configure function for idxd devices, taking the same parameters as the existing configure function for ioat. The ring_size parameter is used to compute the maximum number of batches to be supported by the driver, given that the hardware works on individual batches of descriptors at a time.
Signed-off-by: Bruce Richardson <bruce.richard...@intel.com> --- drivers/raw/ioat/idxd_pci.c | 1 + drivers/raw/ioat/idxd_vdev.c | 1 + drivers/raw/ioat/ioat_common.c | 61 ++++++++++++++++++++++++++ drivers/raw/ioat/ioat_private.h | 3 ++ drivers/raw/ioat/rte_ioat_rawdev_fns.h | 1 + 5 files changed, 67 insertions(+) diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c index 78c443703..762efd5ac 100644 --- a/drivers/raw/ioat/idxd_pci.c +++ b/drivers/raw/ioat/idxd_pci.c @@ -54,6 +54,7 @@ idxd_is_wq_enabled(struct idxd_rawdev *idxd) static const struct rte_rawdev_ops idxd_pci_ops = { .dev_selftest = idxd_rawdev_test, .dump = idxd_dev_dump, + .dev_configure = idxd_dev_configure, }; /* each portal uses 4 x 4k pages */ diff --git a/drivers/raw/ioat/idxd_vdev.c b/drivers/raw/ioat/idxd_vdev.c index 2b8122cbc..90ad11006 100644 --- a/drivers/raw/ioat/idxd_vdev.c +++ b/drivers/raw/ioat/idxd_vdev.c @@ -35,6 +35,7 @@ struct idxd_vdev_args { static const struct rte_rawdev_ops idxd_vdev_ops = { .dev_selftest = idxd_rawdev_test, .dump = idxd_dev_dump, + .dev_configure = idxd_dev_configure, }; static void * diff --git a/drivers/raw/ioat/ioat_common.c b/drivers/raw/ioat/ioat_common.c index 3dda4ab8e..699661e27 100644 --- a/drivers/raw/ioat/ioat_common.c +++ b/drivers/raw/ioat/ioat_common.c @@ -37,6 +37,67 @@ idxd_dev_dump(struct rte_rawdev *dev, FILE *f) return 0; } +int +idxd_dev_configure(const struct rte_rawdev *dev, + rte_rawdev_obj_t config, size_t config_size) +{ + struct idxd_rawdev *idxd = dev->dev_private; + struct rte_idxd_rawdev *rte_idxd = &idxd->public; + struct rte_ioat_rawdev_config *cfg = config; + uint16_t max_desc = cfg->ring_size; + uint16_t max_batches = max_desc / BATCH_SIZE; + uint16_t i; + + if (config_size != sizeof(*cfg)) + return -EINVAL; + + if (dev->started) { + IOAT_PMD_ERR("%s: Error, device is started.", __func__); + return -EAGAIN; + } + + rte_idxd->hdls_disable = cfg->hdls_disable; + + /* limit the batches to what can be stored in 
hardware */ + if (max_batches > idxd->max_batches) { + IOAT_PMD_DEBUG("Ring size of %u is too large for this device, need to limit to %u batches of %u", + max_desc, idxd->max_batches, BATCH_SIZE); + max_batches = idxd->max_batches; + max_desc = max_batches * BATCH_SIZE; + } + if (!rte_is_power_of_2(max_desc)) + max_desc = rte_align32pow2(max_desc); + IOAT_PMD_DEBUG("Rawdev %u using %u descriptors in %u batches", + dev->dev_id, max_desc, max_batches); + + /* TODO clean up if reconfiguring an existing device */ + rte_idxd->batch_ring = rte_zmalloc(NULL, + sizeof(*rte_idxd->batch_ring) * max_batches, 0); + if (rte_idxd->batch_ring == NULL) + return -ENOMEM; + + rte_idxd->hdl_ring = rte_zmalloc(NULL, + sizeof(*rte_idxd->hdl_ring) * max_desc, 0); + if (rte_idxd->hdl_ring == NULL) { + rte_free(rte_idxd->batch_ring); + rte_idxd->batch_ring = NULL; + return -ENOMEM; + } + rte_idxd->batch_ring_sz = max_batches; + rte_idxd->hdl_ring_sz = max_desc; + + for (i = 0; i < rte_idxd->batch_ring_sz; i++) { + struct rte_idxd_desc_batch *b = &rte_idxd->batch_ring[i]; + b->batch_desc.completion = rte_mem_virt2iova(&b->comp); + b->batch_desc.desc_addr = rte_mem_virt2iova(&b->null_desc); + b->batch_desc.op_flags = (idxd_op_batch << IDXD_CMD_OP_SHIFT) | + IDXD_FLAG_COMPLETION_ADDR_VALID | + IDXD_FLAG_REQUEST_COMPLETION; + } + + return 0; +} + int idxd_rawdev_create(const char *name, struct rte_device *dev, const struct idxd_rawdev *base_idxd, diff --git a/drivers/raw/ioat/ioat_private.h b/drivers/raw/ioat/ioat_private.h index a873f3f2c..91ded444b 100644 --- a/drivers/raw/ioat/ioat_private.h +++ b/drivers/raw/ioat/ioat_private.h @@ -59,6 +59,9 @@ extern int idxd_rawdev_create(const char *name, struct rte_device *dev, const struct idxd_rawdev *idxd, const struct rte_rawdev_ops *ops); +extern int idxd_dev_configure(const struct rte_rawdev *dev, + rte_rawdev_obj_t config, size_t config_size); + extern int idxd_rawdev_test(uint16_t dev_id); extern int idxd_dev_dump(struct rte_rawdev *dev, FILE 
*f); diff --git a/drivers/raw/ioat/rte_ioat_rawdev_fns.h b/drivers/raw/ioat/rte_ioat_rawdev_fns.h index abd90514b..7090ac0a1 100644 --- a/drivers/raw/ioat/rte_ioat_rawdev_fns.h +++ b/drivers/raw/ioat/rte_ioat_rawdev_fns.h @@ -185,6 +185,7 @@ struct rte_idxd_rawdev { uint16_t next_ret_hdl; /* the next user hdl to return */ uint16_t last_completed_hdl; /* the last user hdl that has completed */ uint16_t next_free_hdl; /* where the handle for next op will go */ + uint16_t hdls_disable; /* disable tracking completion handles */ struct rte_idxd_user_hdl *hdl_ring; struct rte_idxd_desc_batch *batch_ring; -- 2.25.1