Add the start and stop functions for DSA hardware devices.

Signed-off-by: Bruce Richardson <bruce.richard...@intel.com>
Signed-off-by: Kevin Laatz <kevin.la...@intel.com>
---
 drivers/raw/ioat/idxd_pci.c  | 52 ++++++++++++++++++++++++++++++++++++
 drivers/raw/ioat/idxd_vdev.c | 50 ++++++++++++++++++++++++++++++++++
 2 files changed, 102 insertions(+)
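Note for context: a minimal application-side sketch of how these new callbacks
are reached through the generic rawdev API, assuming rawdev id 0 is an idxd
device whose work queue has already been configured (the id and function name
are illustrative, not part of this patch):

#include <rte_rawdev.h>

/* Illustrative only: assumes rawdev 0 is an idxd device whose work queue
 * has already been set up via the dev_configure callback, so batch_ring
 * is allocated.
 */
static int
dsa_start_stop_example(void)
{
	const uint16_t dev_id = 0;

	/* rte_rawdev_start() dispatches to the dev_start op added here
	 * (idxd_pci_dev_start or idxd_vdev_start), which enables the WQ.
	 */
	if (rte_rawdev_start(dev_id) < 0)
		return -1;

	/* ... enqueue and complete copy operations ... */

	/* rte_rawdev_stop() dispatches to dev_stop, disabling the WQ again. */
	rte_rawdev_stop(dev_id);

	return 0;
}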
diff --git a/drivers/raw/ioat/idxd_pci.c b/drivers/raw/ioat/idxd_pci.c
index 762efd5ac..6655cf9b7 100644
--- a/drivers/raw/ioat/idxd_pci.c
+++ b/drivers/raw/ioat/idxd_pci.c
@@ -51,10 +51,62 @@ idxd_is_wq_enabled(struct idxd_rawdev *idxd)
 	return (state & WQ_STATE_MASK) == 0x1;
 }
 
+static void
+idxd_pci_dev_stop(struct rte_rawdev *dev)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	uint8_t err_code;
+
+	if (!idxd_is_wq_enabled(idxd)) {
+		IOAT_PMD_ERR("Work queue %d already disabled", idxd->qid);
+		return;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+	if (err_code || idxd_is_wq_enabled(idxd)) {
+		IOAT_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		return;
+	}
+	IOAT_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+
+	return;
+}
+
+static int
+idxd_pci_dev_start(struct rte_rawdev *dev)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	uint8_t err_code;
+
+	if (idxd_is_wq_enabled(idxd)) {
+		IOAT_PMD_WARN("WQ %d already enabled", idxd->qid);
+		return 0;
+	}
+
+	if (idxd->public.batch_ring == NULL) {
+		IOAT_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+		return -EINVAL;
+	}
+
+	err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+	if (err_code || !idxd_is_wq_enabled(idxd)) {
+		IOAT_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+				idxd->qid, err_code);
+		return err_code == 0 ? -1 : err_code;
+	}
+
+	IOAT_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+	return 0;
+}
+
 static const struct rte_rawdev_ops idxd_pci_ops = {
 		.dev_selftest = idxd_rawdev_test,
 		.dump = idxd_dev_dump,
 		.dev_configure = idxd_dev_configure,
+		.dev_start = idxd_pci_dev_start,
+		.dev_stop = idxd_pci_dev_stop,
 };
 
 /* each portal uses 4 x 4k pages */
diff --git a/drivers/raw/ioat/idxd_vdev.c b/drivers/raw/ioat/idxd_vdev.c
index 90ad11006..ab7efd216 100644
--- a/drivers/raw/ioat/idxd_vdev.c
+++ b/drivers/raw/ioat/idxd_vdev.c
@@ -32,10 +32,60 @@ struct idxd_vdev_args {
 	uint8_t wq_id;
 };
 
+static void
+idxd_vdev_stop(struct rte_rawdev *dev)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	int ret;
+
+	if (!accfg_wq_is_enabled(idxd->u.vdev.wq)) {
+		IOAT_PMD_ERR("Work queue %s already disabled",
+				accfg_wq_get_devname(idxd->u.vdev.wq));
+		return;
+	}
+
+	ret = accfg_wq_disable(idxd->u.vdev.wq);
+	if (ret) {
+		IOAT_PMD_INFO("Work queue %s not disabled, continuing...",
+				accfg_wq_get_devname(idxd->u.vdev.wq));
+		return;
+	}
+	IOAT_PMD_DEBUG("Disabling work queue %s OK",
+			accfg_wq_get_devname(idxd->u.vdev.wq));
+
+	return;
+}
+
+static int
+idxd_vdev_start(struct rte_rawdev *dev)
+{
+	struct idxd_rawdev *idxd = dev->dev_private;
+	int ret;
+
+	if (accfg_wq_is_enabled(idxd->u.vdev.wq)) {
+		IOAT_PMD_ERR("Work queue %s already enabled",
+				accfg_wq_get_devname(idxd->u.vdev.wq));
+		return 0;
+	}
+
+	ret = accfg_wq_enable(idxd->u.vdev.wq);
+	if (ret) {
+		IOAT_PMD_ERR("Error enabling work queue %s",
+				accfg_wq_get_devname(idxd->u.vdev.wq));
+		return -1;
+	}
+	IOAT_PMD_DEBUG("Enabling work queue %s OK",
+			accfg_wq_get_devname(idxd->u.vdev.wq));
+
+	return 0;
+}
+
 static const struct rte_rawdev_ops idxd_vdev_ops = {
 		.dev_selftest = idxd_rawdev_test,
 		.dump = idxd_dev_dump,
 		.dev_configure = idxd_dev_configure,
+		.dev_start = idxd_vdev_start,
+		.dev_stop = idxd_vdev_stop,
 };
 
 static void *
-- 
2.25.1