On 02/09/2021 14:13, Chengwen Feng wrote:
The skeleton dmadev driver, modeled on the rawdev skeleton driver, showcases
use of the dmadev library.
The skeleton is designed as a virtual device that is plugged into the VDEV
bus on initialization.
Also, enable compilation of the dmadev skeleton driver.
Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
---
MAINTAINERS | 1 +
drivers/dma/meson.build | 11 +
drivers/dma/skeleton/meson.build | 7 +
drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
drivers/dma/skeleton/skeleton_dmadev.h | 59 ++++
drivers/dma/skeleton/version.map | 3 +
drivers/meson.build | 1 +
7 files changed, 683 insertions(+)
create mode 100644 drivers/dma/meson.build
create mode 100644 drivers/dma/skeleton/meson.build
create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
create mode 100644 drivers/dma/skeleton/version.map
<snip>
diff --git a/drivers/dma/skeleton/skeleton_dmadev.c
b/drivers/dma/skeleton/skeleton_dmadev.c
new file mode 100644
index 0000000..7033062
--- /dev/null
+++ b/drivers/dma/skeleton/skeleton_dmadev.c
@@ -0,0 +1,601 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 HiSilicon Limited.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_eal.h>
+#include <rte_kvargs.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_ring.h>
+
This list of includes is very long, many of these are likely included
via rte_common already, for example. Please check this and remove
redundant includes.
+#include <rte_dmadev_pmd.h>
+
+#include "skeleton_dmadev.h"
+
<snip>
+
+/* Allocate the descriptor array and the four descriptor-state rings
+ * (empty -> pending -> running -> completed) for the single virtual
+ * channel. On success ownership of all allocations is transferred to
+ * @hw; on failure everything allocated here is released before
+ * returning, so @hw is left untouched.
+ *
+ * Returns 0 on success, -ENOMEM if any allocation fails.
+ */
+static int
+vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
+{
+	struct skeldma_desc *desc;
+	struct rte_ring *empty;
+	struct rte_ring *pending;
+	struct rte_ring *running;
+	struct rte_ring *completed;
+	uint16_t i;
+
+	desc = rte_zmalloc_socket("dma_skeleton_desc",
+				  nb_desc * sizeof(struct skeldma_desc),
+				  RTE_CACHE_LINE_SIZE, hw->socket_id);
+	if (desc == NULL) {
+		SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
+		return -ENOMEM;
+	}
+
+	empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
+				hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
+	if (empty == NULL || pending == NULL || running == NULL ||
+	    completed == NULL) {
+		SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
+		/* rte_ring_free()/rte_free() accept NULL, so it is safe to
+		 * release all four rings unconditionally. The pointers are
+		 * locals that go out of scope on return and were never
+		 * stored in @hw, so no stale handles can survive this path.
+		 */
+		rte_ring_free(empty);
+		rte_ring_free(pending);
+		rte_ring_free(running);
+		rte_ring_free(completed);
+		rte_free(desc);
+		return -ENOMEM;
+	}
+
+	/* The real usable ring size is *count-1* instead of *count* to
+	 * differentiate a free ring from an empty ring.
+	 * @see rte_ring_create
+	 */
+	for (i = 0; i < nb_desc - 1; i++)
+		(void)rte_ring_enqueue(empty, (void *)(desc + i));
+
+	hw->desc_mem = desc;
+	hw->desc_empty = empty;
+	hw->desc_pending = pending;
+	hw->desc_running = running;
+	hw->desc_completed = completed;
+
+	return 0;
+}
+
+/* Release everything vchan_setup() allocated. Uses desc_mem as the
+ * "was setup done?" marker, so calling this on a device that was never
+ * (or was already) set up is a no-op.
+ */
+static void
+vchan_release(struct skeldma_hw *hw)
+{
+	if (hw->desc_mem == NULL)
+		return;
+
+	rte_free(hw->desc_mem);
+	rte_ring_free(hw->desc_empty);
+	rte_ring_free(hw->desc_pending);
+	rte_ring_free(hw->desc_running);
+	rte_ring_free(hw->desc_completed);
+
+	/* Clear all handles so a repeated release is harmless. */
+	hw->desc_mem = NULL;
+	hw->desc_empty = NULL;
+	hw->desc_pending = NULL;
+	hw->desc_running = NULL;
+	hw->desc_completed = NULL;
+}
+
<snip>
With the minor comments above addressed,
Reviewed-by: Kevin Laatz <kevin.la...@intel.com>