This patch adds support for basic DMA operations, including
device capability reporting and channel setup.

Signed-off-by: Gagandeep Singh <g.si...@nxp.com>
---
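Note for reviewers (not applied with the patch): below is a minimal
sketch of how an application would drive the ops added here through
the dmadev API this series builds on. The public function names and
struct fields (rte_dmadev_info_get() etc., nb_vchans, direction) are
assumptions inferred from the ops table in this patch, not verified
against the exact dmadev revision under review.

    #include <rte_dmadev.h>

    static int
    dpaa_qdma_bringup(uint16_t dev_id)
    {
            struct rte_dmadev_info info;
            struct rte_dmadev_conf conf = { .nb_vchans = 1 };
            struct rte_dmadev_vchan_conf vconf = {
                    .direction = RTE_DMA_DIR_MEM_TO_MEM,
            };

            /* Served by dpaa_info_get(): max_vchans = 1, 128 descriptors. */
            if (rte_dmadev_info_get(dev_id, &info) < 0)
                    return -1;

            /* dpaa_qdma_configure() accepts any configuration for now. */
            if (rte_dmadev_configure(dev_id, &conf) < 0)
                    return -1;

            /* dpaa_qdma_queue_setup() binds vchan 0 to a free hw channel. */
            if (rte_dmadev_vchan_setup(dev_id, 0, &vconf) < 0)
                    return -1;

            /* dpaa_qdma_start() is a no-op at this stage of the series. */
            return rte_dmadev_start(dev_id);
    }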
 drivers/dma/dpaa/dpaa_qdma.c | 182 +++++++++++++++++++++++++++++++++++
 drivers/dma/dpaa/dpaa_qdma.h |   6 ++
 2 files changed, 188 insertions(+)

diff --git a/drivers/dma/dpaa/dpaa_qdma.c b/drivers/dma/dpaa/dpaa_qdma.c
index 8b0454abce..0297166550 100644
--- a/drivers/dma/dpaa/dpaa_qdma.c
+++ b/drivers/dma/dpaa/dpaa_qdma.c
@@ -8,6 +8,18 @@
 #include "dpaa_qdma.h"
 #include "dpaa_qdma_logs.h"
 
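+/* Program a 64-bit bus address into a descriptor as split hi/lo words. */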
+static inline void
+qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
+{
+       ccdf->addr_hi = upper_32_bits(addr);
+       ccdf->addr_lo = rte_cpu_to_le_32(lower_32_bits(addr));
+}
+
+static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
+{
+       csgf->cfg = rte_cpu_to_le_32(len & QDMA_SG_LEN_MASK);
+}
+
 static inline int ilog2(int x)
 {
        int log = 0;
@@ -84,6 +96,64 @@ static void fsl_qdma_free_chan_resources(struct fsl_qdma_chan *fsl_chan)
 finally:
        fsl_qdma->desc_allocated--;
 }
+
+/*
+ * Pre-request command descriptor and compound S/G for enqueue.
+ */
+static int
+fsl_qdma_pre_request_enqueue_comp_sd_desc(struct fsl_qdma_queue *queue,
+                                          int size, int aligned)
+{
+       struct fsl_qdma_comp *comp_temp;
+       struct fsl_qdma_sdf *sdf;
+       struct fsl_qdma_ddf *ddf;
+       struct fsl_qdma_format *csgf_desc;
+       int i;
+
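+       /* One command buffer and one descriptor buffer per CQ entry,
+        * plus overflow slack, all parked on the queue's free list.
+        */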
+       for (i = 0; i < (int)(queue->n_cq + COMMAND_QUEUE_OVERFLLOW); i++) {
+               comp_temp = rte_zmalloc("qdma: comp temp",
+                                       sizeof(*comp_temp), 0);
+               if (!comp_temp)
+                       return -ENOMEM;
+
+               comp_temp->virt_addr =
+                       dma_pool_alloc(size, aligned, &comp_temp->bus_addr);
+               if (!comp_temp->virt_addr) {
+                       rte_free(comp_temp);
+                       return -ENOMEM;
+               }
+
+               comp_temp->desc_virt_addr =
+                       dma_pool_alloc(size, aligned, &comp_temp->desc_bus_addr);
+               if (!comp_temp->desc_virt_addr) {
+                       /* Don't leak the command buffer allocated above. */
+                       dma_pool_free(comp_temp->virt_addr);
+                       rte_free(comp_temp);
+                       return -ENOMEM;
+               }
+
+               memset(comp_temp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
+               memset(comp_temp->desc_virt_addr, 0,
+                      FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
+
+               csgf_desc = (struct fsl_qdma_format *)comp_temp->virt_addr + 1;
+               sdf = (struct fsl_qdma_sdf *)comp_temp->desc_virt_addr;
+               ddf = (struct fsl_qdma_ddf *)comp_temp->desc_virt_addr + 1;
+               /* Compound command descriptor (frame list table) */
+               qdma_desc_addr_set64(csgf_desc, comp_temp->desc_bus_addr);
+               /* Compound S/G descriptor length is fixed at 32 bytes */
+               qdma_csgf_set_len(csgf_desc, 32);
+               /* Descriptor Buffer */
+               sdf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+                              FSL_QDMA_CMD_RWTTYPE_OFFSET);
+               ddf->cmd = rte_cpu_to_le_32(FSL_QDMA_CMD_RWTTYPE <<
+                              FSL_QDMA_CMD_RWTTYPE_OFFSET);
+               ddf->cmd |= rte_cpu_to_le_32(FSL_QDMA_CMD_LWC <<
+                               FSL_QDMA_CMD_LWC_OFFSET);
+
+               list_add_tail(&comp_temp->list, &queue->comp_free);
+       }
+
+       return 0;
+}
+
 static struct fsl_qdma_queue
 *fsl_qdma_alloc_queue_resources(struct fsl_qdma_engine *fsl_qdma)
 {
@@ -311,6 +381,79 @@ static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
        return 0;
 }
 
+static int fsl_qdma_alloc_chan_resources(struct fsl_qdma_chan *fsl_chan)
+{
+       struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
+       struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
+       int ret;
+
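+       /* The queue is shared; only its first channel allocates resources. */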
+       if (fsl_queue->count++)
+               goto finally;
+
+       INIT_LIST_HEAD(&fsl_queue->comp_free);
+       INIT_LIST_HEAD(&fsl_queue->comp_used);
+
+       ret = fsl_qdma_pre_request_enqueue_comp_sd_desc(fsl_queue,
+                               FSL_QDMA_COMMAND_BUFFER_SIZE, 64);
+       if (ret) {
+               DPAA_QDMA_ERR(
+                       "failed to alloc dma buffer for comp descriptor\n");
+               goto exit;
+       }
+
+finally:
+       return fsl_qdma->desc_allocated++;
+
+exit:
+       fsl_queue->count--;
+       return -ENOMEM;
+}
+
+static int
+dpaa_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *dev_info,
+             uint32_t info_sz)
+{
+#define DPAADMA_MAX_DESC        128
+#define DPAADMA_MIN_DESC        128
+
+       RTE_SET_USED(dev);
+       RTE_SET_USED(info_sz);
+
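+       /* qDMA supports all four memory/device transfer directions. */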
+       dev_info->dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+                            RTE_DMADEV_CAPA_MEM_TO_DEV |
+                            RTE_DMADEV_CAPA_DEV_TO_DEV |
+                            RTE_DMADEV_CAPA_DEV_TO_MEM |
+                            RTE_DMADEV_CAPA_SILENT |
+                            RTE_DMADEV_CAPA_OPS_COPY;
+       dev_info->max_vchans = 1;
+       dev_info->max_desc = DPAADMA_MAX_DESC;
+       dev_info->min_desc = DPAADMA_MIN_DESC;
+
+       return 0;
+}
+
+static int
+dpaa_get_channel(struct fsl_qdma_engine *fsl_qdma, uint16_t vchan)
+{
+       u32 i, start, end;
+
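+       /* Bind this vchan to the first free channel of the next queue block. */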
+       start = fsl_qdma->free_block_id * QDMA_QUEUES;
+       fsl_qdma->free_block_id++;
+
+       end = start + 1;
+       for (i = start; i < end; i++) {
+               struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];
+
+               if (fsl_chan->free) {
+                       fsl_chan->free = false;
+                       fsl_qdma_alloc_chan_resources(fsl_chan);
+                       fsl_qdma->vchan_map[vchan] = i;
+                       return 0;
+               }
+       }
+
+       return -1;
+}
+
 static void
 dma_release(void *fsl_chan)
 {
@@ -318,6 +461,43 @@ dma_release(void *fsl_chan)
        fsl_qdma_free_chan_resources((struct fsl_qdma_chan *)fsl_chan);
 }
 
+static int
+dpaa_qdma_configure(__rte_unused struct rte_dmadev *dmadev,
+                   __rte_unused const struct rte_dmadev_conf *dev_conf)
+{
+       return 0;
+}
+
+static int
+dpaa_qdma_start(__rte_unused struct rte_dmadev *dev)
+{
+       return 0;
+}
+
+static int
+dpaa_qdma_close(__rte_unused struct rte_dmadev *dev)
+{
+       return 0;
+}
+
+static int
+dpaa_qdma_queue_setup(struct rte_dmadev *dmadev,
+                     uint16_t vchan,
+                     __rte_unused const struct rte_dmadev_vchan_conf *conf)
+{
+       struct fsl_qdma_engine *fsl_qdma = dmadev->dev_private;
+
+       return dpaa_get_channel(fsl_qdma, vchan);
+}
+
+static struct rte_dmadev_ops dpaa_qdma_ops = {
+       .dev_info_get             = dpaa_info_get,
+       .dev_configure            = dpaa_qdma_configure,
+       .dev_start                = dpaa_qdma_start,
+       .dev_close                = dpaa_qdma_close,
+       .vchan_setup              = dpaa_qdma_queue_setup,
+};
+
 static int
 dpaa_qdma_init(struct rte_dmadev *dmadev)
 {
@@ -430,6 +610,8 @@ dpaa_qdma_probe(__rte_unused struct rte_dpaa_driver *dpaa_drv,
        }
 
        dpaa_dev->dmadev = dmadev;
+       dmadev->dev_ops = &dpaa_qdma_ops;
+       dmadev->device = &dpaa_dev->device;
 
        /* Invoke PMD device initialization function */
        ret = dpaa_qdma_init(dmadev);
diff --git a/drivers/dma/dpaa/dpaa_qdma.h b/drivers/dma/dpaa/dpaa_qdma.h
index cc0d1f114e..f482b16334 100644
--- a/drivers/dma/dpaa/dpaa_qdma.h
+++ b/drivers/dma/dpaa/dpaa_qdma.h
@@ -8,6 +8,12 @@
 #define CORE_NUMBER 4
 #define RETRIES        5
 
+#ifndef GENMASK
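+/* Contiguous bitmask from bit h down to bit l, e.g. GENMASK(3, 0) = 0xf. */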
+#define BITS_PER_LONG  (__SIZEOF_LONG__ * 8)
+#define GENMASK(h, l) \
+               (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif
+
 #define FSL_QDMA_DMR                   0x0
 #define FSL_QDMA_DSR                   0x4
 #define FSL_QDMA_DEIER                 0xe00
-- 
2.25.1
