On 2021/9/3 23:14, Kevin Laatz wrote:
> On 02/09/2021 14:13, Chengwen Feng wrote:
>> Skeleton dmadev driver, along the lines of the rawdev skeleton, is for
>> showcasing the dmadev library.
>>
>> The design of the skeleton involves a virtual device which is plugged
>> into the VDEV bus on initialization.
>>
>> Also, enable compilation of the dmadev skeleton driver.
>>
>> Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
>> ---
>>   MAINTAINERS                            |   1 +
>>   drivers/dma/meson.build                |  11 +
>>   drivers/dma/skeleton/meson.build       |   7 +
>>   drivers/dma/skeleton/skeleton_dmadev.c | 601 +++++++++++++++++++++++++++++++++
>>   drivers/dma/skeleton/skeleton_dmadev.h |  59 ++++
>>   drivers/dma/skeleton/version.map       |   3 +
>>   drivers/meson.build                    |   1 +
>>   7 files changed, 683 insertions(+)
>>   create mode 100644 drivers/dma/meson.build
>>   create mode 100644 drivers/dma/skeleton/meson.build
>>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.c
>>   create mode 100644 drivers/dma/skeleton/skeleton_dmadev.h
>>   create mode 100644 drivers/dma/skeleton/version.map
>>

<snip>

>>
>> +
>> +static int
>> +vchan_setup(struct skeldma_hw *hw, uint16_t nb_desc)
>> +{
>> +    struct skeldma_desc *desc;
>> +    struct rte_ring *empty;
>> +    struct rte_ring *pending;
>> +    struct rte_ring *running;
>> +    struct rte_ring *completed;
>> +    uint16_t i;
>> +
>> +    desc = rte_zmalloc_socket("dma_skeleton_desc",
>> +                  nb_desc * sizeof(struct skeldma_desc),
>> +                  RTE_CACHE_LINE_SIZE, hw->socket_id);
>> +    if (desc == NULL) {
>> +        SKELDMA_LOG(ERR, "Malloc dma skeleton desc fail!");
>> +        return -ENOMEM;
>> +    }
>> +
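>> +    /* Descriptors flow through four rings over their lifetime:
>> +     * empty -> pending -> running -> completed.
>> +     */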
>> +    empty = rte_ring_create("dma_skeleton_desc_empty", nb_desc,
>> +                hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    pending = rte_ring_create("dma_skeleton_desc_pending", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    running = rte_ring_create("dma_skeleton_desc_running", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    completed = rte_ring_create("dma_skeleton_desc_completed", nb_desc,
>> +                  hw->socket_id, RING_F_SP_ENQ | RING_F_SC_DEQ);
>> +    if (empty == NULL || pending == NULL || running == NULL ||
>> +        completed == NULL) {
>> +        SKELDMA_LOG(ERR, "Create dma skeleton desc ring fail!");
>> +        rte_ring_free(empty);
>> +        rte_ring_free(pending);
>> +        rte_ring_free(running);
>> +        rte_ring_free(completed);
>> +        rte_free(desc);
> 
> These pointers should be set to NULL after freeing, similar to what you 
> have in "vchan_release()".
> 

These pointers are local variables; do they need to be cleaned up?

The set-to-NULL operation in 'vchan_release' is there because those
pointers are held by the dmadev, which has a longer life cycle.
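
To illustrate, here is a minimal sketch (not driver code; the function
names are made up for the example):

    #include <errno.h>
    #include <rte_memory.h>
    #include <rte_ring.h>
    #include "skeleton_dmadev.h" /* struct skeldma_hw, as in this patch */

    /* Case 1: local pointer. The variable itself disappears when the
     * function returns, so writing NULL into it after rte_ring_free()
     * would have no observable effect.
     */
    static int
    local_pointer_case(void)
    {
        struct rte_ring *r;

        r = rte_ring_create("tmp_ring", 64, SOCKET_ID_ANY,
                    RING_F_SP_ENQ | RING_F_SC_DEQ);
        if (r == NULL)
            return -ENOMEM;
        /* ... suppose a later setup step fails ... */
        rte_ring_free(r);
        return -ENOMEM; /* 'r' goes out of scope right here */
    }

    /* Case 2: pointer held in a long-lived structure. 'hw' outlives
     * this call, so the stale pointer must be cleared, otherwise a
     * later release could dereference or double-free it.
     */
    static void
    held_pointer_case(struct skeldma_hw *hw)
    {
        rte_ring_free(hw->desc_empty);
        hw->desc_empty = NULL;
    }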

Thanks

> 
>> +        return -ENOMEM;
>> +    }
>> +
>> +    /* The real usable ring size is *count-1* instead of *count* to
>> +     * differentiate a full ring from an empty ring.
>> +     * @see rte_ring_create
>> +     */
>> +    for (i = 0; i < nb_desc - 1; i++)
>> +        (void)rte_ring_enqueue(empty, (void *)(desc + i));
>> +
>> +    hw->desc_mem = desc;
>> +    hw->desc_empty = empty;
>> +    hw->desc_pending = pending;
>> +    hw->desc_running = running;
>> +    hw->desc_completed = completed;
>> +
>> +    return 0;
>> +}
>> +
>> +static void
>> +vchan_release(struct skeldma_hw *hw)
>> +{
>> +    if (hw->desc_mem == NULL)
>> +        return;
>> +
>> +    rte_free(hw->desc_mem);
>> +    hw->desc_mem = NULL;
>> +    rte_ring_free(hw->desc_empty);
>> +    hw->desc_empty = NULL;
>> +    rte_ring_free(hw->desc_pending);
>> +    hw->desc_pending = NULL;
>> +    rte_ring_free(hw->desc_running);
>> +    hw->desc_running = NULL;
>> +    rte_ring_free(hw->desc_completed);
>> +    hw->desc_completed = NULL;
>> +}
>> +
>>
> <snip>
> 
> With the minor comments above addressed,
> 
> Reviewed-by: Kevin Laatz <kevin.la...@intel.com>
> 
