> -----Original Message-----
> From: Ding, Xuan <xuan.d...@intel.com>
> Sent: Thursday, September 29, 2022 9:33 AM
> To: maxime.coque...@redhat.com; Xia, Chenbo <chenbo....@intel.com>
> Cc: dev@dpdk.org; Hu, Jiayu <jiayu...@intel.com>; He, Xingguang
> <xingguang...@intel.com>; Yang, YvonneX <yvonnex.y...@intel.com>; Jiang,
> Cheng1 <cheng1.ji...@intel.com>; Wang, YuanX <yuanx.w...@intel.com>; Ma,
> WenwuX <wenwux...@intel.com>; Ding, Xuan <xuan.d...@intel.com>
> Subject: [PATCH v3 2/2] examples/vhost: unconfigure DMA vchannel
> 
> From: Xuan Ding <xuan.d...@intel.com>
> 
> This patch uses the rte_vhost_async_dma_unconfigure() API
> to manually free DMA vchannels instead of waiting until
> the program ends for them to be released.
> 
> Signed-off-by: Xuan Ding <xuan.d...@intel.com>
> ---
>  examples/vhost/main.c | 45 ++++++++++++++++++++++++++++++-------------
>  examples/vhost/main.h |  1 +
>  2 files changed, 33 insertions(+), 13 deletions(-)
> 
> diff --git a/examples/vhost/main.c b/examples/vhost/main.c
> index 0fa4753e70..32f396d88a 100644
> --- a/examples/vhost/main.c
> +++ b/examples/vhost/main.c
> @@ -1558,6 +1558,28 @@ vhost_clear_queue(struct vhost_dev *vdev, uint16_t queue_id)
>       }
>  }
> 
> +static void
> +vhost_clear_async(struct vhost_dev *vdev, int vid, uint16_t queue_id)
> +{
> +     int16_t dma_id;
> +     uint16_t ref_count;
> +
> +     if (dma_bind[vid].dmas[queue_id].async_enabled) {
> +             vhost_clear_queue(vdev, queue_id);
> +             rte_vhost_async_channel_unregister(vid, queue_id);
> +             dma_bind[vid].dmas[queue_id].async_enabled = false;
> +     }
> +
> +     dma_id = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dev_id;
> +     dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count--;
> +     ref_count = dma_bind[vid2socketid[vdev->vid]].dmas[queue_id].dma_ref_count;

The above two lines could be combined as 'ref_count = --XXXX'.

But I wonder whether this dma_ref_count is really needed. Isn't it always 0/1,
just like async_enabled? If it is meant for the case of multiple ports sharing
the same DMA device, it should not be implemented like this.
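
A minimal, self-contained sketch of the kind of per-device reference counting
being suggested here: one count per DMA device rather than per (vid, queue),
with the unconfigure step triggered only when the last user drops its
reference. The dma_ref_count[] array, the dma_get()/dma_put() helpers and the
MAX_DMA_DEVS constant are hypothetical, for illustration only; they are not
part of examples/vhost.

    #include <stdint.h>
    #include <stdio.h>

    #define INVALID_DMA_ID  (-1)
    #define MAX_DMA_DEVS    64

    /* Hypothetical: one reference count per DMA device id. */
    static uint16_t dma_ref_count[MAX_DMA_DEVS];

    /* A port starts using dma_id. */
    static void
    dma_get(int16_t dma_id)
    {
            if (dma_id != INVALID_DMA_ID)
                    dma_ref_count[dma_id]++;
    }

    /* A port stops using dma_id; returns 1 when the last reference
     * is dropped and the device could be unconfigured. */
    static int
    dma_put(int16_t dma_id)
    {
            if (dma_id == INVALID_DMA_ID)
                    return 0;

            /* Combined decrement-and-read, as suggested above. */
            return --dma_ref_count[dma_id] == 0;
    }

    int
    main(void)
    {
            int16_t dma_id = 0;

            dma_get(dma_id);        /* port A starts using the device */
            dma_get(dma_id);        /* port B starts using the device */

            printf("unconfigure after A? %d\n", dma_put(dma_id)); /* 0 */
            printf("unconfigure after B? %d\n", dma_put(dma_id)); /* 1 */
            return 0;
    }

With something like this, the example app would only call
rte_vhost_async_dma_unconfigure() when dma_put() reports the last reference is
gone, regardless of how many vhost ports were bound to the same DMA device.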

Thanks,
Chenbo

> +
> +     if (ref_count == 0 && dma_id != INVALID_DMA_ID) {
> +             if (rte_vhost_async_dma_unconfigure(dma_id, 0) < 0)
> +                     RTE_LOG(ERR, VHOST_PORT, "Failed to unconfigure DMA in vhost.\n");
> +     }
> +}
> +
>  /*
>   * Remove a device from the specific data core linked list and from the
>   * main linked list. Synchronization  occurs through the use of the
> @@ -1614,17 +1636,8 @@ destroy_device(int vid)
>               "(%d) device has been removed from data core\n",
>               vdev->vid);
> 
> -     if (dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled) {
> -             vhost_clear_queue(vdev, VIRTIO_RXQ);
> -             rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
> -             dma_bind[vid].dmas[VIRTIO_RXQ].async_enabled = false;
> -     }
> -
> -     if (dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled) {
> -             vhost_clear_queue(vdev, VIRTIO_TXQ);
> -             rte_vhost_async_channel_unregister(vid, VIRTIO_TXQ);
> -             dma_bind[vid].dmas[VIRTIO_TXQ].async_enabled = false;
> -     }
> +     vhost_clear_async(vdev, vid, VIRTIO_RXQ);
> +     vhost_clear_async(vdev, vid, VIRTIO_TXQ);
> 
>       rte_free(vdev);
>  }
> @@ -1673,14 +1686,19 @@ vhost_async_channel_register(int vid)
> 
>       if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dev_id != INVALID_DMA_ID) {
>               rx_ret = rte_vhost_async_channel_register(vid, VIRTIO_RXQ);
> -             if (rx_ret == 0)
> +             if (rx_ret == 0) {
>                       dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].async_enabled = true;
> +                     dma_bind[vid2socketid[vid]].dmas[VIRTIO_RXQ].dma_ref_count++;
> +             }
> +
>       }
> 
>       if (dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dev_id != INVALID_DMA_ID) {
>               tx_ret = rte_vhost_async_channel_register(vid, VIRTIO_TXQ);
> -             if (tx_ret == 0)
> +             if (tx_ret == 0) {
>                       dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].async_enabled = true;
> +                     dma_bind[vid2socketid[vid]].dmas[VIRTIO_TXQ].dma_ref_count++;
> +             }
>       }
> 
>       return rx_ret | tx_ret;
> @@ -1886,6 +1904,7 @@ reset_dma(void)
>               for (j = 0; j < RTE_MAX_QUEUES_PER_PORT * 2; j++) {
>                       dma_bind[i].dmas[j].dev_id = INVALID_DMA_ID;
>                       dma_bind[i].dmas[j].async_enabled = false;
> +                     dma_bind[i].dmas[j].dma_ref_count = 0;
>               }
>       }
> 
> diff --git a/examples/vhost/main.h b/examples/vhost/main.h
> index 2fcb8376c5..2b2cf828d3 100644
> --- a/examples/vhost/main.h
> +++ b/examples/vhost/main.h
> @@ -96,6 +96,7 @@ struct dma_info {
>       struct rte_pci_addr addr;
>       int16_t dev_id;
>       bool async_enabled;
> +     uint16_t dma_ref_count;
>  };
> 
>  struct dma_for_vhost {
> --
> 2.17.1
