> -----Original Message-----
> From: dev <dev-boun...@dpdk.org> On Behalf Of Xuan Ding
> Sent: Tuesday, December 10, 2019 12:50 AM
> To: maintai...@dpdk.org
> Cc: dev@dpdk.org; maxime.coque...@redhat.com; Bie, Tiwei
> <tiwei....@intel.com>; Wang, Zhihong <zhihong.w...@intel.com>; Ding, Xuan
> <xuan.d...@intel.com>; sta...@dpdk.org
> Subject: [dpdk-dev] [PATCH v1] net/virtio-user: fix packed ring server
> mode
> 
> This patch fixes the situation where the datapath does not work properly when
> vhost reconnects to virtio in server mode with packed ring.
> 
> Currently, virtio and vhost share memory of vring. For split ring, vhost
> can read the status of descriptors directly from the available ring and
> the used ring during reconnection. Therefore, the datapath can continue.
> 
> But for packed ring, when reconnecting to virtio, vhost cannot get the
> status of descriptors only through the descriptor ring. By resetting Tx
> and Rx queues, the datapath can restart from the beginning.
> 
> Fixes: 4c3f5822eb214 ("net/virtio: add packed virtqueue defines")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Xuan Ding <xuan.d...@intel.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c      | 112 +++++++++++++++++++++++-
>  drivers/net/virtio/virtio_ethdev.h      |   3 +
>  drivers/net/virtio/virtio_user_ethdev.c |   8 ++
>  3 files changed, 121 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c
> b/drivers/net/virtio/virtio_ethdev.c
> index 044eb10a7..c0cb0f23c 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c
> @@ -433,6 +433,94 @@ virtio_init_vring(struct virtqueue *vq)
>       virtqueue_disable_intr(vq);
>  }
> 
> +static int
> +virtio_user_reset_rx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{

Hi Xuan,
This function is named virtio_user_reset_rx_queues, but it looks like it has
no real relationship with virtio_user.
Renaming it to virtqueue_reset and moving it to virtqueue.c may be more
suitable.
Please also add the suffix _packed, as this function only works for packed ring.
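Something along these lines is what I have in mind. It is only a rough sketch
with a hypothetical name, and note that virtio_init_vring() is static in
virtio_ethdev.c today, so it would need to be made visible to virtqueue.c (or
its vring init open-coded there):

static void
virtqueue_rxvq_reset_packed(struct virtqueue *vq)
{
	struct vq_desc_extra *dxp;
	uint16_t desc_idx;

	/* Restore the initial packed ring state. */
	vq->vq_packed.used_wrap_counter = 1;
	vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
	vq->vq_packed.event_flags_shadow = 0;
	vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;

	/* Free mbufs still held by the old descriptors. */
	for (desc_idx = 0; desc_idx < vq->vq_nentries; desc_idx++) {
		dxp = &vq->vq_descx[desc_idx];
		if (dxp->cookie != NULL) {
			rte_pktmbuf_free(dxp->cookie);
			dxp->cookie = NULL;
		}
	}

	virtio_init_vring(vq);
}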

Thanks,
Marvin

> +     uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
> +     struct virtio_hw *hw = dev->data->dev_private;
> +     struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +     struct virtnet_rx *rxvq;
> +     struct vq_desc_extra *dxp;
> +     unsigned int vq_size;
> +     uint16_t desc_idx, i;
> +
> +     vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
The virtqueue size has already been stored in vq_nentries in the virtqueue
structure. Do we need to fetch it from the device again here?
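If not, reusing the cached value should be enough, assuming vq_nentries stays
valid across reconnection (the queue size does not change), e.g.:

	/* vq_nentries was recorded when the queue was allocated,
	 * so there is no need to query the device again.
	 */
	vq_size = vq->vq_nentries;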

> +     vq->vq_packed.used_wrap_counter = 1;
> +     vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +     vq->vq_packed.event_flags_shadow = 0;
> +     vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
> +
> +     rxvq = &vq->rxq;
> +     memset(rxvq->mz->addr, 0, rxvq->mz->len);
> +
> +     for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +             dxp = &vq->vq_descx[desc_idx];
> +             if (dxp->cookie != NULL) {
> +                     rte_pktmbuf_free(dxp->cookie);
> +                     dxp->cookie = NULL;
> +             }
> +     }
> +
> +     virtio_init_vring(vq);
> +
> +     for (i = 0; i < hw->max_queue_pairs; i++)
> +             if (rxvq->mpool != NULL)
> +                     virtio_dev_rx_queue_setup_finish(dev, i);
> +

Please add braces around the multi-line loop body.
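I.e. something like:

	for (i = 0; i < hw->max_queue_pairs; i++) {
		if (rxvq->mpool != NULL)
			virtio_dev_rx_queue_setup_finish(dev, i);
	}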

> +     return 0;
> +}
> +
> +static int
> +virtio_user_reset_tx_queues(struct rte_eth_dev *dev, uint16_t queue_idx)
> +{
> +     uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
> +     struct virtio_hw *hw = dev->data->dev_private;
> +     struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
> +     struct virtnet_tx *txvq;
> +     struct vq_desc_extra *dxp;
> +     unsigned int vq_size;
> +     uint16_t desc_idx;
> +
> +     vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
> +
> +     vq->vq_packed.used_wrap_counter = 1;
> +     vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
> +     vq->vq_packed.event_flags_shadow = 0;
> +
> +     txvq = &vq->txq;
> +     memset(txvq->mz->addr, 0, txvq->mz->len);
> +     memset(txvq->virtio_net_hdr_mz->addr, 0,
> +             txvq->virtio_net_hdr_mz->len);
> +
> +     for (desc_idx = 0; desc_idx < vq_size; desc_idx++) {
> +             dxp = &vq->vq_descx[desc_idx];
> +             if (dxp->cookie != NULL) {
> +                     rte_pktmbuf_free(dxp->cookie);
> +                     dxp->cookie = NULL;
> +             }
> +     }
> +
> +     virtio_init_vring(vq);
> +
> +     return 0;
> +}
> +
> +static int
> +virtio_user_reset_queues(struct rte_eth_dev *eth_dev)
> +{
> +     uint16_t i;
> +
> +     /* Vring reset for each Tx queue and Rx queue. */
> +     for (i = 0; i < eth_dev->data->nb_rx_queues; i++)
> +             virtio_user_reset_rx_queues(eth_dev, i);
> +
> +     for (i = 0; i < eth_dev->data->nb_tx_queues; i++)
> +             virtio_user_reset_tx_queues(eth_dev, i);
> +
> +     return 0;
> +}
> +
>  static int
>  virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
>  {
> @@ -1913,6 +2001,8 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
>                       goto err_vtpci_init;
>       }
> 
> +     rte_spinlock_init(&hw->state_lock);
> +
>       /* reset device and negotiate default features */
>       ret = virtio_init_device(eth_dev,
> VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
>       if (ret < 0)
> @@ -2155,8 +2245,6 @@ virtio_dev_configure(struct rte_eth_dev *dev)
>                       return -EBUSY;
>               }
> 
> -     rte_spinlock_init(&hw->state_lock);
> -
>       hw->use_simple_rx = 1;
> 
>       if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
> @@ -2421,6 +2509,26 @@ virtio_dev_vlan_offload_set(struct rte_eth_dev
> *dev, int mask)
>       return 0;
>  }
> 
> +int
> +virtio_user_reset_device(struct rte_eth_dev *eth_dev, struct virtio_hw
> *hw)
> +{
> +     /* Add lock to avoid queue contention. */
> +     rte_spinlock_lock(&hw->state_lock);
> +     hw->started = 0;
> +     /*
> +      * Wait for the datapath to complete before resetting queues.
> +      * 1 ms should be enough for the ongoing Tx/Rx functions to finish.
> +      */
> +     rte_delay_ms(1);
> +
> +     virtio_user_reset_queues(eth_dev);
> +
> +     hw->started = 1;
> +     rte_spinlock_unlock(&hw->state_lock);
> +
> +     return 0;
> +}
> +
>  static int
>  virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> *dev_info)
>  {
> diff --git a/drivers/net/virtio/virtio_ethdev.h
> b/drivers/net/virtio/virtio_ethdev.h
> index a10111758..72e9e3ff8 100644
> --- a/drivers/net/virtio/virtio_ethdev.h
> +++ b/drivers/net/virtio/virtio_ethdev.h
> @@ -49,6 +49,9 @@
> 
>  extern const struct eth_dev_ops virtio_user_secondary_eth_dev_ops;
> 
> +int virtio_user_reset_device(struct rte_eth_dev *eth_dev,
> +             struct virtio_hw *hw);
> +
>  /*
>   * CQ function prototype
>   */
> diff --git a/drivers/net/virtio/virtio_user_ethdev.c
> b/drivers/net/virtio/virtio_user_ethdev.c
> index 3fc172573..49068a578 100644
> --- a/drivers/net/virtio/virtio_user_ethdev.c
> +++ b/drivers/net/virtio/virtio_user_ethdev.c
> @@ -31,6 +31,7 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
>       int ret;
>       int connectfd;
>       struct rte_eth_dev *eth_dev = &rte_eth_devices[dev->port_id];
> +     struct virtio_hw *hw = eth_dev->data->dev_private;
> 
>       connectfd = accept(dev->listenfd, NULL, NULL);
>       if (connectfd < 0)
> @@ -51,6 +52,13 @@ virtio_user_server_reconnect(struct virtio_user_dev
> *dev)
> 
>       dev->features &= dev->device_features;
> 
> +     /*
> +      * For packed ring, resetting the queues
> +      * is required during reconnection.
> +      */
> +     if (vtpci_packed_queue(hw))
> +             virtio_user_reset_device(eth_dev, hw);
> +
>       ret = virtio_user_start_device(dev);
>       if (ret < 0)
>               return -1;
> --
> 2.17.1
