On Fri, Sep 21, 2018 at 12:33:06PM +0200, Jens Freimann wrote:
> Use packed virtqueue format when reading and writing descriptors
> to/from the ring.
> 
> Signed-off-by: Jens Freimann <jfreim...@redhat.com>
> ---
>  drivers/net/virtio/virtio_ethdev.c | 90 ++++++++++++++++++++++++++++++
>  1 file changed, 90 insertions(+)
> 
> diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
> index c4ef095ed..c1d95141d 100644
> --- a/drivers/net/virtio/virtio_ethdev.c
> +++ b/drivers/net/virtio/virtio_ethdev.c

Support for virtio-user is missing, and virtio-user will
break when the packed ring is enabled. FYI, below is the relevant
code in virtio-user:

https://github.com/DPDK/dpdk/blob/55d6bb67c9b3/drivers/net/virtio/virtio_user/virtio_user_dev.c#L584-L606
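
(To illustrate why it breaks -- a rough, paraphrased sketch of the
control queue handling in virtio_user_dev.c, not necessarily the exact
lines behind the link above: that code walks the split-ring avail/used
rings directly, so once the packed layout is negotiated these pointers
no longer describe the ring. Function and field names here, e.g.
virtio_user_handle_ctrl_msg() and dev->vrings[], are from memory and
"dev"/"queue_idx" are the enclosing function's parameters.)

	uint16_t avail_idx, desc_idx;
	uint32_t n_descs;
	struct vring_used_elem *uep;
	struct vring *vring = &dev->vrings[queue_idx];

	/* consume the avail ring, using the used ring idx as the start */
	while (vring->used->idx != vring->avail->idx) {
		avail_idx = vring->used->idx & (vring->num - 1);
		desc_idx = vring->avail->ring[avail_idx];

		/* handle the control command held by this descriptor chain */
		n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx);

		/* mark the chain as used in the split used ring */
		uep = &vring->used->ring[avail_idx];
		uep->id = avail_idx;
		uep->len = n_descs;

		vring->used->idx++;
	}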


> @@ -141,6 +141,90 @@ static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
>  
>  struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
>  
> +static struct virtio_pmd_ctrl *
> +virtio_pq_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
> +                    int *dlen, int pkt_num)
> +{
> +     struct virtqueue *vq = cvq->vq;
> +     int head;
> +     struct vring_desc_packed *desc = vq->vq_ring.desc_packed;
> +     struct virtio_pmd_ctrl *result;
> +     int wrap_counter;
> +     int sum = 0;
> +     int k;
> +
> +     /*
> +      * Format is enforced in qemu code:
> +      * One TX packet for header;
> +      * At least one TX packet per argument;
> +      * One RX packet for ACK.
> +      */
> +     head = vq->vq_avail_idx;
> +     wrap_counter = vq->vq_ring.avail_wrap_counter;
> +     desc[head].flags = VRING_DESC_F_NEXT;
> +     desc[head].addr = cvq->virtio_net_hdr_mem;
> +     desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
> +     vq->vq_free_cnt--;
> +     if (++vq->vq_avail_idx >= vq->vq_nentries) {
> +             vq->vq_avail_idx -= vq->vq_nentries;
> +             vq->vq_ring.avail_wrap_counter ^= 1;
> +     }
> +
> +     for (k = 0; k < pkt_num; k++) {
> +             desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
> +                     + sizeof(struct virtio_net_ctrl_hdr)
> +                     + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
> +             desc[vq->vq_avail_idx].len = dlen[k];
> +             desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT;
> +             sum += dlen[k];
> +             _set_desc_avail(&desc[vq->vq_avail_idx],
> +                             vq->vq_ring.avail_wrap_counter);
> +             rte_smp_wmb();
> +             vq->vq_free_cnt--;
> +             if (++vq->vq_avail_idx >= vq->vq_nentries) {
> +                     vq->vq_avail_idx -= vq->vq_nentries;
> +                     vq->vq_ring.avail_wrap_counter ^= 1;
> +             }
> +     }
> +
> +     desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
> +             + sizeof(struct virtio_net_ctrl_hdr);
> +     desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
> +     desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE;
> +     _set_desc_avail(&desc[vq->vq_avail_idx],
> +                     vq->vq_ring.avail_wrap_counter);
> +     rte_smp_wmb();
> +     /* expose the head descriptor only after the whole chain is written */
> +     _set_desc_avail(&desc[head], wrap_counter);
> +     rte_smp_wmb();
> +
> +     vq->vq_free_cnt--;
> +     if (++vq->vq_avail_idx >= vq->vq_nentries) {
> +             vq->vq_avail_idx -= vq->vq_nentries;
> +             vq->vq_ring.avail_wrap_counter ^= 1;
> +     }
> +
> +     virtqueue_notify(vq);
> +
> +     /* wait for used descriptors in virtqueue */
> +     do {
> +             rte_rmb();
> +             usleep(100);
> +     } while (!_desc_is_used(&desc[head]));
> +
> +     /* now get used descriptors */
> +     while (desc_is_used(&desc[vq->vq_used_cons_idx], &vq->vq_ring)) {
> +             vq->vq_free_cnt++;
> +             if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
> +                     vq->vq_used_cons_idx -= vq->vq_nentries;
> +                     vq->vq_ring.used_wrap_counter ^= 1;
> +             }
> +     }
> +
> +     result = cvq->virtio_net_hdr_mz->addr;
> +     return result;
> +}
> +
>  static int
>  virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>               int *dlen, int pkt_num)
> @@ -174,6 +258,11 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>       memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
>               sizeof(struct virtio_pmd_ctrl));
>  
> +     if (vtpci_packed_queue(vq->hw)) {
> +             result = virtio_pq_send_command(cvq, ctrl, dlen, pkt_num);
> +             goto out_unlock;
> +     }
> +
>       /*
>        * Format is enforced in qemu code:
>        * One TX packet for header;
> @@ -245,6 +334,7 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
>  
>       result = cvq->virtio_net_hdr_mz->addr;
>  
> +out_unlock:
>       rte_spinlock_unlock(&cvq->lock);
>       return result->status;
>  }
> -- 
> 2.17.1
> 
