On 20.02.2016 00:03, Daniele Di Proietto wrote:
> Would it be safe to assume that the enabled queues are sequential?
> In this case we could just play with 'real_n_txq' instead of keeping
> a mapping and the patch would be simpler. I'm not sure if that's a
> reasonable assumption though.

I thought about that. IMHO, if we can support these virtio features, we
must support them, because we can't force guest applications not to
disable arbitrary queues.
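
To illustrate why a per-queue map is kept instead of just shrinking
'real_n_txq' (a standalone, simplified example, not the patch code;
build_txq_map(), MAX_TXQS and the arrays are invented for the
illustration): the guest may leave, say, queues 0 and 2 enabled while
disabling queue 1, and such a set can't be described by a single queue
count:

#include <stdio.h>
#include <stdbool.h>

#define MAX_TXQS 8

/* Simplified example only: build a mapping from configured tx qids to
 * the queues actually enabled by the guest.  Enabled queues map to
 * themselves, disabled ones are spread over the enabled set, and -1
 * means "nothing usable, drop".  Assumes total_txqs <= MAX_TXQS. */
static void
build_txq_map(int map[], const bool enabled[], int total_txqs)
{
    int enabled_ids[MAX_TXQS];
    int n_enabled = 0;
    int i, k = 0;

    for (i = 0; i < total_txqs; i++) {
        map[i] = -1;
        if (enabled[i]) {
            enabled_ids[n_enabled++] = i;
        }
    }

    for (i = 0; i < total_txqs; i++) {
        if (enabled[i]) {
            map[i] = i;
        } else if (n_enabled) {
            map[i] = enabled_ids[k];
            k = (k + 1) % n_enabled;
        }
    }
}

int
main(void)
{
    /* Guest enabled queues 0 and 2 but disabled queue 1. */
    bool enabled[3] = { true, false, true };
    int map[3], i;

    build_txq_map(map, enabled, 3);
    for (i = 0; i < 3; i++) {
        printf("tx_qid %d --> %d\n", i, map[i]);
    }
    return 0;
}

Here the disabled qid is redirected to one of the enabled queues
("1 --> 0") instead of losing its packets.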

> Also, on my system, with qemu-2.5.0, vring_state_changed() is called
> after new_device(), leading to the following scenario:
> 
> 2016-02-19T19:04:13.617Z|00001|dpdk(vhost_thread2)|DBG|TX queue mapping for /home/daniele/ovs/_run/run/dv0
> 2016-02-19T19:04:13.618Z|00002|dpdk(vhost_thread2)|DBG| 0 -->  0
> 2016-02-19T19:04:13.618Z|00003|dpdk(vhost_thread2)|INFO|State of queue 0 ( tx_qid 0 ) of vhost device '/home/daniele/ovs/_run/run/dv0' 0 changed to 'enabled'
> 2016-02-19T19:04:13.618Z|00004|dpdk(vhost_thread2)|DBG|TX queue mapping for /home/daniele/ovs/_run/run/dv0
> 2016-02-19T19:04:13.618Z|00005|dpdk(vhost_thread2)|DBG| 0 -->  0
> 2016-02-19T19:04:13.618Z|00006|dpdk(vhost_thread2)|INFO|State of queue 2 ( tx_qid 1 ) of vhost device '/home/daniele/ovs/_run/run/dv0' 0 changed to 'disabled'
> 2016-02-19T19:04:13.619Z|00007|dpdk(vhost_thread2)|DBG|TX queue mapping for /home/daniele/ovs/_run/run/dv1
> 2016-02-19T19:04:13.619Z|00008|dpdk(vhost_thread2)|DBG| 0 -->  0
> 2016-02-19T19:04:13.619Z|00009|dpdk(vhost_thread2)|INFO|State of queue 0 ( tx_qid 0 ) of vhost device '/home/daniele/ovs/_run/run/dv1' 1 changed to 'enabled'
> 2016-02-19T19:04:13.619Z|00010|dpdk(vhost_thread2)|DBG|TX queue mapping for /home/daniele/ovs/_run/run/dv1
> 2016-02-19T19:04:13.619Z|00011|dpdk(vhost_thread2)|DBG| 0 -->  0
> 2016-02-19T19:04:13.619Z|00012|dpdk(vhost_thread2)|INFO|State of queue 2 ( tx_qid 1 ) of vhost device '/home/daniele/ovs/_run/run/dv1' 1 changed to 'disabled'
> 2016-02-19T19:04:13.619Z|00013|dpdk(vhost_thread2)|INFO|vHost Device '/home/daniele/ovs/_run/run/dv0' 0 has been added
> 2016-02-19T19:04:13.620Z|00014|dpdk(vhost_thread2)|INFO|vHost Device '/home/daniele/ovs/_run/run/dv1' 1 has been added
> 
> 
> tx_qid 1 of both devices is still mapped to -1, causing the packets to
> be lost

Thanks for pointing that out. There is an inconsistency between the
dpdk_tx_queue-s and the real virtqueues. I'll fix that.
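
One way to keep them consistent (just a standalone toy sketch of the
idea, not the actual change; toy_dev, toy_remap, toy_vring_state_changed
and toy_new_device are invented names) would be to only record the queue
state when 'vring_state_changed' fires and to rebuild the tx qid mapping
again when the device is added, so that state changes arriving before
new_device() are not lost:

#include <stdio.h>
#include <stdbool.h>

#define N_TXQ 2

/* Toy model of the callback ordering seen in the log above. */
struct toy_dev {
    bool enabled[N_TXQ];
    int map[N_TXQ];
    bool added;
};

static void
toy_remap(struct toy_dev *dev)
{
    int i, first_enabled = -1;

    if (!dev->added) {
        return;                 /* Queue count not known yet. */
    }
    for (i = 0; i < N_TXQ; i++) {
        if (dev->enabled[i]) {
            first_enabled = i;
            break;
        }
    }
    for (i = 0; i < N_TXQ; i++) {
        /* Enabled qids map to themselves; disabled ones fall back to an
         * enabled qid (a real implementation would likely spread them,
         * as in the sketch earlier); -1 if nothing is enabled -> drop. */
        dev->map[i] = dev->enabled[i] ? i : first_enabled;
    }
}

static void
toy_vring_state_changed(struct toy_dev *dev, int qid, bool enable)
{
    dev->enabled[qid] = enable;
    toy_remap(dev);
}

static void
toy_new_device(struct toy_dev *dev)
{
    dev->added = true;
    toy_remap(dev);             /* Replay the states recorded earlier. */
}

int
main(void)
{
    struct toy_dev dev = { .added = false };
    int i;

    /* Same order as in the log: state changes first, device add last. */
    toy_vring_state_changed(&dev, 0, true);
    toy_vring_state_changed(&dev, 1, false);
    toy_new_device(&dev);

    for (i = 0; i < N_TXQ; i++) {
        printf("tx_qid %d --> %d\n", i, dev.map[i]);
    }
    return 0;
}

With the ordering from your log this prints "tx_qid 0 --> 0" and
"tx_qid 1 --> 0", instead of leaving tx_qid 1 at -1.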

> One more comment inline.
> 
> Thanks,
> 
> Daniele
> 
> On 11/02/2016 22:21, "Ilya Maximets" <i.maxim...@samsung.com> wrote:
> 
>> Currently, the virtio driver in the guest operating system has to be
>> configured to use exactly the same number of queues. If the number of
>> queues is smaller, some packets will get stuck in queues unused by the
>> guest and will not be received.
>>
>> Fix that by using the new 'vring_state_changed' callback, which is
>> available for vhost-user since DPDK 2.2.
>> The implementation uses an additional mapping from the configured tx
>> queues to the queues enabled by the virtio driver. This requires
>> mandatory locking of TX queues in __netdev_dpdk_vhost_send(), but this
>> locking happened almost always anyway because set_multiq is called
>> with n_txq = 'ovs_numa_get_n_cores() + 1'.
>>
>> Fixes: 4573fbd38fa1 ("netdev-dpdk: Add vhost-user multiqueue support")
>> Signed-off-by: Ilya Maximets <i.maxim...@samsung.com>
>> Reviewed-by: Aaron Conole <acon...@redhat.com>
>> Acked-by: Flavio Leitner <f...@sysclose.org>
>> ---
>> lib/netdev-dpdk.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++-------
>> 1 file changed, 92 insertions(+), 13 deletions(-)
>>
>> diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c
>> index e4f789b..c50b1dd 100644
>> --- a/lib/netdev-dpdk.c
>> +++ b/lib/netdev-dpdk.c
>> @@ -173,6 +173,8 @@ struct dpdk_tx_queue {
>>                                     * from concurrent access.  It is used only
>>                                     * if the queue is shared among different
>>                                     * pmd threads (see 'txq_needs_locking'). */
>> +    int map;                       /* Mapping of configured vhost-user queues
>> +                                    * to enabled by guest. */
>>     uint64_t tsc;
>>     struct rte_mbuf *burst_pkts[MAX_TX_QUEUE_LEN];
>> };
>> @@ -559,6 +561,7 @@ netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
>>     unsigned i;
>>
>>     netdev->tx_q = dpdk_rte_mzalloc(n_txqs * sizeof *netdev->tx_q);
>> +
>>     for (i = 0; i < n_txqs; i++) {
>>         int numa_id = ovs_numa_get_numa_id(i);
>>
>> @@ -572,6 +575,9 @@ netdev_dpdk_alloc_txq(struct netdev_dpdk *netdev, unsigned int n_txqs)
>>             /* Queues are shared among CPUs. Always flush */
>>             netdev->tx_q[i].flush_tx = true;
>>         }
>> +
>> +        /* Initialize map for vhost devices. */
>> +        netdev->tx_q[i].map = -1;
>>         rte_spinlock_init(&netdev->tx_q[i].tx_lock);
>>     }
>> }
>> @@ -1117,17 +1123,16 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>>     unsigned int total_pkts = cnt;
>>     uint64_t start = 0;
>>
>> -    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev))) {
>> +    qid = vhost_dev->tx_q[qid % vhost_dev->real_n_txq].map;
>> +
>> +    if (OVS_UNLIKELY(!is_vhost_running(virtio_dev) || qid == -1)) {
>>         rte_spinlock_lock(&vhost_dev->stats_lock);
>>         vhost_dev->stats.tx_dropped+= cnt;
>>         rte_spinlock_unlock(&vhost_dev->stats_lock);
>>         goto out;
>>     }
>>
>> -    if (vhost_dev->txq_needs_locking) {
>> -        qid = qid % vhost_dev->real_n_txq;
>> -        rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
>> -    }
>> +    rte_spinlock_lock(&vhost_dev->tx_q[qid].tx_lock);
>>
>>     do {
>>         int vhost_qid = qid * VIRTIO_QNUM + VIRTIO_RXQ;
>> @@ -1165,9 +1170,7 @@ __netdev_dpdk_vhost_send(struct netdev *netdev, int qid,
>>         }
>>     } while (cnt);
>>
>> -    if (vhost_dev->txq_needs_locking) {
>> -        rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
>> -    }
>> +    rte_spinlock_unlock(&vhost_dev->tx_q[qid].tx_lock);
>>
>>     rte_spinlock_lock(&vhost_dev->stats_lock);
>>     netdev_dpdk_vhost_update_tx_counters(&vhost_dev->stats, pkts, total_pkts,
>> @@ -1843,15 +1846,49 @@ netdev_dpdk_vhost_set_queues(struct netdev_dpdk *netdev, struct virtio_net *dev)
>>
>>     netdev->real_n_rxq = qp_num;
>>     netdev->real_n_txq = qp_num;
>> -    if (netdev->up.n_txq > netdev->real_n_txq) {
>> -        netdev->txq_needs_locking = true;
>> -    } else {
>> -        netdev->txq_needs_locking = false;
>> -    }
>> +    netdev->txq_needs_locking = true;
>>
>>     return 0;
>> }
>>
>> +/* Fixes mapping for vhost-user tx queues. */
>> +static void
>> +netdev_dpdk_remap_txqs(struct netdev_dpdk *netdev)
>> +    OVS_REQUIRES(netdev->mutex)
>> +{
>> +    int *enabled_queues, n_enabled = 0;
>> +    int i, k, total_txqs = netdev->real_n_txq;
>> +
>> +    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof enabled_queues);
> 
> sizeof *enabled_queues

This is a typo.
Thanks.
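
I.e., it should be:

    enabled_queues = dpdk_rte_mzalloc(total_txqs * sizeof *enabled_queues);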

Best regards, Ilya Maximets.