Hi David,

Could you please post a v2 with Chenbo's comments taken into account?

Thanks,
Maxime

On 9/11/20 9:44 AM, Xia, Chenbo wrote:
> Hi David,
> 
> Thanks for working on this. Comments inline.
> 
>> -----Original Message-----
>> From: David Christensen <d...@linux.vnet.ibm.com>
>> Sent: Thursday, September 3, 2020 1:03 AM
>> To: maxime.coque...@redhat.com; Xia, Chenbo <chenbo....@intel.com>; Wang,
>> Zhihong <zhihong.w...@intel.com>; dev@dpdk.org
>> Cc: David Christensen <d...@linux.vnet.ibm.com>
>> Subject: [PATCH] net/vhost: fix xstats wrong after clearing stats
>>
>> The PMD API allows stats and xstats values to be cleared separately.
>> This is a problem for the vhost PMD since some of the xstats values are
>> derived from existing stats values.  For example:
>>
>> testpmd> show port xstats all
>> ...
>> tx_unicast_packets: 17562959
>> ...
>> testpmd> clear port stats all
>> ...
>> show port xstats all
>> ...
>> tx_unicast_packets: 18446744073709551615
>> ...
>>
>> Modify the driver so that stats and xstats values are stored, updated,
>> and cleared separately.
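>>
>> For illustration, a minimal sketch of the underflow with hypothetical
>> counter values (not driver code; the subtraction mirrors the derived
>> unicast math removed below):
>>
>>     #include <inttypes.h>
>>     #include <stdint.h>
>>     #include <stdio.h>
>>
>>     int main(void)
>>     {
>>         uint64_t pkts = 17562959;      /* stats.pkts      */
>>         uint64_t bcast = 1, mcast = 0; /* xstats counters */
>>
>>         pkts = 0; /* "clear port stats all" zeroes stats only */
>>
>>         /* xstats_get still derives unicast from stats.pkts */
>>         uint64_t unicast = pkts - (bcast + mcast);
>>
>>         /* prints 18446744073709551615 (UINT64_MAX) */
>>         printf("%" PRIu64 "\n", unicast);
>>         return 0;
>>     }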
> 
> I think this is a fix patch, so please add a 'Fixes:XXX' tag and Cc
> sta...@dpdk.org in your commit message.
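> 
> For reference, the expected tag format, with a placeholder commit id
> and subject (take the real ones from git log):
> 
>     Fixes: 0123456789ab ("net/vhost: original commit subject")
>     Cc: sta...@dpdk.org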
> 
>>
>> Signed-off-by: David Christensen <d...@linux.vnet.ibm.com>
>> ---
>>  drivers/net/vhost/rte_eth_vhost.c | 54 ++++++++++++++++++-------------
>>  1 file changed, 32 insertions(+), 22 deletions(-)
>>
>> diff --git a/drivers/net/vhost/rte_eth_vhost.c
>> b/drivers/net/vhost/rte_eth_vhost.c
>> index e55278af6..4e72cc2ca 100644
>> --- a/drivers/net/vhost/rte_eth_vhost.c
>> +++ b/drivers/net/vhost/rte_eth_vhost.c
>> @@ -73,6 +73,9 @@ enum vhost_xstats_pkts {
>>      VHOST_BROADCAST_PKT,
>>      VHOST_MULTICAST_PKT,
>>      VHOST_UNICAST_PKT,
>> +    VHOST_PKT,
>> +    VHOST_BYTE,
>> +    VHOST_MISSED_PKT,
>>      VHOST_ERRORS_PKT,
>>      VHOST_ERRORS_FRAGMENTED,
>>      VHOST_ERRORS_JABBER,
>> @@ -149,11 +152,11 @@ struct vhost_xstats_name_off {
>>  /* [rx]_is prepended to the name string here */
>>  static const struct vhost_xstats_name_off vhost_rxport_stat_strings[] = {
>>      {"good_packets",
>> -     offsetof(struct vhost_queue, stats.pkts)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>>      {"total_bytes",
>> -     offsetof(struct vhost_queue, stats.bytes)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>>      {"missed_pkts",
>> -     offsetof(struct vhost_queue, stats.missed_pkts)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>>      {"broadcast_packets",
>>       offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>>      {"multicast_packets",
>> @@ -189,11 +192,11 @@ static const struct vhost_xstats_name_off
>> vhost_rxport_stat_strings[] = {
>>  /* [tx]_ is prepended to the name string here */
>>  static const struct vhost_xstats_name_off vhost_txport_stat_strings[] = {
>>      {"good_packets",
>> -     offsetof(struct vhost_queue, stats.pkts)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_PKT])},
>>      {"total_bytes",
>> -     offsetof(struct vhost_queue, stats.bytes)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_BYTE])},
>>      {"missed_pkts",
>> -     offsetof(struct vhost_queue, stats.missed_pkts)},
>> +     offsetof(struct vhost_queue, stats.xstats[VHOST_MISSED_PKT])},
>>      {"broadcast_packets",
>>       offsetof(struct vhost_queue, stats.xstats[VHOST_BROADCAST_PKT])},
>>      {"multicast_packets",
>> @@ -291,18 +294,11 @@ vhost_dev_xstats_get(struct rte_eth_dev *dev, struct
>> rte_eth_xstat *xstats,
>>              vq = dev->data->rx_queues[i];
>>              if (!vq)
>>                      continue;
>> -            vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
>> -                            - (vq->stats.xstats[VHOST_BROADCAST_PKT]
>> -                            + vq->stats.xstats[VHOST_MULTICAST_PKT]);
>>      }
> 
> Why not delete the for loop here, since its body is now empty?
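> 
> I.e. after this hunk the loop body is empty, so the whole loop could go:
> 
>     for (i = 0; i < dev->data->nb_rx_queues; i++) {
>             vq = dev->data->rx_queues[i];
>             if (!vq)
>                     continue;
>     }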
> 
>>      for (i = 0; i < dev->data->nb_tx_queues; i++) {
>>              vq = dev->data->tx_queues[i];
>>              if (!vq)
>>                      continue;
>> -            vq->stats.xstats[VHOST_UNICAST_PKT] = vq->stats.pkts
>> -                            + vq->stats.missed_pkts
>> -                            - (vq->stats.xstats[VHOST_BROADCAST_PKT]
>> -                            + vq->stats.xstats[VHOST_MULTICAST_PKT]);
>>      }
> 
> Ditto.
> 
>>      for (t = 0; t < VHOST_NB_XSTATS_RXPORT; t++) {
>>              xstats[count].value = 0;
>> @@ -346,20 +342,27 @@ vhost_count_multicast_broadcast(struct vhost_queue
>> *vq,
>>                      pstats->xstats[VHOST_BROADCAST_PKT]++;
>>              else
>>                      pstats->xstats[VHOST_MULTICAST_PKT]++;
>> +    } else {
>> +            pstats->xstats[VHOST_UNICAST_PKT]++;
> 
> Since this function also counts unicast packets now, its name should be
> changed. Besides, 'eth_vhost_tx', which calls this function, has a
> comment explaining why the function is called; I think that should be
> updated as well.
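> 
> One possible new name, just as a suggestion (second parameter assumed
> from the existing definition):
> 
>     static void
>     vhost_count_xcast_packets(struct vhost_queue *vq,
>                               struct rte_mbuf *mbuf)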
> 
> Thanks!
> Chenbo
> 
>>      }
>>  }
>>
>>  static void
>> -vhost_update_packet_xstats(struct vhost_queue *vq,
>> -                       struct rte_mbuf **bufs,
>> -                       uint16_t count)
>> +vhost_update_packet_xstats(struct vhost_queue *vq, struct rte_mbuf **bufs,
>> +                       uint16_t count, uint64_t nb_bytes,
>> +                       uint64_t nb_missed)
>>  {
>>      uint32_t pkt_len = 0;
>>      uint64_t i = 0;
>>      uint64_t index;
>>      struct vhost_stats *pstats = &vq->stats;
>>
>> +    pstats->xstats[VHOST_BYTE] += nb_bytes;
>> +    pstats->xstats[VHOST_MISSED_PKT] += nb_missed;
>> +    pstats->xstats[VHOST_UNICAST_PKT] += nb_missed;
>> +
>>      for (i = 0; i < count ; i++) {
>> +            pstats->xstats[VHOST_PKT]++;
>>              pkt_len = bufs[i]->pkt_len;
>>              if (pkt_len == 64) {
>>                      pstats->xstats[VHOST_64_PKT]++;
>> @@ -385,6 +388,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t
>> nb_bufs)
>>      struct vhost_queue *r = q;
>>      uint16_t i, nb_rx = 0;
>>      uint16_t nb_receive = nb_bufs;
>> +    uint64_t nb_bytes = 0;
>>
>>      if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>>              return 0;
>> @@ -419,10 +423,11 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs,
>> uint16_t nb_bufs)
>>              if (r->internal->vlan_strip)
>>                      rte_vlan_strip(bufs[i]);
>>
>> -            r->stats.bytes += bufs[i]->pkt_len;
>> +            nb_bytes += bufs[i]->pkt_len;
>>      }
>>
>> -    vhost_update_packet_xstats(r, bufs, nb_rx);
>> +    r->stats.bytes += nb_bytes;
>> +    vhost_update_packet_xstats(r, bufs, nb_rx, nb_bytes, 0);
>>
>>  out:
>>      rte_atomic32_set(&r->while_queuing, 0);
>> @@ -436,6 +441,8 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t
>> nb_bufs)
>>      struct vhost_queue *r = q;
>>      uint16_t i, nb_tx = 0;
>>      uint16_t nb_send = 0;
>> +    uint64_t nb_bytes = 0;
>> +    uint64_t nb_missed = 0;
>>
>>      if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
>>              return 0;
>> @@ -476,13 +483,16 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs,
>> uint16_t nb_bufs)
>>                      break;
>>      }
>>
>> +    for (i = 0; likely(i < nb_tx); i++)
>> +            nb_bytes += bufs[i]->pkt_len;
>> +
>> +    nb_missed = nb_bufs - nb_tx;
>> +
>>      r->stats.pkts += nb_tx;
>> +    r->stats.bytes += nb_bytes;
>>      r->stats.missed_pkts += nb_bufs - nb_tx;
>>
>> -    for (i = 0; likely(i < nb_tx); i++)
>> -            r->stats.bytes += bufs[i]->pkt_len;
>> -
>> -    vhost_update_packet_xstats(r, bufs, nb_tx);
>> +    vhost_update_packet_xstats(r, bufs, nb_tx, nb_bytes, nb_missed);
>>
>>      /* According to RFC2863 page42 section ifHCOutMulticastPkts and
>>       * ifHCOutBroadcastPkts, the counters "multicast" and "broadcast"
>> --
>> 2.18.4
> 
