When UPT is enabled, the driver updates the rxprod register to
let the device know that it has processed the received packets
and that new buffers are available. However, updating it too
frequently can reduce performance.

This patch adds code to avoid updating the register frequently.

Signed-off-by: Ronak Doshi <dos...@vmware.com>
Acked-by: Jochen Behrens <jbehr...@vmware.com>
---
 drivers/net/vmxnet3/vmxnet3_rxtx.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 7bbae4177e..39ad0726cb 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1007,7 +1007,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                /* It's time to renew descriptors */
                vmxnet3_renew_desc(rxq, ring_idx, newm);
-               if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+               if (unlikely(rxq->shared->ctrl.updateRxProd &&
+                        (rxq->cmd_ring[ring_idx].next2fill & 0xf) == 0)) {
                        VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[ring_idx] +
                                               (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);
@@ -1027,18 +1028,21 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
        if (unlikely(nb_rxd == 0)) {
                uint32_t avail;
+               uint32_t posted = 0;
                for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
                        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
                        if (unlikely(avail > 0)) {
                                /* try to alloc new buf and renew descriptors */
-                               vmxnet3_post_rx_bufs(rxq, ring_idx);
+                               if (vmxnet3_post_rx_bufs(rxq, ring_idx) > 0)
+                                       posted |= (1 << ring_idx);
                        }
                }
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
-                               VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[ring_idx] +
-                                                      (rxq->queue_id * VMXNET3_REG_ALIGN),
-                                                      rxq->cmd_ring[ring_idx].next2fill);
+                               if (posted & (1 << ring_idx))
+                                       VMXNET3_WRITE_BAR0_REG(hw, hw->rx_prod_offset[ring_idx] +
+                                                              (rxq->queue_id * VMXNET3_REG_ALIGN),
+                                                              rxq->cmd_ring[ring_idx].next2fill);
                        }
                }
        }
-- 
2.11.0

Reply via email to