rte_distributor_flush() uses the total_outstanding() function to
decide whether it should keep waiting for packets that are still
being processed. However, in burst mode only backlog packets were
counted.

This patch fixes that issue by also counting in-flight packets.
There are also some fixes to properly keep count of in-flight
packets for each worker in bufs[].count.

Fixes: 775003ad2f96 ("distributor: add new burst-capable library")
Cc: david.h...@intel.com
Cc: sta...@dpdk.org

Signed-off-by: Lukasz Wojciechowski <l.wojciec...@partner.samsung.com>
Acked-by: David Hunt <david.h...@intel.com>
---
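For illustration only, below is a minimal, self-contained C sketch of the
accounting that flush relies on after this change. The fake_distributor
structure, its fixed NUM_WORKERS, and the draining loop are made-up
stand-ins, not the library source; only the backlog[].count + bufs[].count
sum mirrors the actual hunk in the patch.

/* Illustrative sketch only: simplified stand-ins for the real
 * rte_distributor structures. Field names mirror the patch, but the
 * types and the surrounding code are hypothetical. */
#include <stdio.h>

#define NUM_WORKERS 4

struct fake_buf     { unsigned int count; }; /* in-flight pkts per worker  */
struct fake_backlog { unsigned int count; }; /* backlogged pkts per worker */

struct fake_distributor {
	unsigned int num_workers;
	struct fake_buf bufs[NUM_WORKERS];
	struct fake_backlog backlog[NUM_WORKERS];
};

/* After the fix, the outstanding total includes both the backlog and
 * the packets already handed out to (in flight at) each worker. */
static unsigned int
total_outstanding(const struct fake_distributor *d)
{
	unsigned int wkr, total = 0;

	for (wkr = 0; wkr < d->num_workers; wkr++)
		total += d->backlog[wkr].count + d->bufs[wkr].count;

	return total;
}

/* Flush-style loop: keep "processing" until nothing is outstanding.
 * Processing is faked here by draining one packet per worker per pass. */
int main(void)
{
	struct fake_distributor d = {
		.num_workers = NUM_WORKERS,
		.bufs    = { {2}, {0}, {1}, {0} },
		.backlog = { {0}, {3}, {0}, {0} },
	};

	while (total_outstanding(&d) > 0) {
		for (unsigned int w = 0; w < d.num_workers; w++) {
			if (d.bufs[w].count > 0)
				d.bufs[w].count--;
			else if (d.backlog[w].count > 0)
				d.backlog[w].count--;
		}
	}
	printf("outstanding: %u\n", total_outstanding(&d));
	return 0;
}

If only the backlog were counted, the loop above would exit while workers
still held in-flight packets, which is the symptom the patch addresses.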
 lib/librte_distributor/rte_distributor.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

diff --git a/lib/librte_distributor/rte_distributor.c b/lib/librte_distributor/rte_distributor.c
index 4bd23a990..2478de3b7 100644
--- a/lib/librte_distributor/rte_distributor.c
+++ b/lib/librte_distributor/rte_distributor.c
@@ -467,6 +467,7 @@ rte_distributor_process(struct rte_distributor *d,
                        /* Sync with worker on GET_BUF flag. */
                        if (__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
                                __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF) {
+                               d->bufs[wid].count = 0;
                                release(d, wid);
                                handle_returns(d, wid);
                        }
@@ -481,11 +482,6 @@ rte_distributor_process(struct rte_distributor *d,
                uint16_t matches[RTE_DIST_BURST_SIZE];
                unsigned int pkts;
 
-               /* Sync with worker on GET_BUF flag. */
-               if (__atomic_load_n(&(d->bufs[wkr].bufptr64[0]),
-                       __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)
-                       d->bufs[wkr].count = 0;
-
                if ((num_mbufs - next_idx) < RTE_DIST_BURST_SIZE)
                        pkts = num_mbufs - next_idx;
                else
@@ -605,8 +601,10 @@ rte_distributor_process(struct rte_distributor *d,
        for (wid = 0 ; wid < d->num_workers; wid++)
                /* Sync with worker on GET_BUF flag. */
                if ((__atomic_load_n(&(d->bufs[wid].bufptr64[0]),
-                       __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF))
+                       __ATOMIC_ACQUIRE) & RTE_DISTRIB_GET_BUF)) {
+                       d->bufs[wid].count = 0;
                        release(d, wid);
+               }
 
        return num_mbufs;
 }
@@ -649,7 +647,7 @@ total_outstanding(const struct rte_distributor *d)
        unsigned int wkr, total_outstanding = 0;
 
        for (wkr = 0; wkr < d->num_workers; wkr++)
-               total_outstanding += d->backlog[wkr].count;
+               total_outstanding += d->backlog[wkr].count + d->bufs[wkr].count;
 
        return total_outstanding;
 }
-- 
2.17.1
