Hi Olivier,
 
> Hi Konstantin,
> 
> On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > v2:
> >  - update Release Notes (as per comments)
> >
> > Two new sync modes were introduced into rte_ring:
> > relaxed tail sync (RTS) and head/tail sync (HTS).
> > This change provides user with ability to select these
> > modes for ring based mempool via mempool ops API.
> >
> > Signed-off-by: Konstantin Ananyev <konstantin.anan...@intel.com>
> > Acked-by: Gage Eads <gage.e...@intel.com>
> > ---
> >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> >  2 files changed, 94 insertions(+), 9 deletions(-)
> >
> > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > index eaaf11c37..7bdcf3aac 100644
> > --- a/doc/guides/rel_notes/release_20_08.rst
> > +++ b/doc/guides/rel_notes/release_20_08.rst
> > @@ -84,6 +84,12 @@ New Features
> >    * Dump ``rte_flow`` memory consumption.
> >    * Measure packet per second forwarding.
> >
> > +* **Added support for new sync modes into mempool ring driver.**
> > +
> > +  Added ability to select new ring synchronisation modes:
> > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > +  via mempool ops API.
> > +
> >
> >  Removed Items
> >  -------------
> > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > index bc123fc52..15ec7dee7 100644
> > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> >                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> >  }
> >
> > +static int
> > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > +   unsigned int n)
> > +{
> > +   return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > +                   obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > +   unsigned int n)
> > +{
> > +   return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > +                   obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> >  static int
> >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> >  {
> > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> >                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> >  }
> >
> > +static int
> > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > +   return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > +                   obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> > +static int
> > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > +{
> > +   return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > +                   obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > +}
> > +
> >  static unsigned
> >  common_ring_get_count(const struct rte_mempool *mp)
> >  {
> >     return rte_ring_count(mp->pool_data);
> >  }
> >
> > -
> >  static int
> > -common_ring_alloc(struct rte_mempool *mp)
> > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> >  {
> > -   int rg_flags = 0, ret;
> > +   int ret;
> >     char rg_name[RTE_RING_NAMESIZE];
> >     struct rte_ring *r;
> >
> > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> >             return -rte_errno;
> >     }
> >
> > -   /* ring flags */
> > -   if (mp->flags & MEMPOOL_F_SP_PUT)
> > -           rg_flags |= RING_F_SP_ENQ;
> > -   if (mp->flags & MEMPOOL_F_SC_GET)
> > -           rg_flags |= RING_F_SC_DEQ;
> > -
> >     /*
> >      * Allocate the ring that will be used to store objects.
> >      * Ring functions will return appropriate errors if we are
> > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> >     return 0;
> >  }
> >
> > +static int
> > +common_ring_alloc(struct rte_mempool *mp)
> > +{
> > +   uint32_t rg_flags;
> > +
> > +   rg_flags = 0;
> 
> Maybe it could go on the same line
> 
> > +
> > +   /* ring flags */
> 
> Not sure we need to keep this comment
> 
> > +   if (mp->flags & MEMPOOL_F_SP_PUT)
> > +           rg_flags |= RING_F_SP_ENQ;
> > +   if (mp->flags & MEMPOOL_F_SC_GET)
> > +           rg_flags |= RING_F_SC_DEQ;
> > +
> > +   return ring_alloc(mp, rg_flags);
> > +}
> > +
> > +static int
> > +rts_ring_alloc(struct rte_mempool *mp)
> > +{
> > +   if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > +           return -EINVAL;
> 
> Why do we need this? Is it a problem to allow sc/sp in this mode (even
> if it's not optimal)?

These new sync modes (RTS, HTS) are for MT.
For SP/SC there is simply no point in using MT sync modes.
I suppose there are a few choices:
1. Make the F_SP_PUT/F_SC_GET flags silently override the expected ops behaviour
   and create the actual ring with ST sync mode for prod/cons.
2. Report an error.
3. Silently ignore these flags.

As far as I can see, for the "ring_mp_mc" ops we are doing #1,
while for "stack" we are doing #3.
For RTS/HTS I chose #2, as it seems cleaner to me.
Any thoughts from your side on what the preferable behaviour should be?
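
Just to illustrate what #2 means from the application side, here is a minimal
sketch (not part of the patch; the helper name is made up, the mempool calls
are the existing public API, and "ring_mt_rts" is the ops name added above):

#include <rte_mempool.h>

/* illustrative helper: create a pool backed by a ring in RTS sync mode.
 * The flags argument to rte_mempool_create_empty() is left 0, i.e. no
 * MEMPOOL_F_SP_PUT/MEMPOOL_F_SC_GET, so rts_ring_alloc() above would not
 * return -EINVAL. */
static struct rte_mempool *
create_rts_pool(const char *name, unsigned int n, unsigned int elt_size,
		int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty(name, n, elt_size, 0, 0, socket_id, 0);
	if (mp == NULL)
		return NULL;

	/* select the new ops before populating the pool */
	if (rte_mempool_set_ops_byname(mp, "ring_mt_rts", NULL) != 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}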

> 
> > +
> > +   return ring_alloc(mp, RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ);
> > +}
> > +
> > +static int
> > +hts_ring_alloc(struct rte_mempool *mp)
> > +{
> > +   if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > +           return -EINVAL;
> > +
> > +   return ring_alloc(mp, RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ);
> > +}
> > +
> >  static void
> >  common_ring_free(struct rte_mempool *mp)
> >  {
> > @@ -130,7 +187,29 @@ static const struct rte_mempool_ops ops_sp_mc = {
> >     .get_count = common_ring_get_count,
> >  };
> >
> > +/* ops for mempool with ring in MT_RTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_rts = {
> > +   .name = "ring_mt_rts",
> > +   .alloc = rts_ring_alloc,
> > +   .free = common_ring_free,
> > +   .enqueue = rts_ring_mp_enqueue,
> > +   .dequeue = rts_ring_mc_dequeue,
> > +   .get_count = common_ring_get_count,
> > +};
> > +
> > +/* ops for mempool with ring in MT_HTS sync mode */
> > +static const struct rte_mempool_ops ops_mt_hts = {
> > +   .name = "ring_mt_hts",
> > +   .alloc = hts_ring_alloc,
> > +   .free = common_ring_free,
> > +   .enqueue = hts_ring_mp_enqueue,
> > +   .dequeue = hts_ring_mc_dequeue,
> > +   .get_count = common_ring_get_count,
> > +};
> > +
> >  MEMPOOL_REGISTER_OPS(ops_mp_mc);
> >  MEMPOOL_REGISTER_OPS(ops_sp_sc);
> >  MEMPOOL_REGISTER_OPS(ops_mp_sc);
> >  MEMPOOL_REGISTER_OPS(ops_sp_mc);
> > +MEMPOOL_REGISTER_OPS(ops_mt_rts);
> > +MEMPOOL_REGISTER_OPS(ops_mt_hts);
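
As a side note (not part of the patch): once these ops are registered, an
application can also pick them by name for an mbuf pool, e.g. via
rte_pktmbuf_pool_create_by_ops(). A rough sketch with made-up pool sizes;
only the "ring_mt_hts" ops name comes from this patch:

#include <rte_lcore.h>
#include <rte_mbuf.h>

/* illustrative only: mbuf pool backed by a ring in head/tail sync mode */
static struct rte_mempool *
create_hts_mbuf_pool(void)
{
	return rte_pktmbuf_pool_create_by_ops("mb_pool_hts", 8192, 256, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id(),
			"ring_mt_hts");
}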
 
> Not really related to your patch, but I think we need a function to
> dump the names of the available mempool ops. We could even add a description.
> The problem we have is that a user does not know on which criteria they
> should choose one driver or another (except for platform drivers).

Agree, it would be useful.
Though that is probably a subject for a separate patch.
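
In case it helps that discussion, a rough sketch of what such a dump could
look like, assuming it simply walks the public rte_mempool_ops_table; the
function name is made up, and a per-ops description string would need a new
field:

#include <stdio.h>
#include <rte_mempool.h>

/* hypothetical helper (not an existing DPDK API): print the names of
 * all currently registered mempool ops */
static void
mempool_ops_dump(FILE *f)
{
	uint32_t i;

	for (i = 0; i != rte_mempool_ops_table.num_ops; i++)
		fprintf(f, "mempool ops[%u]: %s\n", i,
			rte_mempool_ops_table.ops[i].name);
}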
