> On Mon, Jul 13, 2020 at 02:46:35PM +0000, Ananyev, Konstantin wrote:
> > Hi Olivier,
> >
> > > Hi Konstantin,
> > >
> > > On Fri, Jul 10, 2020 at 03:20:12PM +0000, Ananyev, Konstantin wrote:
> > > >
> > > >
> > > > >
> > > > > Hi Olivier,
> > > > >
> > > > > > Hi Konstantin,
> > > > > >
> > > > > > On Thu, Jul 09, 2020 at 05:55:30PM +0000, Ananyev, Konstantin wrote:
> > > > > > > Hi Olivier,
> > > > > > >
> > > > > > > > Hi Konstantin,
> > > > > > > >
> > > > > > > > > On Mon, Jun 29, 2020 at 05:10:24PM +0100, Konstantin Ananyev wrote:
> > > > > > > > > v2:
> > > > > > > > >  - update Release Notes (as per comments)
> > > > > > > > >
> > > > > > > > > Two new sync modes were introduced into rte_ring:
> > > > > > > > > relaxed tail sync (RTS) and head/tail sync (HTS).
> > > > > > > > > This change provides user with ability to select these
> > > > > > > > > modes for ring based mempool via mempool ops API.
> > > > > > > > >
> > > > > > > > > > Signed-off-by: Konstantin Ananyev <konstantin.anan...@intel.com>
> > > > > > > > > Acked-by: Gage Eads <gage.e...@intel.com>
> > > > > > > > > ---
> > > > > > > > >  doc/guides/rel_notes/release_20_08.rst  |  6 ++
> > > > > > > > > >  drivers/mempool/ring/rte_mempool_ring.c | 97 ++++++++++++++++++++++---
> > > > > > > > >  2 files changed, 94 insertions(+), 9 deletions(-)
> > > > > > > > >
> > > > > > > > > > diff --git a/doc/guides/rel_notes/release_20_08.rst b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > index eaaf11c37..7bdcf3aac 100644
> > > > > > > > > --- a/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > +++ b/doc/guides/rel_notes/release_20_08.rst
> > > > > > > > > @@ -84,6 +84,12 @@ New Features
> > > > > > > > >    * Dump ``rte_flow`` memory consumption.
> > > > > > > > >    * Measure packet per second forwarding.
> > > > > > > > >
> > > > > > > > > > +* **Added support for new sync modes into mempool ring driver.**
> > > > > > > > > +
> > > > > > > > > +  Added ability to select new ring synchronisation modes:
> > > > > > > > > > +  ``relaxed tail sync (ring_mt_rts)`` and ``head/tail sync (ring_mt_hts)``
> > > > > > > > > +  via mempool ops API.
> > > > > > > > > +
> > > > > > > > >
> > > > > > > > >  Removed Items
> > > > > > > > >  -------------
> > > > > > > > > > diff --git a/drivers/mempool/ring/rte_mempool_ring.c b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > index bc123fc52..15ec7dee7 100644
> > > > > > > > > --- a/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > +++ b/drivers/mempool/ring/rte_mempool_ring.c
> > > > > > > > > > @@ -25,6 +25,22 @@ common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > >                       obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > > +rts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > +     unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +     return rte_ring_mp_rts_enqueue_bulk(mp->pool_data,
> > > > > > > > > +                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > > +hts_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
> > > > > > > > > +     unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +     return rte_ring_mp_hts_enqueue_bulk(mp->pool_data,
> > > > > > > > > +                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static int
> > > > > > > > > >  common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > >  {
> > > > > > > > > > @@ -39,17 +55,30 @@ common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
> > > > > > > > >                       obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > > +rts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +     return rte_ring_mc_rts_dequeue_bulk(mp->pool_data,
> > > > > > > > > +                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > > +hts_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned int n)
> > > > > > > > > +{
> > > > > > > > > +     return rte_ring_mc_hts_dequeue_bulk(mp->pool_data,
> > > > > > > > > +                     obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > >  static unsigned
> > > > > > > > >  common_ring_get_count(const struct rte_mempool *mp)
> > > > > > > > >  {
> > > > > > > > >       return rte_ring_count(mp->pool_data);
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > -
> > > > > > > > >  static int
> > > > > > > > > -common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +ring_alloc(struct rte_mempool *mp, uint32_t rg_flags)
> > > > > > > > >  {
> > > > > > > > > -     int rg_flags = 0, ret;
> > > > > > > > > +     int ret;
> > > > > > > > >       char rg_name[RTE_RING_NAMESIZE];
> > > > > > > > >       struct rte_ring *r;
> > > > > > > > >
> > > > > > > > > @@ -60,12 +89,6 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > >               return -rte_errno;
> > > > > > > > >       }
> > > > > > > > >
> > > > > > > > > -     /* ring flags */
> > > > > > > > > -     if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > -             rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > -     if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > -             rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > -
> > > > > > > > >       /*
> > > > > > > > >        * Allocate the ring that will be used to store objects.
> > > > > > > > > >        * Ring functions will return appropriate errors if we are
> > > > > > > > > @@ -82,6 +105,40 @@ common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > >       return 0;
> > > > > > > > >  }
> > > > > > > > >
> > > > > > > > > +static int
> > > > > > > > > +common_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > +     uint32_t rg_flags;
> > > > > > > > > +
> > > > > > > > > +     rg_flags = 0;
> > > > > > > >
> > > > > > > > Maybe it could go on the same line
> > > > > > > >
> > > > > > > > > +
> > > > > > > > > +     /* ring flags */
> > > > > > > >
> > > > > > > > Not sure we need to keep this comment
> > > > > > > >
> > > > > > > > > +     if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > > > +             rg_flags |= RING_F_SP_ENQ;
> > > > > > > > > +     if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > > > +             rg_flags |= RING_F_SC_DEQ;
> > > > > > > > > +
> > > > > > > > > +     return ring_alloc(mp, rg_flags);
> > > > > > > > > +}
> > > > > > > > > +
> > > > > > > > > +static int
> > > > > > > > > +rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > > > +{
> > > > > > > > > > +     if ((mp->flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) != 0)
> > > > > > > > > +             return -EINVAL;
> > > > > > > >
> > > > > > > > Why do we need this? Is it a problem to allow sc/sp in this
> > > > > > > > mode (even if it's not optimal)?
> > > > > > >
> > > > > > > These new sync modes (RTS, HTS) are for MT.
> > > > > > > For SP/SC there is simply no point in using MT sync modes.
> > > > > > > I suppose there are a few choices:
> > > > > > > 1. Make the F_SP_PUT/F_SC_GET flags silently override the expected ops
> > > > > > >    behaviour and create the actual ring with ST sync mode for prod/cons.
> > > > > > > 2. Report an error.
> > > > > > > 3. Silently ignore these flags.
> > > > > > >
> > > > > > > As I can see, for "ring_mp_mc" ops we are doing #1,
> > > > > > > while for "stack" we are doing #3.
> > > > > > > For RTS/HTS I chose #2, as it seems cleaner to me.
> > > > > > > Any thoughts from your side on what the preferable behaviour should be?
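> > > > > > >
> > > > > > > Just to illustrate the difference (a sketch only, not part of the patch,
> > > > > > > and it assumes the RING_F_MP_RTS_ENQ/RING_F_MC_RTS_DEQ flags from
> > > > > > > rte_ring), #1 would look roughly like this, reusing the ring_alloc()
> > > > > > > helper from the patch:
> > > > > > >
> > > > > > > static int
> > > > > > > rts_ring_alloc(struct rte_mempool *mp)
> > > > > > > {
> > > > > > > 	/* default to MT RTS sync mode for both prod and cons */
> > > > > > > 	uint32_t rg_flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ;
> > > > > > >
> > > > > > > 	/* silently fall back to ST sync where the user asked for it */
> > > > > > > 	if (mp->flags & MEMPOOL_F_SP_PUT)
> > > > > > > 		rg_flags = (rg_flags & ~RING_F_MP_RTS_ENQ) | RING_F_SP_ENQ;
> > > > > > > 	if (mp->flags & MEMPOOL_F_SC_GET)
> > > > > > > 		rg_flags = (rg_flags & ~RING_F_MC_RTS_DEQ) | RING_F_SC_DEQ;
> > > > > > >
> > > > > > > 	return ring_alloc(mp, rg_flags);
> > > > > > > }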
> > > > > >
> > > > > > The F_SP_PUT/F_SC_GET flags are only used in rte_mempool_create() to select
> > > > > > the default ops among (ring_sp_sc, ring_mp_sc, ring_sp_mc, ring_mp_mc).
> > > > >
> > > > > As I understand, nothing prevents a user from doing:
> > > > >
> > > > > mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
> > > > >                  sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
> > > >
> > > > Apologies, hit send accidentally.
> > > > I meant the user can do:
> > > >
> > > > mp = rte_mempool_create_empty(..., F_SP_PUT | F_SC_GET);
> > > > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > > >
> > > > And in that case, he'll get an SP/SC ring underneath.
> > >
> > > It looks like it's not the case. Since commit 449c49b93a6b ("mempool: support
> > > handler operations"), the flags SP_PUT/SC_GET are converted into a call
> > > to rte_mempool_set_ops_byname() in rte_mempool_create() only.
> > >
> > > In rte_mempool_create_empty(), these flags are ignored. It is expected
> > > that the user calls rte_mempool_set_ops_byname() themselves.
> >
> > As I understand the code - not exactly.
> > rte_mempool_create_empty() doesn't take any specific action based on the
> > 'flags' value, but it does store its value inside mp->flags.
> > Later, when mempool_ops_alloc_once() is called, these flags will be used by
> > common_ring_alloc() and might override the ring behaviour selected by the ops.
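> >
> > To spell out the sequence I mean (illustration only, the variable names are
> > just placeholders):
> >
> > mp = rte_mempool_create_empty("mb_pool", n, elt_size, cache_size,
> > 	sizeof(struct rte_pktmbuf_pool_private), socket_id,
> > 	MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
> > rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > /* the flags above were stored in mp->flags, so when populate triggers
> >  * mempool_ops_alloc_once() -> common_ring_alloc(), the ring gets created
> >  * with RING_F_SP_ENQ | RING_F_SC_DEQ despite the "ring_mp_mc" ops. */
> > rte_mempool_populate_default(mp);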
> >
> > >
> > > I don't think this is good behavior:
> > >
> > > 1/ The documentation of rte_mempool_create_empty() does not say that the
> > >    flags are ignored, and a user can expect that F_SP_PUT | F_SC_GET
> > >    sets the default ops like rte_mempool_create().
> > >
> > > 2/ If rte_mempool_set_ops_byname() is not called after
> > >    rte_mempool_create_empty() (and it looks like it happens in dpdk's code),
> > >    the default ops are the ones registered at index 0. This depends on
> > >    the link order.
> > >
> > > So I propose to move the following code into
> > > rte_mempool_create_empty().
> > >
> > >   if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
> > >           ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
> > >   else if (flags & MEMPOOL_F_SP_PUT)
> > >           ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
> > >   else if (flags & MEMPOOL_F_SC_GET)
> > >           ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
> > >   else
> > >           ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
> > >
> > > What do you think?
> >
> > I think it will be a good thing - as in that case we'll always have
> > "ring_mp_mc" selected as the default one.
> > As another thought, it probably would be good to deprecate and later remove
> > MEMPOOL_F_SP_PUT and MEMPOOL_F_SC_GET completely.
> > These days the user can select this behaviour via mempool ops, and such dualism
> > just makes things more error-prone and harder to maintain.
> > Especially as we don't have a clear policy on what should take priority
> > for sync mode selection: mempool ops or flags.
> >
> 
> I tend to agree; however, it would mean deprecating rte_mempool_create()
> too, because we wouldn't be able to set ops with it. Or we would have to
> add a 12th (!) argument to the function, to set the ops name.
>
> I don't like having that many arguments to this function, but it seems
> it is widely used, probably because it is just one function call (vs
> create_empty + set_ops + populate). So adding an "ops_name" argument is
> maybe the right thing to do, given we can keep ABI compatibility.
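>
> Purely to illustrate the idea (a hypothetical prototype, not an existing
> API), it could look something like:
>
> struct rte_mempool *
> rte_mempool_create_with_ops(const char *name, unsigned int n,
> 	unsigned int elt_size, unsigned int cache_size,
> 	unsigned int private_data_size,
> 	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
> 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
> 	int socket_id, unsigned int flags,
> 	const char *ops_name);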

My thought was - just keep the rte_mempool_create()
parameter list as it is, and always set ops to "ring_mp_mc" for it.
Users who'd like some other ops would be forced to use
create_empty+set_ops+populate.
That's pretty much the same as what we have right now;
the only difference will be the ring with SP/SC mode.
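
For reference, selecting one of the new modes would then be something like
this (just a sketch, names are placeholders):

mp = rte_mempool_create_empty("mb_pool_rts", n, elt_size, cache_size,
	sizeof(struct rte_pktmbuf_pool_private), socket_id, 0);
if (mp == NULL)
	rte_exit(EXIT_FAILURE, "cannot create mempool\n");
if (rte_mempool_set_ops_byname(mp, "ring_mt_rts", NULL) != 0)
	rte_exit(EXIT_FAILURE, "cannot set mempool ops\n");
if (rte_mempool_populate_default(mp) < 0)
	rte_exit(EXIT_FAILURE, "cannot populate mempool\n");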
