> -----Original Message-----
> From: David Marchand <david.march...@redhat.com>
> Sent: Wednesday, July 8, 2020 10:30 PM
> To: Ruifeng Wang <ruifeng.w...@arm.com>
> Cc: Bruce Richardson <bruce.richard...@intel.com>; Vladimir Medvedkin <vladimir.medved...@intel.com>;
> John McNamara <john.mcnam...@intel.com>; Marko Kovacevic <marko.kovace...@intel.com>;
> Ray Kinsella <m...@ashroe.eu>; Neil Horman <nhor...@tuxdriver.com>; dev <dev@dpdk.org>;
> Ananyev, Konstantin <konstantin.anan...@intel.com>; Honnappa Nagarahalli <honnappa.nagaraha...@arm.com>;
> nd <n...@arm.com>
> Subject: Re: [dpdk-dev] [PATCH v7 1/3] lib/lpm: integrate RCU QSBR
>
> On Tue, Jul 7, 2020 at 5:16 PM Ruifeng Wang <ruifeng.w...@arm.com> wrote:
> > diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
> > index b9d49ac87..7889f21b3 100644
> > --- a/lib/librte_lpm/rte_lpm.h
> > +++ b/lib/librte_lpm/rte_lpm.h
> > @@ -1,5 +1,6 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> >   * Copyright(c) 2010-2014 Intel Corporation
> > + * Copyright(c) 2020 Arm Limited
> >   */
> >
> >  #ifndef _RTE_LPM_H_
> > @@ -20,6 +21,7 @@
> >  #include <rte_memory.h>
> >  #include <rte_common.h>
> >  #include <rte_vect.h>
> > +#include <rte_rcu_qsbr.h>
> >
> >  #ifdef __cplusplus
> >  extern "C" {
> > @@ -62,6 +64,17 @@ extern "C" {
> >  /** Bitmask used to indicate successful lookup */
> >  #define RTE_LPM_LOOKUP_SUCCESS 0x01000000
> >
> > +/** @internal Default RCU defer queue entries to reclaim in one go. */
> > +#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
> > +
> > +/** RCU reclamation modes */
> > +enum rte_lpm_qsbr_mode {
> > +        /** Create defer queue for reclaim. */
> > +        RTE_LPM_QSBR_MODE_DQ = 0,
> > +        /** Use blocking mode reclaim. No defer queue created. */
> > +        RTE_LPM_QSBR_MODE_SYNC
> > +};
> > +
> >  #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
> >  /** @internal Tbl24 entry structure. */
> >  __extension__
> > @@ -130,6 +143,28 @@ struct rte_lpm {
> >                          __rte_cache_aligned; /**< LPM tbl24 table. */
> >          struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
> >          struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
> > +#ifdef ALLOW_EXPERIMENTAL_API
> > +        /* RCU config. */
> > +        struct rte_rcu_qsbr *v;         /* RCU QSBR variable. */
> > +        enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
> > +        struct rte_rcu_qsbr_dq *dq;     /* RCU QSBR defer queue. */
> > +#endif
> > +};
>
> I can see failures in travis reports for v7 and v6.
> I reproduced them in my env.
>
> 1 function with some indirect sub-type change:
>
> [C]'function int rte_lpm_add(rte_lpm*, uint32_t, uint8_t, uint32_t)'
> at rte_lpm.c:764:1 has some indirect sub-type changes:
>   parameter 1 of type 'rte_lpm*' has sub-type changes:
>     in pointed to type 'struct rte_lpm' at rte_lpm.h:134:1:
>       type size hasn't changed
>       3 data member insertions:
>         'rte_rcu_qsbr* rte_lpm::v', at offset 536873600 (in bits) at rte_lpm.h:148:1
>         'rte_lpm_qsbr_mode rte_lpm::rcu_mode', at offset 536873664 (in bits) at rte_lpm.h:149:1
>         'rte_rcu_qsbr_dq* rte_lpm::dq', at offset 536873728 (in bits) at rte_lpm.h:150:1
>
Sorry, I had assumed that guarding the new members with ALLOW_EXPERIMENTAL_API would keep the ABI intact for users who do not enable experimental APIs. ABI compatibility and ALLOW_EXPERIMENTAL_API are indeed two different things.
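To make that concrete, here is a minimal sketch (a hypothetical struct, not the real rte_lpm) of why members guarded by #ifdef ALLOW_EXPERIMENTAL_API in an installed public header cannot preserve ABI: whether the members exist depends on the compile flags of whoever includes the header, so a library and an application built with different flags no longer agree on the struct layout, and the ABI checker reports the inserted members as seen above.

/* Hypothetical public header, for illustration only. */
struct example_public {
        uint32_t always_present;
#ifdef ALLOW_EXPERIMENTAL_API
        /* Only translation units compiled with the flag see this member. */
        void *experimental_state;
#endif
};

/*
 * A build with ALLOW_EXPERIMENTAL_API and a build without it disagree on
 * the member layout (and possibly the size) of struct example_public, so
 * any exported function taking a struct example_public *, such as the
 * rte_lpm_add() flagged in the report above, has an incompatible ABI
 * between the two builds.
 */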
>
> Going back to my proposal of hiding what does not need to be seen.
>
> Disclaimer, *this is quick & dirty* but it builds and passes ABI check:
>
> $ git diff
> diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
> index d498ba761..7109aef6a 100644
> --- a/lib/librte_lpm/rte_lpm.c
> +++ b/lib/librte_lpm/rte_lpm.c

I understand your proposal in v5 now. A new data structure encloses rte_lpm together with the new members needed for RCU, so the rte_lpm ABI is kept. We can also move the other members of rte_lpm that do not need to be exposed out of the public structure in the 20.11 release. I will fix the ABI issue in the next version. (A small standalone sketch of this hiding pattern is appended after the quoted patch below.)

> @@ -115,6 +115,15 @@ rte_lpm_find_existing(const char *name)
>          return l;
>  }
>
> +struct internal_lpm {
> +        /* Public object */
> +        struct rte_lpm lpm;
> +        /* RCU config. */
> +        struct rte_rcu_qsbr *v;         /* RCU QSBR variable. */
> +        enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
> +        struct rte_rcu_qsbr_dq *dq;     /* RCU QSBR defer queue. */
> +};
> +
>  /*
>   * Allocates memory for LPM object
>   */
> @@ -123,6 +132,7 @@ rte_lpm_create(const char *name, int socket_id,
>                  const struct rte_lpm_config *config)
>  {
>          char mem_name[RTE_LPM_NAMESIZE];
> +        struct internal_lpm *internal = NULL;
>          struct rte_lpm *lpm = NULL;
>          struct rte_tailq_entry *te;
>          uint32_t mem_size, rules_size, tbl8s_size;
> @@ -141,12 +151,6 @@ rte_lpm_create(const char *name, int socket_id,
>
>          snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
>
> -        /* Determine the amount of memory to allocate. */
> -        mem_size = sizeof(*lpm);
> -        rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
> -        tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
> -                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
> -
>          rte_mcfg_tailq_write_lock();
>
>          /* guarantee there's no existing */
> @@ -170,16 +174,23 @@ rte_lpm_create(const char *name, int socket_id,
>                  goto exit;
>          }
>
> +        /* Determine the amount of memory to allocate. */
> +        mem_size = sizeof(*internal);
> +        rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
> +        tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
> +                        RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
> +                        config->number_tbl8s);
> +
>          /* Allocate memory to store the LPM data structures. */
> -        lpm = rte_zmalloc_socket(mem_name, mem_size,
> +        internal = rte_zmalloc_socket(mem_name, mem_size,
>                          RTE_CACHE_LINE_SIZE, socket_id);
> -        if (lpm == NULL) {
> +        if (internal == NULL) {
>                  RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
>                  rte_free(te);
>                  rte_errno = ENOMEM;
>                  goto exit;
>          }
>
> +        lpm = &internal->lpm;
>          lpm->rules_tbl = rte_zmalloc_socket(NULL,
>                          (size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
>
> @@ -226,6 +237,7 @@ rte_lpm_create(const char *name, int socket_id,
>  void
>  rte_lpm_free(struct rte_lpm *lpm)
>  {
> +        struct internal_lpm *internal;
>          struct rte_lpm_list *lpm_list;
>          struct rte_tailq_entry *te;
>
> @@ -247,8 +259,9 @@ rte_lpm_free(struct rte_lpm *lpm)
>
>          rte_mcfg_tailq_write_unlock();
>
> -        if (lpm->dq)
> -                rte_rcu_qsbr_dq_delete(lpm->dq);
> +        internal = container_of(lpm, struct internal_lpm, lpm);
> +        if (internal->dq != NULL)
> +                rte_rcu_qsbr_dq_delete(internal->dq);
>          rte_free(lpm->tbl8);
>          rte_free(lpm->rules_tbl);
>          rte_free(lpm);
> @@ -276,13 +289,15 @@ rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
>  {
>          char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
>          struct rte_rcu_qsbr_dq_parameters params = {0};
> +        struct internal_lpm *internal;
>
> -        if ((lpm == NULL) || (cfg == NULL)) {
> +        if (lpm == NULL || cfg == NULL) {
>                  rte_errno = EINVAL;
>                  return 1;
>          }
>
> -        if (lpm->v) {
> +        internal = container_of(lpm, struct internal_lpm, lpm);
> +        if (internal->v != NULL) {
>                  rte_errno = EEXIST;
>                  return 1;
>          }
> @@ -305,20 +320,19 @@ rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
>                  params.free_fn = __lpm_rcu_qsbr_free_resource;
>                  params.p = lpm;
>                  params.v = cfg->v;
> -                lpm->dq = rte_rcu_qsbr_dq_create(&params);
> -                if (lpm->dq == NULL) {
> -                        RTE_LOG(ERR, LPM,
> -                                "LPM QS defer queue creation failed\n");
> +                internal->dq = rte_rcu_qsbr_dq_create(&params);
> +                if (internal->dq == NULL) {
> +                        RTE_LOG(ERR, LPM, "LPM QS defer queue creation failed\n");
>                          return 1;
>                  }
>                  if (dq)
> -                        *dq = lpm->dq;
> +                        *dq = internal->dq;
>          } else {
>                  rte_errno = EINVAL;
>                  return 1;
>          }
> -        lpm->rcu_mode = cfg->mode;
> -        lpm->v = cfg->v;
> +        internal->rcu_mode = cfg->mode;
> +        internal->v = cfg->v;
>
>          return 0;
>  }
> @@ -502,12 +516,13 @@ _tbl8_alloc(struct rte_lpm *lpm)
>  static int32_t
>  tbl8_alloc(struct rte_lpm *lpm)
>  {
> +        struct internal_lpm *internal = container_of(lpm, struct internal_lpm, lpm);
>          int32_t group_idx; /* tbl8 group index. */
>
>          group_idx = _tbl8_alloc(lpm);
> -        if ((group_idx == -ENOSPC) && (lpm->dq != NULL)) {
> +        if (group_idx == -ENOSPC && internal->dq != NULL) {
>                  /* If there are no tbl8 groups try to reclaim one. */
> -                if (rte_rcu_qsbr_dq_reclaim(lpm->dq, 1, NULL, NULL, NULL) == 0)
> +                if (rte_rcu_qsbr_dq_reclaim(internal->dq, 1, NULL, NULL, NULL) == 0)
>                          group_idx = _tbl8_alloc(lpm);
>          }
>
> @@ -518,20 +533,21 @@ static void
>  tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
>  {
>          struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
> +        struct internal_lpm *internal = container_of(lpm, struct internal_lpm, lpm);
>
> -        if (!lpm->v) {
>                  /* Set tbl8 group invalid*/
>                  __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
>                                  __ATOMIC_RELAXED);
> -        } else if (lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
> +        } else if (internal->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
>                  /* Wait for quiescent state change. */
> -                rte_rcu_qsbr_synchronize(lpm->v, RTE_QSBR_THRID_INVALID);
> +                rte_rcu_qsbr_synchronize(internal->v,
> +                        RTE_QSBR_THRID_INVALID);
>                  /* Set tbl8 group invalid*/
>                  __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
>                                  __ATOMIC_RELAXED);
> -        } else if (lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
> +        } else if (internal->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
>                  /* Push into QSBR defer queue. */
> -                rte_rcu_qsbr_dq_enqueue(lpm->dq, (void *)&tbl8_group_start);
> +                rte_rcu_qsbr_dq_enqueue(internal->dq, (void *)&tbl8_group_start);
>          }
>  }
>
> diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
> index 7889f21b3..a9568fcdd 100644
> --- a/lib/librte_lpm/rte_lpm.h
> +++ b/lib/librte_lpm/rte_lpm.h
> @@ -143,12 +143,6 @@ struct rte_lpm {
>                          __rte_cache_aligned; /**< LPM tbl24 table. */
>          struct rte_lpm_tbl_entry *tbl8; /**< LPM tbl8 table. */
>          struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
> -#ifdef ALLOW_EXPERIMENTAL_API
> -        /* RCU config. */
> -        struct rte_rcu_qsbr *v;         /* RCU QSBR variable. */
> -        enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
> -        struct rte_rcu_qsbr_dq *dq;     /* RCU QSBR defer queue. */
> -#endif
>  };
>
>  /** LPM RCU QSBR configuration structure. */
>
>
> --
> David Marchand
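For completeness, a minimal standalone sketch of the hiding pattern used in the quoted patch, with hypothetical names (pub_obj, priv_obj and a local container_of macro) rather than the DPDK types: the public structure is embedded inside a private one that only the .c file knows about, so existing callers keep the unchanged public ABI while the library recovers its private state from the public pointer.

#include <stddef.h>
#include <stdlib.h>

/* Public type: exposed in the installed header, part of the stable ABI. */
struct pub_obj {
        int public_field;
};

/* Private wrapper: defined only in the library's .c file. */
struct priv_obj {
        struct pub_obj pub;             /* public part handed out to callers */
        void *private_state;            /* members hidden from the ABI */
};

/* Same idea as the container_of() used in the patch above. */
#define local_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct pub_obj *
obj_create(void)
{
        struct priv_obj *priv = calloc(1, sizeof(*priv));

        if (priv == NULL)
                return NULL;
        /* Callers only ever see the public part. */
        return &priv->pub;
}

void
obj_free(struct pub_obj *pub)
{
        if (pub == NULL)
                return;
        /* Recover the enclosing private object from the public pointer. */
        free(local_container_of(pub, struct priv_obj, pub));
}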