-----Original Message-----
From: dev <dev-boun...@dpdk.org> On Behalf Of dev-requ...@dpdk.org
Sent: Friday, April 23, 2021 4:31 PM
To: dev@dpdk.org
Subject: dev Digest, Vol 348, Issue 163

Send dev mailing list submissions to
    dev@dpdk.org

To subscribe or unsubscribe via the World Wide Web, visit
    https://mails.dpdk.org/listinfo/dev
or, via email, send a message with subject or body 'help' to
    dev-requ...@dpdk.org

You can reach the person managing the list at
    dev-ow...@dpdk.org

When replying, please edit your Subject line so it is more specific than
"Re: Contents of dev digest..."

Today's Topics:

   1. Re: [PATCH] net/kni: check rte kni init result (Ferruh Yigit)
   2. Re: [PATCH v3 2/2] lib/mempool: distinguish debug counters from
      cache and pool (Kinsella, Ray)
   3. Re: [PATCH v2 1/3] bus/pci: enable PCI master in command register
      (Kinsella, Ray)
   4. Re: [PATCH] doc: announce modification in eventdev structure
      (Kinsella, Ray)
   5. [PATCH 0/2] bugfix for sched (Min Hu (Connor))
   6. [PATCH 1/2] lib/sched: fix return value judgment (Min Hu (Connor))
   7. [PATCH 2/2] lib/sched: optimize exception handling code (Min Hu (Connor))

----------------------------------------------------------------------

Message: 1
Date: Fri, 23 Apr 2021 11:17:07 +0100
From: Ferruh Yigit <ferruh.yi...@intel.com>
To: "Min Hu (Connor)" <humi...@huawei.com>, dev@dpdk.org
Subject: Re: [dpdk-dev] [PATCH] net/kni: check rte kni init result
Message-ID: <287cdaba-f3b8-141d-7b55-22ac09fa1...@intel.com>
Content-Type: text/plain; charset=utf-8; format=flowed

On 4/21/2021 3:14 AM, Min Hu (Connor) wrote:
> From: Chengwen Feng <fengcheng...@huawei.com>
>
> This patch adds a check of the rte_kni_init() result.
>
> Fixes: 75e2bc54c018 ("net/kni: add KNI PMD")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
> Signed-off-by: Min Hu (Connor) <humi...@huawei.com>

Acked-by: Ferruh Yigit <ferruh.yi...@intel.com>

------------------------------

Message: 2
Date: Fri, 23 Apr 2021 11:41:39 +0100
From: "Kinsella, Ray" <m...@ashroe.eu>
To: Olivier Matz <olivier.m...@6wind.com>, Dharmik Thakkar <dharmik.thak...@arm.com>
Cc: Andrew Rybchenko <andrew.rybche...@oktetlabs.ru>, dev@dpdk.org, n...@arm.com, joyce.k...@arm.com
Subject: Re: [dpdk-dev] [PATCH v3 2/2] lib/mempool: distinguish debug counters from cache and pool
Message-ID: <d1e81798-e8b0-c8fb-eb8e-4c54e32f0...@ashroe.eu>
Content-Type: text/plain; charset=utf-8

On 21/04/2021 17:29, Olivier Matz wrote:
> Hi Dharmik,
>
> Please see some comments below.
>
> On Mon, Apr 19, 2021 at 07:08:00PM -0500, Dharmik Thakkar wrote:
>> From: Joyce Kong <joyce.k...@arm.com>
>>
>> If the cache is enabled, objects are retrieved from / put to the cache
>> first, and only then from/to the common pool. Currently the debug stats
>> count the objects retrieved/put from/to the cache and the common pool
>> together; it is better to distinguish them.
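
For context, a minimal sketch of how the aggregated per-lcore counters discussed in this patch can be inspected from an application, assuming the library is built with RTE_LIBRTE_MEMPOOL_DEBUG enabled; the pool name passed to the lookup is purely illustrative:

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Sketch: find a pool by name and print its statistics; with
     * RTE_LIBRTE_MEMPOOL_DEBUG enabled the dump would include the
     * per-cache / per-common-pool put/get counters proposed here. */
    static void
    dump_pool_stats(void)
    {
            struct rte_mempool *mp = rte_mempool_lookup("example_pool"); /* illustrative name */

            if (mp != NULL)
                    rte_mempool_dump(stdout, mp);
    }
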
>> >> Signed-off-by: Joyce Kong <joyce.k...@arm.com> >> Signed-off-by: Dharmik Thakkar <dharmik.thak...@arm.com> >> Reviewed-by: Ruifeng Wang <ruifeng.w...@arm.com> >> Reviewed-by: Honnappa Nagarahalli <honnappa.nagaraha...@arm.com> >> --- >> lib/librte_mempool/rte_mempool.c | 24 ++++++++++++++++ >> lib/librte_mempool/rte_mempool.h | 47 >> ++++++++++++++++++++++---------- >> 2 files changed, 57 insertions(+), 14 deletions(-) >> >> diff --git a/lib/librte_mempool/rte_mempool.c >> b/lib/librte_mempool/rte_mempool.c >> index afb1239c8d48..339f14455624 100644 >> --- a/lib/librte_mempool/rte_mempool.c >> +++ b/lib/librte_mempool/rte_mempool.c >> @@ -1244,6 +1244,18 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp) >> for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { >> sum.put_bulk += mp->stats[lcore_id].put_bulk; >> sum.put_objs += mp->stats[lcore_id].put_objs; >> + sum.put_common_pool_bulk += >> + mp->stats[lcore_id].put_common_pool_bulk; >> + sum.put_common_pool_objs += >> + mp->stats[lcore_id].put_common_pool_objs; >> + sum.put_cache_bulk += mp->stats[lcore_id].put_cache_bulk; >> + sum.put_cache_objs += mp->stats[lcore_id].put_cache_objs; >> + sum.get_common_pool_bulk += >> + mp->stats[lcore_id].get_common_pool_bulk; >> + sum.get_common_pool_objs += >> + mp->stats[lcore_id].get_common_pool_objs; >> + sum.get_cache_bulk += mp->stats[lcore_id].get_cache_bulk; >> + sum.get_cache_objs += mp->stats[lcore_id].get_cache_objs; >> sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk; >> sum.get_success_objs += mp->stats[lcore_id].get_success_objs; >> sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk; >> @@ -1254,6 +1266,18 @@ rte_mempool_dump(FILE *f, struct rte_mempool *mp) >> fprintf(f, " stats:\n"); >> fprintf(f, " put_bulk=%"PRIu64"\n", sum.put_bulk); >> fprintf(f, " put_objs=%"PRIu64"\n", sum.put_objs); >> + fprintf(f, " put_common_pool_bulk=%"PRIu64"\n", >> + sum.put_common_pool_bulk); >> + fprintf(f, " put_common_pool_objs=%"PRIu64"\n", >> + sum.put_common_pool_objs); >> + fprintf(f, " put_cache_bulk=%"PRIu64"\n", sum.put_cache_bulk); >> + fprintf(f, " put_cache_objs=%"PRIu64"\n", sum.put_cache_objs); >> + fprintf(f, " get_common_pool_bulk=%"PRIu64"\n", >> + sum.get_common_pool_bulk); >> + fprintf(f, " get_common_pool_objs=%"PRIu64"\n", >> + sum.get_common_pool_objs); >> + fprintf(f, " get_cache_bulk=%"PRIu64"\n", sum.get_cache_bulk); >> + fprintf(f, " get_cache_objs=%"PRIu64"\n", sum.get_cache_objs); >> fprintf(f, " get_success_bulk=%"PRIu64"\n", sum.get_success_bulk); >> fprintf(f, " get_success_objs=%"PRIu64"\n", sum.get_success_objs); >> fprintf(f, " get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk); >> diff --git a/lib/librte_mempool/rte_mempool.h >> b/lib/librte_mempool/rte_mempool.h >> index 848a19226149..0959f8a3f367 100644 >> --- a/lib/librte_mempool/rte_mempool.h >> +++ b/lib/librte_mempool/rte_mempool.h >> @@ -66,12 +66,20 @@ extern "C" { >> * A structure that stores the mempool statistics (per-lcore). >> */ >> struct rte_mempool_debug_stats { >> - uint64_t put_bulk; /**< Number of puts. */ >> - uint64_t put_objs; /**< Number of objects successfully put. */ >> - uint64_t get_success_bulk; /**< Successful allocation number. */ >> - uint64_t get_success_objs; /**< Objects successfully allocated. */ >> - uint64_t get_fail_bulk; /**< Failed allocation number. */ >> - uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ >> + uint64_t put_bulk; /**< Number of puts. */ >> + uint64_t put_objs; /**< Number of objects successfully >> put. 
*/ >> + uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in >> common pool. */ >> + uint64_t put_common_pool_objs; /**< Number of objects enqueued in >> common pool. */ >> + uint64_t put_cache_bulk; /**< Number of bulks enqueued in >> cache. */ >> + uint64_t put_cache_objs; /**< Number of objects enqueued in >> cache. */ >> + uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from >> common pool. */ >> + uint64_t get_common_pool_objs; /**< Number of objects dequeued from >> common pool. */ >> + uint64_t get_cache_bulk; /**< Number of bulks dequeued from >> cache. */ >> + uint64_t get_cache_objs; /**< Number of objects dequeued from >> cache. */ >> + uint64_t get_success_bulk; /**< Successful allocation number. */ >> + uint64_t get_success_objs; /**< Objects successfully allocated. >> */ >> + uint64_t get_fail_bulk; /**< Failed allocation number. */ >> + uint64_t get_fail_objs; /**< Objects that failed to be >> allocated. */ > > I missed it the first time, but this changes the size of the > rte_mempool_debug_stats structure. I think we don't care about this > ABI breakage because this structure is only defined if > RTE_LIBRTE_MEMPOOL_DEBUG is set. But just in case, adding Ray as Cc. Agreed, if it is just a debugging non-default feature. > About the field themselves, I'm not certain that there is an added > value to have stats for cache gets and puts. My feeling is that the > important stat to monitor is the access to common pool, because it is > the one that highlights a possible performance impact (contention). > The cache stats are more or less equal to "success + fail - common". > Moreover, it will simplify the patch and avoid risks of mistakes. > > What do you think? > >> /** Successful allocation number of contiguous blocks. */ >> uint64_t get_success_blks; >> /** Failed allocation number of contiguous blocks. 
*/ @@ -699,10 >> +707,18 @@ rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp, >> void **obj_table, unsigned n) >> { >> struct rte_mempool_ops *ops; >> + int ret; >> >> rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n); >> ops = rte_mempool_get_ops(mp->ops_index); >> - return ops->dequeue(mp, obj_table, n); >> + ret = ops->dequeue(mp, obj_table, n); >> + if (ret == 0) { >> + __MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1); >> + __MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n); >> + __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); >> + __MEMPOOL_STAT_ADD(mp, get_success_objs, n); >> + } >> + return ret; >> } >> >> /** >> @@ -749,6 +765,8 @@ rte_mempool_ops_enqueue_bulk(struct rte_mempool >> *mp, void * const *obj_table, { >> struct rte_mempool_ops *ops; >> >> + __MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1); >> + __MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n); >> rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n); >> ops = rte_mempool_get_ops(mp->ops_index); >> return ops->enqueue(mp, obj_table, n); @@ -1297,14 +1315,18 @@ >> __mempool_generic_put(struct rte_mempool *mp, void * const >> *obj_table, >> >> /* Add elements back into the cache */ >> rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n); >> - >> cache->len += n; >> >> + __MEMPOOL_STAT_ADD(mp, put_cache_bulk, 1); >> + >> if (cache->len >= cache->flushthresh) { >> + __MEMPOOL_STAT_ADD(mp, put_cache_objs, >> + n - (cache->len - cache->size)); >> rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size], >> cache->len - cache->size); >> cache->len = cache->size; >> - } >> + } else >> + __MEMPOOL_STAT_ADD(mp, put_cache_objs, n); >> > > In case we keep cache stats, I'd add {} after the else to be > consistent with the if(). > >> return; >> >> @@ -1438,8 +1460,8 @@ __mempool_generic_get(struct rte_mempool *mp, >> void **obj_table, >> >> cache->len -= n; >> >> - __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); >> - __MEMPOOL_STAT_ADD(mp, get_success_objs, n); >> + __MEMPOOL_STAT_ADD(mp, get_cache_bulk, 1); >> + __MEMPOOL_STAT_ADD(mp, get_cache_objs, n); > > In case we keep cache stats, I don't think we should remove > get_success stats increment. Else, the success stats will never be > incremented when retrieving objects from the cache. > > >> >> return 0; >> >> @@ -1451,9 +1473,6 @@ __mempool_generic_get(struct rte_mempool *mp, void >> **obj_table, >> if (ret < 0) { >> __MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); >> __MEMPOOL_STAT_ADD(mp, get_fail_objs, n); >> - } else { >> - __MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); >> - __MEMPOOL_STAT_ADD(mp, get_success_objs, n); >> } >> >> return ret; >> -- >> 2.17.1 >> ------------------------------ Message: 3 Date: Fri, 23 Apr 2021 11:43:58 +0100 From: "Kinsella, Ray" <m...@ashroe.eu> To: Haiyue Wang <haiyue.w...@intel.com>, dev@dpdk.org Cc: qi.z.zh...@intel.com, liang-min.w...@intel.com, Neil Horman <nhor...@tuxdriver.com>, Gaetan Rivet <gr...@u256.net> Subject: Re: [dpdk-dev] [PATCH v2 1/3] bus/pci: enable PCI master in command register Message-ID: <1c9d285c-81c6-a977-682a-473e691ab...@ashroe.eu> Content-Type: text/plain; charset=utf-8 On 22/04/2021 02:18, Haiyue Wang wrote: > This adds the support to set 'Bus Master Enable' bit in the PCI command > register. 
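
A brief usage sketch, separate from the patch itself: a PCI driver's probe callback could call the new helper before the device starts DMA. The probe function below is hypothetical; only rte_pci_enable_bus_master() and its 0 / -1 return convention come from this patch (note the symbol is added as experimental).

    #include <rte_bus_pci.h>
    #include <rte_common.h>
    #include <rte_log.h>

    /* Sketch: enable bus mastering early in a (hypothetical) probe path. */
    static int
    example_pci_probe(struct rte_pci_driver *drv __rte_unused,
                      struct rte_pci_device *pci_dev)
    {
            /* Set the Bus Master Enable bit so the device may issue DMA. */
            if (rte_pci_enable_bus_master(pci_dev) != 0) {
                    RTE_LOG(ERR, EAL, "cannot enable PCI bus mastering\n");
                    return -1;
            }

            /* ... remaining device setup would go here ... */
            return 0;
    }
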
> > Signed-off-by: Haiyue Wang <haiyue.w...@intel.com> > Tested-by: Qi Zhang <qi.z.zh...@intel.com> > --- > drivers/bus/pci/pci_common.c | 20 ++++++++++++++++++++ > drivers/bus/pci/rte_bus_pci.h | 12 ++++++++++++ > drivers/bus/pci/version.map | 1 + > lib/pci/rte_pci.h | 4 ++++ > 4 files changed, 37 insertions(+) > > diff --git a/drivers/bus/pci/pci_common.c b/drivers/bus/pci/pci_common.c > index ee7f96635..b631cb9c7 100644 > --- a/drivers/bus/pci/pci_common.c > +++ b/drivers/bus/pci/pci_common.c > @@ -746,6 +746,26 @@ rte_pci_find_ext_capability(struct rte_pci_device *dev, > uint32_t cap) > return 0; > } > > +int > +rte_pci_enable_bus_master(struct rte_pci_device *dev) > +{ > + uint16_t cmd; > + > + if (rte_pci_read_config(dev, &cmd, sizeof(cmd), RTE_PCI_COMMAND) < 0) { > + RTE_LOG(ERR, EAL, "error in reading PCI command register\n"); > + return -1; > + } > + > + cmd |= RTE_PCI_COMMAND_MASTER; > + > + if (rte_pci_write_config(dev, &cmd, sizeof(cmd), RTE_PCI_COMMAND) < 0) { > + RTE_LOG(ERR, EAL, "error in writing PCI command register\n"); > + return -1; > + } > + > + return 0; > +} > + > struct rte_pci_bus rte_pci_bus = { > .bus = { > .scan = rte_pci_scan, > diff --git a/drivers/bus/pci/rte_bus_pci.h b/drivers/bus/pci/rte_bus_pci.h > index 64886b473..83caf477b 100644 > --- a/drivers/bus/pci/rte_bus_pci.h > +++ b/drivers/bus/pci/rte_bus_pci.h > @@ -249,6 +249,18 @@ void rte_pci_dump(FILE *f); > __rte_experimental > off_t rte_pci_find_ext_capability(struct rte_pci_device *dev, uint32_t cap); > > +/** > + * Enables Bus Master for device's PCI command register. > + * > + * @param dev > + * A pointer to rte_pci_device structure. > + * > + * @return > + * 0 on success, -1 on error in PCI config space read/write. > + */ > +__rte_experimental > +int rte_pci_enable_bus_master(struct rte_pci_device *dev); > + > /** > * Register a PCI driver. > * > diff --git a/drivers/bus/pci/version.map b/drivers/bus/pci/version.map > index f33ed0abd..b271e48a8 100644 > --- a/drivers/bus/pci/version.map > +++ b/drivers/bus/pci/version.map > @@ -20,5 +20,6 @@ DPDK_21 { > EXPERIMENTAL { > global: > Please annotate when the symbol was added. 
> +	rte_pci_enable_bus_master;
> 	rte_pci_find_ext_capability;
> };
> diff --git a/lib/pci/rte_pci.h b/lib/pci/rte_pci.h
> index a8f8e404a..1f33d687f 100644
> --- a/lib/pci/rte_pci.h
> +++ b/lib/pci/rte_pci.h
> @@ -32,6 +32,10 @@ extern "C" {
>
> #define RTE_PCI_VENDOR_ID 0x00 /* 16 bits */
> #define RTE_PCI_DEVICE_ID 0x02 /* 16 bits */
> +#define RTE_PCI_COMMAND 0x04 /* 16 bits */
> +
> +/* PCI Command Register */
> +#define RTE_PCI_COMMAND_MASTER 0x4 /* Bus Master Enable */
>
> /* PCI Express capability registers */
> #define RTE_PCI_EXP_DEVCTL 8 /* Device Control */
>

------------------------------

Message: 4
Date: Fri, 23 Apr 2021 11:53:19 +0100
From: "Kinsella, Ray" <m...@ashroe.eu>
To: gak...@marvell.com, jer...@marvell.com, tho...@monjalon.net, dev@dpdk.org, david.march...@redhat.com
Cc: abhinandan.guj...@intel.com, hemant.agra...@nxp.com, nipun.gu...@nxp.com, sachin.sax...@oss.nxp.com, ano...@marvell.com, ma...@nvidia.com, roy.fan.zh...@intel.com, g.si...@nxp.com, erik.g.carri...@intel.com, jay.jayatheert...@intel.com, pbhagavat...@marvell.com, harry.van.haa...@intel.com, sthot...@marvell.com
Subject: Re: [dpdk-dev] [PATCH] doc: announce modification in eventdev structure
Message-ID: <347ace77-7704-5ac4-7dd9-0cabe88df...@ashroe.eu>
Content-Type: text/plain; charset=utf-8

On 15/04/2021 10:08, gak...@marvell.com wrote:
> From: Akhil Goyal <gak...@marvell.com>
>
> A new field ``ca_enqueue`` was added at the end of ``rte_eventdev``
> to maintain ABI. It needs to be moved up in the structure to align
> with the other enqueue callbacks.
>
> Signed-off-by: Akhil Goyal <gak...@marvell.com>
> ---
>  doc/guides/rel_notes/deprecation.rst | 4 ++++
>  1 file changed, 4 insertions(+)
>
> diff --git a/doc/guides/rel_notes/deprecation.rst b/doc/guides/rel_notes/deprecation.rst
> index 2afc84c39..a973de4a9 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -127,6 +127,10 @@ Deprecation Notices
>    values to the function ``rte_event_eth_rx_adapter_queue_add`` using
>    the structure ``rte_event_eth_rx_adapter_queue_add``.
>
> +* eventdev: The function pointer ``ca_enqueue`` in structure ``rte_eventdev``
> +  will be moved after ``txa_enqueue`` so that all enqueue/dequeue
> +  function pointers are adjacent to each other.
> +
>  * sched: To allow more traffic classes, flexible mapping of pipe queues to
>    traffic classes, and subport level configuration of pipes and queues
>    changes will be made to macros, data structures and API functions defined
>

I admire the discipline - but since you are not actually removing ca_enqueue,
just moving it in memory when the new ABI is declared in any case, this is not
required.

Thanks,

Ray K

------------------------------

Message: 5
Date: Fri, 23 Apr 2021 19:01:10 +0800
From: "Min Hu (Connor)" <humi...@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yi...@intel.com>, <cristian.dumitre...@intel.com>, <jasvinder.si...@intel.com>
Subject: [dpdk-dev] [PATCH 0/2] bugfix for sched
Message-ID: <1619175672-20016-1-git-send-email-humi...@huawei.com>
Content-Type: text/plain

This patch set contains two bugfixes for sched.
Huisong Li (2):
  lib/sched: fix return value judgment
  lib/sched: optimize exception handling code

 lib/sched/rte_sched.c | 60 +++++++++++++++++++++++++++------------------------
 1 file changed, 32 insertions(+), 28 deletions(-)

--
2.7.4

------------------------------

Message: 6
Date: Fri, 23 Apr 2021 19:01:11 +0800
From: "Min Hu (Connor)" <humi...@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yi...@intel.com>, <cristian.dumitre...@intel.com>, <jasvinder.si...@intel.com>
Subject: [dpdk-dev] [PATCH 1/2] lib/sched: fix return value judgment
Message-ID: <1619175672-20016-2-git-send-email-humi...@huawei.com>
Content-Type: text/plain

From: Huisong Li <lihuis...@huawei.com>

This patch fixes the return value check after allocating memory to store
the subport profile, and frees the 'rte_sched_port' memory if that
allocation fails.

Fixes: 0ea4c6afcaf1 ("sched: add subport profile table")
Cc: sta...@dpdk.org

Signed-off-by: Huisong Li <lihuis...@huawei.com>
Signed-off-by: Min Hu (Connor) <humi...@huawei.com>
---
 lib/sched/rte_sched.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index cd87e68..df0ab5c 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -961,9 +961,9 @@ rte_sched_port_config(struct rte_sched_port_params *params)
 	/* Allocate memory to store the subport profile */
 	port->subport_profiles = rte_zmalloc_socket("subport_profile", size2,
 					RTE_CACHE_LINE_SIZE, params->socket);
-	if (port == NULL) {
+	if (port->subport_profiles == NULL) {
 		RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__);
-
+		rte_free(port);
 		return NULL;
 	}
--
2.7.4

Ack <savinay.dharma...@intel.com>

------------------------------

Message: 7
Date: Fri, 23 Apr 2021 19:01:12 +0800
From: "Min Hu (Connor)" <humi...@huawei.com>
To: <dev@dpdk.org>
Cc: <ferruh.yi...@intel.com>, <cristian.dumitre...@intel.com>, <jasvinder.si...@intel.com>
Subject: [dpdk-dev] [PATCH 2/2] lib/sched: optimize exception handling code
Message-ID: <1619175672-20016-3-git-send-email-humi...@huawei.com>
Content-Type: text/plain

From: Huisong Li <lihuis...@huawei.com>

Currently, rte_sched_free_memory() is called multiple times by the
exception handling code in rte_sched_subport_config() and
rte_sched_pipe_config(). This patch consolidates that handling into a
single exit path that frees the memory.
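
For readers unfamiliar with the single-exit style adopted here, a minimal standalone sketch of the pattern; the structure and function names are illustrative and not taken from rte_sched.c:

    #include <errno.h>
    #include <stdlib.h>

    struct ctx {
            void *a;
            void *b;
    };

    /* Free whatever has been allocated so far; free(NULL) is a no-op. */
    static void
    ctx_free(struct ctx *c)
    {
            free(c->a);
            free(c->b);
            c->a = NULL;
            c->b = NULL;
    }

    static int
    ctx_configure(struct ctx *c, size_t n)
    {
            int ret;

            c->a = NULL;
            c->b = NULL;

            c->a = calloc(1, n);
            if (c->a == NULL) {
                    ret = -ENOMEM;
                    goto out;
            }

            c->b = calloc(1, n);
            if (c->b == NULL) {
                    ret = -ENOMEM;
                    goto out;
            }

            return 0;       /* success: keep the allocations */

    out:
            ctx_free(c);    /* every error path funnels through one cleanup point */
            return ret;
    }
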
Fixes: ac6fcb841b0f ("sched: update subport rate dynamically") Fixes: 34a90f86657c ("sched: modify pipe functions for config flexibility") Fixes: ce7c4fd7c2ac ("sched: add pipe config to subport level") Cc: sta...@dpdk.org Signed-off-by: Huisong Li <lihuis...@huawei.com> Signed-off-by: Min Hu (Connor) <humi...@huawei.com> --- lib/sched/rte_sched.c | 56 +++++++++++++++++++++++++++------------------------ 1 file changed, 30 insertions(+), 26 deletions(-) diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c index df0ab5c..a858f61 100644 --- a/lib/sched/rte_sched.c +++ b/lib/sched/rte_sched.c @@ -1090,6 +1090,7 @@ rte_sched_subport_config(struct rte_sched_port *port, uint32_t n_subport_pipe_queues, i; uint32_t size0, size1, bmp_mem_size; int status; + int ret; /* Check user parameters */ if (port == NULL) { @@ -1101,17 +1102,16 @@ rte_sched_subport_config(struct rte_sched_port *port, if (subport_id >= port->n_subports_per_port) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for subport id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } if (subport_profile_id >= port->n_max_subport_profiles) { RTE_LOG(ERR, SCHED, "%s: " "Number of subport profile exceeds the max limit\n", __func__); - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } /** Memory is allocated only on first invocation of the api for a @@ -1127,9 +1127,8 @@ rte_sched_subport_config(struct rte_sched_port *port, RTE_LOG(NOTICE, SCHED, "%s: Port scheduler params check failed (%d)\n", __func__, status); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } /* Determine the amount of memory to allocate */ @@ -1143,9 +1142,8 @@ rte_sched_subport_config(struct rte_sched_port *port, if (s == NULL) { RTE_LOG(ERR, SCHED, "%s: Memory allocation fails\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -ENOMEM; + ret = -ENOMEM; + goto out; } n_subports++; @@ -1185,12 +1183,11 @@ rte_sched_subport_config(struct rte_sched_port *port, params->red_params[i][j].min_th, params->red_params[i][j].max_th, params->red_params[i][j].maxp_inv) != 0) { - rte_sched_free_memory(port, n_subports); - RTE_LOG(NOTICE, SCHED, "%s: RED configuration init fails\n", __func__); - return -EINVAL; + ret = -EINVAL; + goto out; } } } @@ -1238,9 +1235,8 @@ rte_sched_subport_config(struct rte_sched_port *port, if (s->bmp == NULL) { RTE_LOG(ERR, SCHED, "%s: Subport bitmap init error\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } for (i = 0; i < RTE_SCHED_PORT_N_GRINDERS; i++) @@ -1285,6 +1281,11 @@ rte_sched_subport_config(struct rte_sched_port *port, rte_sched_port_log_subport_profile(port, subport_profile_id); return 0; + +out: + rte_sched_free_memory(port, n_subports); + + return ret; } int @@ -1299,6 +1300,7 @@ rte_sched_pipe_config(struct rte_sched_port *port, struct rte_sched_pipe_profile *params; uint32_t n_subports = subport_id + 1; uint32_t deactivate, profile, i; + int ret; /* Check user parameters */ profile = (uint32_t) pipe_profile; @@ -1313,26 +1315,23 @@ rte_sched_pipe_config(struct rte_sched_port *port, if (subport_id >= port->n_subports_per_port) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter subport id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } s = port->subports[subport_id]; if (pipe_id >= s->n_pipes_per_subport_enabled) { RTE_LOG(ERR, SCHED, "%s: Incorrect value 
for parameter pipe id\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } if (!deactivate && profile >= s->n_pipe_profiles) { RTE_LOG(ERR, SCHED, "%s: Incorrect value for parameter pipe profile\n", __func__); - - rte_sched_free_memory(port, n_subports); - return -EINVAL; + ret = -EINVAL; + goto out; } sp = port->subport_profiles + s->profile; @@ -1406,6 +1405,11 @@ rte_sched_pipe_config(struct rte_sched_port *port, } return 0; + +out: + rte_sched_free_memory(port, n_subports); + + return ret; } int -- 2.7.4 End of dev Digest, Vol 348, Issue 163 *************************************