[PATCH] common/cnxk: fix incorrect error checking
Fixes: 804c108b039a ("common/cnxk: set BPHY IRQ handler") Signed-off-by: Weiguo Li --- drivers/common/cnxk/roc_bphy_irq.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/common/cnxk/roc_bphy_irq.c b/drivers/common/cnxk/roc_bphy_irq.c index f4e9b341af..5ba73c98dc 100644 --- a/drivers/common/cnxk/roc_bphy_irq.c +++ b/drivers/common/cnxk/roc_bphy_irq.c @@ -261,9 +261,9 @@ roc_bphy_irq_handler_set(struct roc_bphy_irq_chip *chip, int irq_num, CPU_SET(curr_cpu, &intr_cpuset); retval = pthread_setaffinity_np(pthread_self(), sizeof(intr_cpuset), &intr_cpuset); - if (rc < 0) { + if (retval < 0) { plt_err("Failed to set affinity mask"); - return rc; + return retval; } irq_usr.isr_base = (uint64_t)roc_bphy_intr_handler; -- 2.25.1
[PATCH v2] net/i40e: reduce redundant store operation
For free buffer operation in i40e vector path, it is unnecessary to store 'NULL' into txep.mbuf. This is because when putting mbuf into Tx queue, tx_tail is the sentinel. And when doing tx_free, tx_next_dd is the sentinel. In all processes, mbuf==NULL is not a condition in check. Thus reset of mbuf is unnecessary and can be omitted. Signed-off-by: Feifei Wang Reviewed-by: Ruifeng Wang --- v2: remove the change for scalar path due to scalar path needs to check whether the mbuf is 'NULL' to release and clean up (Haiyue) drivers/net/i40e/i40e_rxtx_vec_common.h | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/net/i40e/i40e_rxtx_vec_common.h b/drivers/net/i40e/i40e_rxtx_vec_common.h index f9a7f46550..26deb59fc4 100644 --- a/drivers/net/i40e/i40e_rxtx_vec_common.h +++ b/drivers/net/i40e/i40e_rxtx_vec_common.h @@ -103,7 +103,6 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq) if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) { for (i = 0; i < n; i++) { free[i] = txep[i].mbuf; - txep[i].mbuf = NULL; } rte_mempool_put_bulk(free[0]->pool, (void **)free, n); goto done; -- 2.25.1
[PATCH v2] app/eventdev: add crypto producer mode
In crypto producer mode, producer core enqueues cryptodev with software generated crypto ops and worker core dequeues crypto completion events from the eventdev. Event crypto metadata used for above processing is pre-populated in each crypto session. Parameter --prod_type_cryptodev can be used to enable crypto producer mode. Parameter --crypto_adptr_mode can be set to select the crypto adapter mode, 0 for OP_NEW and 1 for OP_FORWARD. This mode can be used to measure the performance of crypto adapter. Example: ./dpdk-test-eventdev -l 0-2 -w -w -- \ --prod_type_cryptodev --crypto_adptr_mode 1 --test=perf_atq \ --stlist=a --wlcores 1 --plcores 2 Signed-off-by: Shijith Thotton --- v2: * Fix RHEL compilation warning. app/test-eventdev/evt_common.h | 3 + app/test-eventdev/evt_main.c | 13 +- app/test-eventdev/evt_options.c | 27 ++ app/test-eventdev/evt_options.h | 12 + app/test-eventdev/evt_test.h | 6 + app/test-eventdev/test_perf_atq.c| 51 app/test-eventdev/test_perf_common.c | 406 ++- app/test-eventdev/test_perf_common.h | 16 ++ app/test-eventdev/test_perf_queue.c | 52 doc/guides/tools/testeventdev.rst| 13 + 10 files changed, 592 insertions(+), 7 deletions(-) diff --git a/app/test-eventdev/evt_common.h b/app/test-eventdev/evt_common.h index f466434459..2f301a7e79 100644 --- a/app/test-eventdev/evt_common.h +++ b/app/test-eventdev/evt_common.h @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -39,6 +40,7 @@ enum evt_prod_type { EVT_PROD_TYPE_SYNT, /* Producer type Synthetic i.e. CPU. */ EVT_PROD_TYPE_ETH_RX_ADPTR, /* Producer type Eth Rx Adapter. */ EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */ + EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR, /* Producer type Crypto Adapter. 
*/ EVT_PROD_TYPE_MAX, }; @@ -77,6 +79,7 @@ struct evt_options { uint64_t timer_tick_nsec; uint64_t optm_timer_tick_nsec; enum evt_prod_type prod_type; + enum rte_event_crypto_adapter_mode crypto_adptr_mode; }; static inline bool diff --git a/app/test-eventdev/evt_main.c b/app/test-eventdev/evt_main.c index 194c980c7a..cef0fb1382 100644 --- a/app/test-eventdev/evt_main.c +++ b/app/test-eventdev/evt_main.c @@ -161,12 +161,19 @@ main(int argc, char **argv) goto mempool_destroy; } } + /* Test specific cryptodev setup */ + if (test->ops.cryptodev_setup) { + if (test->ops.cryptodev_setup(test, &opt)) { + evt_err("%s: cryptodev setup failed", opt.test_name); + goto ethdev_destroy; + } + } /* Test specific eventdev setup */ if (test->ops.eventdev_setup) { if (test->ops.eventdev_setup(test, &opt)) { evt_err("%s: eventdev setup failed", opt.test_name); - goto ethdev_destroy; + goto cryptodev_destroy; } } @@ -197,6 +204,10 @@ main(int argc, char **argv) if (test->ops.eventdev_destroy) test->ops.eventdev_destroy(test, &opt); +cryptodev_destroy: + if (test->ops.cryptodev_destroy) + test->ops.cryptodev_destroy(test, &opt); + ethdev_destroy: if (test->ops.ethdev_destroy) test->ops.ethdev_destroy(test, &opt); diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c index 753a7dbd7d..5ad1491020 100644 --- a/app/test-eventdev/evt_options.c +++ b/app/test-eventdev/evt_options.c @@ -122,6 +122,26 @@ evt_parse_timer_prod_type_burst(struct evt_options *opt, return 0; } +static int +evt_parse_crypto_prod_type(struct evt_options *opt, + const char *arg __rte_unused) +{ + opt->prod_type = EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR; + return 0; +} + +static int +evt_parse_crypto_adptr_mode(struct evt_options *opt, const char *arg) +{ + uint8_t mode; + int ret; + + ret = parser_read_uint8(&mode, arg); + opt->crypto_adptr_mode = mode ? 
RTE_EVENT_CRYPTO_ADAPTER_OP_FORWARD : + RTE_EVENT_CRYPTO_ADAPTER_OP_NEW; + return ret; +} + static int evt_parse_test_name(struct evt_options *opt, const char *arg) { @@ -335,6 +355,7 @@ usage(char *program) "\t--queue_priority : enable queue priority\n" "\t--deq_tmo_nsec : global dequeue timeout\n" "\t--prod_type_ethdev : use ethernet device as producer.\n" + "\t--prod_type_cryptodev : use crypto device as producer.\n" "\t--prod_type_timerdev : use event timer device as producer.\n" "\t expity_nsec would be the timeout\n" "\t in ns.\n" @@ -345,6 +366,8 @@ usage(char *program) "
RE: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select is supported before write
> From: Wang, Haiyue [mailto:haiyue.w...@intel.com] > Sent: Tuesday, 21 December 2021 02.15 > > > -Original Message- > > From: Stephen Douthit > > Sent: Tuesday, December 21, 2021 05:33 > > > > On 12/20/21 02:53, Wang, Haiyue wrote: > > >> -Original Message- > > >> From: Stephen Douthit > > >> Sent: Tuesday, December 7, 2021 06:19 > > >> > > >> Make sure an SFP is really a SFF-8472 device that supports the > optional > > >> soft rate select feature before just blindly poking those I2C > registers. > > >> > > >> Skip all I2C traffic if we know there's no SFP. > > >> > > >> Fixes: f3430431aba ("ixgbe/base: add SFP+ dual-speed support") > > >> Cc: sta...@dpdk.org > > >> > > >> Signed-off-by: Stephen Douthit > > >> --- > > > > > > > > >>/* Set RS0 */ > > >>status = hw->phy.ops.read_i2c_byte(hw, > IXGBE_SFF_SFF_8472_OSCB, > > >> > IXGBE_I2C_EEPROM_DEV_ADDR2, > > >> diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h > b/drivers/net/ixgbe/base/ixgbe_phy.h > > >> index ceefbb3e68..cd57ce040f 100644 > > >> --- a/drivers/net/ixgbe/base/ixgbe_phy.h > > >> +++ b/drivers/net/ixgbe/base/ixgbe_phy.h > > >> @@ -21,6 +21,7 @@ > > >> #define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 > > >> #define IXGBE_SFF_CABLE_SPEC_COMP0x3C > > >> #define IXGBE_SFF_SFF_8472_SWAP 0x5C > > >> +#define IXGBE_SFF_SFF_8472_EOPT 0x5D > > > > > > Looks like this is YOUR platform specific, then this patchset can't > be > > > merged. : - ( > > > > This isn't anything unique to our hardware, these values are coming > from > > the SFF-8472 SFP+ I2C specification. > > > > The ability to do a soft rate select via I2C is an optional feature, > and > > modules that support it are supposed to set bit 3 in byte 93 (0x5d), > the > > "Enhanced Options" register, to advertise the functionality. > > > > Please see section 8.10 and Table 8-6 in the SFF-8472 spec. 
> > > > Checking the RATE_SELECT bit flag may be overkill since the > transceiver > > is supposed to ignore writes to rate select control bits if the > feature > > isn't implemented. I can drop that check if you like, but the other > > checks for a 8472 device (vs 8079) aren't anything different than > what > > already happens in the driver elsewhere[1]. I'd argue that testing > that > > a feature is supported in hardware before trying to use it is normal > > driver behavior. > > > > If instead you mean that the entire series is somehow applicable only > to > > our hardware, I'm not sure why. > > > > That hotplug issue isn't seen on the same hardware when using the > Linux > > driver; so it's a dpdk problem (at least on C3000 ixgbe devs), and > not a > > I can't find your related fix in two official Linux drivers: > > https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree > /drivers/net/ethernet/intel/ixgbe > https://www.intel.com/content/www/us/en/download/14302/14687/intel- > network-adapter-driver-for-pcie-intel-10-gigabit-ethernet-network- > connections-under-linux.html? > > Normally, DPDK keeps sync with this kind of release. > Working with the Linux kernel mainline drivers is good advice. The official Intel Linux drivers seem to be ages behind the Kernel mainline, and they don't fully support the C3000 NICs, so don’t waste any time there! We recently tried using the official Intel Linux drivers for a C3338 based project (using Kernel 3.19 in 32 bit mode with x2APIC disabled), and they didn't work at all. We ended up backporting the necessary changes from the kernel mainline instead. > > hardware problem. Fixing the hotplug/rateswap issue was my primary > > goal, the other patches fix problems I found along the way while > > debugging. > > > > I can also reproduce the hotplug/rateswap issue on the PLCC-B, an > Intel > > reference design for the C3000 family, so again, not unique to this > > platform. 
> > I guess this is just in C3000 reference board SDK ? > > I recommend you submit the fix to kernel firstly, you will get more > experts' reviews and fully test: > > https://patchwork.ozlabs.org/project/intel-wired-lan/list/ > https://lists.osuosl.org/mailman/listinfo/intel-wired-lan >
RE: [dpdk-dev] [PATCH] config/x86: add support for AMD platform
[AMD Official Use Only] I don't see any update since Thomas's. Do we have this patch taken care of submitting to 22.03 and backporting it? -Original Message- From: Thomas Monjalon Sent: Wednesday, November 24, 2021 4:37 AM To: techbo...@dpdk.org Cc: Bruce Richardson ; Aman Kumar ; David Marchand ; Song, Keesang ; dev@dpdk.org Subject: Re: [dpdk-dev] [PATCH] config/x86: add support for AMD platform [CAUTION: External Email] Ping techboard for comments 18/11/2021 15:05, Thomas Monjalon: > 18/11/2021 14:52, Bruce Richardson: > > On Thu, Nov 18, 2021 at 01:25:38PM +0100, Thomas Monjalon wrote: > > > I request a techboard decision for this patch. > > > > > > > > > 02/11/2021 20:04, Thomas Monjalon: > > > > 02/11/2021 19:45, David Marchand: > > > > > On Tue, Nov 2, 2021 at 3:53 PM Aman Kumar > > > > > wrote: > > > > > > > > > > > > -Dcpu_instruction_set=znverX meson option can be used to > > > > > > build dpdk for AMD platforms. Supported options are znver1, > > > > > > znver2 and znver3. > > > > > > > > > > > > Signed-off-by: Aman Kumar > > > > > > --- > > > > > > dpdk_conf.set('RTE_CACHE_LINE_SIZE', 64) > > > > > > dpdk_conf.set('RTE_MAX_LCORE', 128) > > > > > > dpdk_conf.set('RTE_MAX_NUMA_NODES', 32) > > > > > > + > > > > > > +# AMD platform support > > > > > > +if get_option('cpu_instruction_set') == 'znver1' > > > > > > +dpdk_conf.set('RTE_MAX_LCORE', 256) elif > > > > > > +get_option('cpu_instruction_set') == 'znver2' > > > > > > +dpdk_conf.set('RTE_MAX_LCORE', 512) elif > > > > > > +get_option('cpu_instruction_set') == 'znver3' > > > > > > +dpdk_conf.set('RTE_MAX_LCORE', 512) endif > > > > > > > > > > I already replied to a similar patch earlier in this release. 
> > > > > https://nam11.safelinks.protection.outlook.com/?url=https%3A%2 > > > > > F%2Finbox.dpdk.org%2Fdev%2FCAJFAV8z-5amvEnr3mazkTqH-7SZX_C6EqC > > > > > ua6UdMXXHgrcmT6g%40mail.gmail.com%2F&data=04%7C01%7Ckeesan > > > > > g.song%40amd.com%7C621ed6a39c304c905dbb08d9af4717c5%7C3dd8961f > > > > > e4884e608e11a82d994e183d%7C0%7C0%7C637733542157359380%7CUnknow > > > > > n%7CTWFpbGZsb3d8eyJWIjoiMC4wLjAwMDAiLCJQIjoiV2luMzIiLCJBTiI6Ik > > > > > 1haWwiLCJXVCI6Mn0%3D%7C3000&sdata=L9IM4ZtukVwY5qdbGgkt2zlu > > > > > sGm0ZmrQmE9oMeg1RDo%3D&reserved=0 > > > > > > > > > > So repeating the same: do you actually _need_ more than 128 > > > > > lcores in a single DPDK application? > > > > > > We did not receive an answer to this question. > > > > > > > Yes I forgot this previous discussion concluding that we should > > > > not increase more than 128 threads. > > > > > > We had a discussion yesterday in techboard meeting. > > > The consensus is that we didn't hear for real need of more than > > > 128 threads, except for configuration usability convenience. > > > > > > Now looking again at the code, this is how it is defined: > > > > > > option('max_lcores', type: 'string', value: 'default', description: > > >'Set maximum number of cores/threads supported by EAL; > > >"default" is different per-arch, "detect" detects the > > > number of cores on the build machine.') > > > config/x86/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128) > > > config/ppc/meson.build: dpdk_conf.set('RTE_MAX_LCORE', 128) > > > config/arm/meson.build: it goes from 4 to 1280! > > > > > > So I feel it is not fair to reject this AMD patch if we allow Arm to go > > > beyond. > > > Techboard, let's have a quick decision please for 21.11-rc4. > > > > > I would support increasing the default value for x86 in this release. > > This patch is not increasing the default for all x86, only for some > CPUs as given at compilation time. > I think it is the same logic as Arm CPU-specific compilation. > > > I believe Dave H. 
had some patches to decrease the memory footprint > > overhead of such a change. I don't believe that they were merged, > > and while it's a bit late for 21.11 now, those should be considered > > for 22.03 release and then maybe for backport.
[PATCH] mempool: fix the description of some function return values
In rte_mempool_ring.c, the committer uses the symbol ENOBUFS to describe the return value of function common_ring_sc_dequeue, but in rte_mempool.h, the symbol ENOENT is used to describe the return value of function rte_mempool_get. If the user of dpdk uses the symbol ENOENT as the judgment condition of the return value, it may cause some abnormal phenomena in their own programs, such as when the mempool space is exhausted. Fixes: ea5dd2744b90 ("mempool: cache optimisations") Signed-off-by: Zhiheng Chen --- lib/mempool/rte_mempool.h | 14 +++--- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h index 1e7a3c1527..2a7d3455ef 100644 --- a/lib/mempool/rte_mempool.h +++ b/lib/mempool/rte_mempool.h @@ -1521,7 +1521,7 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table, * Get several objects from the mempool. * * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when + * subsequently from the common pool. Note that it can return -ENOBUFS when * the local cache and common pool are empty, even if cache from other * lcores are full. * @@ -1535,7 +1535,7 @@ rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table, * A pointer to a mempool cache structure. May be NULL if not needed. * @return * - 0: Success; objects taken. - * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved. */ static __rte_always_inline int rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, @@ -1557,7 +1557,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, * mempool creation time (see flags). * * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when + * subsequently from the common pool. 
Note that it can return -ENOBUFS when * the local cache and common pool are empty, even if cache from other * lcores are full. * @@ -1569,7 +1569,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, * The number of objects to get from the mempool to obj_table. * @return * - 0: Success; objects taken - * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved. */ static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) @@ -1588,7 +1588,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) * mempool creation (see flags). * * If cache is enabled, objects will be retrieved first from cache, - * subsequently from the common pool. Note that it can return -ENOENT when + * subsequently from the common pool. Note that it can return -ENOBUFS when * the local cache and common pool are empty, even if cache from other * lcores are full. * @@ -1598,7 +1598,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) * A pointer to a void * pointer (object) that will be filled. * @return * - 0: Success; objects taken. - * - -ENOENT: Not enough entries in the mempool; no object is retrieved. + * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved. */ static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p) @@ -1777,7 +1777,7 @@ void rte_mempool_list_dump(FILE *f); * The pointer to the mempool matching the name, or NULL if not found. * NULL on error * with rte_errno set appropriately. Possible rte_errno values include: - *- ENOENT - required entry not available to return. + *- ENOBUFS - required entry not available to return. * */ struct rte_mempool *rte_mempool_lookup(const char *name); -- 2.32.0
[Bug 914] Deprecation warning from call-sphinx-build.py
https://bugs.dpdk.org/show_bug.cgi?id=914 Bug ID: 914 Summary: Deprecation warning from call-sphinx-build.py Product: DPDK Version: 21.11 Hardware: All OS: All Status: UNCONFIRMED Severity: normal Priority: Normal Component: other Assignee: dev@dpdk.org Reporter: jerinjac...@gmail.com Target Milestone: --- Reproducer: 1) Install python 3.12 2) meson -Denable_docs=true build 3) ninja -C build Error log: ../buildtools/call-sphinx-build.py:10: DeprecationWarning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives -- You are receiving this mail because: You are the assignee for the bug.
RE: [PATCH] mempool: fix the description of some function return values
> From: Zhiheng Chen [mailto:chenzhiheng0...@gmail.com] > Sent: Saturday, 18 December 2021 18.14 > > In rte_mempool_ring.c, the committer uses the symbol ENOBUFS to > describe the return value of function common_ring_sc_dequeue, but in > rte_mempool.h, the symbol ENOENT is used to describe the return value > of function rte_mempool_get. If the user of dpdk uses the symbol ENOENT > as the judgment condition of the return value, it may cause some > abnormal phenomena in their own programs, such as when the mempool > space is exhausted. > > Fixes: ea5dd2744b90 ("mempool: cache optimisations") > > Signed-off-by: Zhiheng Chen > --- > lib/mempool/rte_mempool.h | 14 +++--- > 1 file changed, 7 insertions(+), 7 deletions(-) > > diff --git a/lib/mempool/rte_mempool.h b/lib/mempool/rte_mempool.h > index 1e7a3c1527..2a7d3455ef 100644 > --- a/lib/mempool/rte_mempool.h > +++ b/lib/mempool/rte_mempool.h > @@ -1521,7 +1521,7 @@ rte_mempool_do_generic_get(struct rte_mempool > *mp, void **obj_table, > * Get several objects from the mempool. > * > * If cache is enabled, objects will be retrieved first from cache, > - * subsequently from the common pool. Note that it can return -ENOENT > when > + * subsequently from the common pool. Note that it can return -ENOBUFS > when > * the local cache and common pool are empty, even if cache from other > * lcores are full. > * > @@ -1535,7 +1535,7 @@ rte_mempool_do_generic_get(struct rte_mempool > *mp, void **obj_table, > * A pointer to a mempool cache structure. May be NULL if not > needed. > * @return > * - 0: Success; objects taken. > - * - -ENOENT: Not enough entries in the mempool; no object is > retrieved. > + * - -ENOBUFS: Not enough entries in the mempool; no object is > retrieved. > */ > static __rte_always_inline int > rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, > @@ -1557,7 +1557,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, > void **obj_table, > * mempool creation time (see flags). 
> * > * If cache is enabled, objects will be retrieved first from cache, > - * subsequently from the common pool. Note that it can return -ENOENT > when > + * subsequently from the common pool. Note that it can return -ENOBUFS > when > * the local cache and common pool are empty, even if cache from other > * lcores are full. > * > @@ -1569,7 +1569,7 @@ rte_mempool_generic_get(struct rte_mempool *mp, > void **obj_table, > * The number of objects to get from the mempool to obj_table. > * @return > * - 0: Success; objects taken > - * - -ENOENT: Not enough entries in the mempool; no object is > retrieved. > + * - -ENOBUFS: Not enough entries in the mempool; no object is > retrieved. > */ > static __rte_always_inline int > rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, > unsigned int n) > @@ -1588,7 +1588,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void > **obj_table, unsigned int n) > * mempool creation (see flags). > * > * If cache is enabled, objects will be retrieved first from cache, > - * subsequently from the common pool. Note that it can return -ENOENT > when > + * subsequently from the common pool. Note that it can return -ENOBUFS > when > * the local cache and common pool are empty, even if cache from other > * lcores are full. > * > @@ -1598,7 +1598,7 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void > **obj_table, unsigned int n) > * A pointer to a void * pointer (object) that will be filled. > * @return > * - 0: Success; objects taken. > - * - -ENOENT: Not enough entries in the mempool; no object is > retrieved. > + * - -ENOBUFS: Not enough entries in the mempool; no object is > retrieved. > */ > static __rte_always_inline int > rte_mempool_get(struct rte_mempool *mp, void **obj_p) Good catch! These functions call some underlying functions, and the descriptions of those functions don't mention -ENOENT or -ENOBUFS; they only say <0 to indicate error. 
Please consider updating the underlying functions too, so the chain of descriptions is complete. The description of this function should not be based on code inspection of its underlying public functions, it should be based on the descriptions of its underlying functions. Alternatively, describe the return value like the underlying functions: <0 means error returned from dequeue function. > @@ -1777,7 +1777,7 @@ void rte_mempool_list_dump(FILE *f); > * The pointer to the mempool matching the name, or NULL if not > found. > * NULL on error > * with rte_errno set appropriately. Possible rte_errno values > include: > - *- ENOENT - required entry not available to return. > + *- ENOBUFS - required entry not available to return. > * > */ > struct rte_mempool *rte_mempool_lookup(const char *name); This one is using ENOENT, so don't change the description. Ref: http://code.dpdk.org/dpdk/latest/source/lib/mempool/rte_mempool.c#L1356
Re: [dpdk-dev] [PATCH 22.02 1/2] common/cnxk: support to set channel mask for SDP interfaces
On Tue, Nov 9, 2021 at 3:12 PM wrote: > > From: Satheesh Paul > > ROC changes to support setting channel mask for SDP interfaces. > > Signed-off-by: Satheesh Paul Series Acked-by: Jerin Jacob Series applied to dpdk-next-net-mrvl/for-next-net. Thanks. > --- > drivers/common/cnxk/roc_npc.c | 13 + > drivers/common/cnxk/roc_npc.h | 3 +++ > drivers/common/cnxk/roc_npc_mcam.c | 10 ++ > drivers/common/cnxk/roc_npc_priv.h | 3 +++ > 4 files changed, 29 insertions(+) > > diff --git a/drivers/common/cnxk/roc_npc.c b/drivers/common/cnxk/roc_npc.c > index 503c74748f..d18dfd4259 100644 > --- a/drivers/common/cnxk/roc_npc.c > +++ b/drivers/common/cnxk/roc_npc.c > @@ -1152,6 +1152,19 @@ roc_npc_flow_create(struct roc_npc *roc_npc, const > struct roc_npc_attr *attr, > int rc; > > npc->channel = roc_npc->channel; > + npc->is_sdp_link = roc_nix_is_sdp(roc_npc->roc_nix); > + if (npc->is_sdp_link) { > + if (roc_npc->is_sdp_mask_set) { > + npc->sdp_channel = roc_npc->sdp_channel; > + npc->sdp_channel_mask = roc_npc->sdp_channel_mask; > + } else { > + /* By default set the channel and mask to cover > +* the whole SDP channel range. 
> +*/ > + npc->sdp_channel = (uint16_t)NIX_CHAN_SDP_CH_START; > + npc->sdp_channel_mask = > (uint16_t)NIX_CHAN_SDP_CH_START; > + } > + } > > flow = plt_zmalloc(sizeof(*flow), 0); > if (flow == NULL) { > diff --git a/drivers/common/cnxk/roc_npc.h b/drivers/common/cnxk/roc_npc.h > index e13d557136..8c24126ae8 100644 > --- a/drivers/common/cnxk/roc_npc.h > +++ b/drivers/common/cnxk/roc_npc.h > @@ -195,6 +195,9 @@ struct roc_npc { > uint64_t rx_parse_nibble; > /* Parsed RSS Flowkey cfg for current flow being created */ > uint32_t flowkey_cfg_state; > + bool is_sdp_mask_set; > + uint16_t sdp_channel; > + uint16_t sdp_channel_mask; > > #define ROC_NPC_MEM_SZ (5 * 1024) > uint8_t reserved[ROC_NPC_MEM_SZ]; > diff --git a/drivers/common/cnxk/roc_npc_mcam.c > b/drivers/common/cnxk/roc_npc_mcam.c > index ba7f89b45b..80851d6f9f 100644 > --- a/drivers/common/cnxk/roc_npc_mcam.c > +++ b/drivers/common/cnxk/roc_npc_mcam.c > @@ -575,6 +575,16 @@ npc_mcam_alloc_and_write(struct npc *npc, struct > roc_npc_flow *flow, > flow->npc_action |= (uint64_t)pf_func << 4; > flow->mcam_data[0] |= (uint64_t)inl_dev->channel; > flow->mcam_mask[0] |= (uint64_t)inl_dev->chan_mask; > + } else if (npc->is_sdp_link) { > + req->entry_data.kw[0] &= ~(GENMASK(11, 0)); > + req->entry_data.kw_mask[0] &= ~(GENMASK(11, 0)); > + req->entry_data.kw[0] |= (uint64_t)npc->sdp_channel; > + req->entry_data.kw_mask[0] |= > + (uint64_t)npc->sdp_channel_mask; > + flow->mcam_data[0] &= ~(GENMASK(11, 0)); > + flow->mcam_mask[0] &= ~(GENMASK(11, 0)); > + flow->mcam_data[0] |= (uint64_t)npc->sdp_channel; > + flow->mcam_mask[0] |= (uint64_t)npc->sdp_channel_mask; > } else { > req->entry_data.kw[0] |= (uint64_t)npc->channel; > req->entry_data.kw_mask[0] |= (BIT_ULL(12) - 1); > diff --git a/drivers/common/cnxk/roc_npc_priv.h > b/drivers/common/cnxk/roc_npc_priv.h > index 712302bc5c..86c10ea082 100644 > --- a/drivers/common/cnxk/roc_npc_priv.h > +++ b/drivers/common/cnxk/roc_npc_priv.h > @@ -360,6 +360,9 @@ struct npc { > 
uint32_t keyw[NPC_MAX_INTF];/* max key + data len bits */ > uint32_t mcam_entries; /* mcam entries supported */ > uint16_t channel; /* RX Channel number */ > + bool is_sdp_link; > + uint16_t sdp_channel; > + uint16_t sdp_channel_mask; > uint32_t rss_grps; /* rss groups supported */ > uint16_t flow_prealloc_size;/* Pre allocated mcam size */ > uint16_t flow_max_priority; /* Max priority for flow */ > -- > 2.25.4 >
[PATCH v1] eventdev/crypto_adapter: move crypto ops to circular buffer
Move crypto ops to circular buffer to retain crypto ops when cryptodev/eventdev are temporarily full Update crypto adapter caps get to return SW_CAP if PMD callback is not registered Signed-off-by: Ganapati Kundapura diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c index d840803..4469a89 100644 --- a/lib/eventdev/rte_event_crypto_adapter.c +++ b/lib/eventdev/rte_event_crypto_adapter.c @@ -25,11 +25,27 @@ #define CRYPTO_ADAPTER_MEM_NAME_LEN 32 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100 +#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE) +#define CRYPTO_ADAPTER_BUFFER_SZ 1024 + /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD * iterations of eca_crypto_adapter_enq_run() */ #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024 +struct crypto_ops_circular_buffer { + /* index of head element in circular buffer */ + uint16_t head; + /* index of tail element in circular buffer */ + uint16_t tail; + /* number elements in buffer */ + uint16_t count; + /* size of circular buffer */ + uint16_t size; + /* Pointer to hold rte_crypto_ops for batching */ + struct rte_crypto_op **op_buffer; +} __rte_cache_aligned; + struct event_crypto_adapter { /* Event device identifier */ uint8_t eventdev_id; @@ -47,6 +63,8 @@ struct event_crypto_adapter { struct crypto_device_info *cdevs; /* Loop counter to flush crypto ops */ uint16_t transmit_loop_count; + /* Circular buffer for batching crypto ops to eventdev */ + struct crypto_ops_circular_buffer ebuf; /* Per instance stats structure */ struct rte_event_crypto_adapter_stats crypto_stats; /* Configuration callback for rte_service configuration */ @@ -93,8 +111,8 @@ struct crypto_device_info { struct crypto_queue_pair_info { /* Set to indicate queue pair is enabled */ bool qp_enabled; - /* Pointer to hold rte_crypto_ops for batching */ - struct rte_crypto_op **op_buffer; + /* Circular buffer for batching crypto ops to cdev */ + struct crypto_ops_circular_buffer cbuf; /* No 
of crypto ops accumulated */ uint8_t len; } __rte_cache_aligned; @@ -141,6 +159,77 @@ eca_init(void) return 0; } +static inline bool +eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp) +{ + return bufp->count >= BATCH_SIZE; +} + +static inline void +eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp) +{ + rte_free(bufp->op_buffer); +} + +static inline int +eca_circular_buffer_init(const char *name, +struct crypto_ops_circular_buffer *bufp, +uint16_t sz) +{ + bufp->op_buffer = rte_zmalloc(name, + sizeof(struct rte_crypto_op *) * sz, + 0); + if (bufp->op_buffer == NULL) + return -ENOMEM; + + bufp->size = sz; + return 0; +} + +static inline int +eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp, + struct rte_crypto_op *op) +{ + uint16_t *tailp = &bufp->tail; + + bufp->op_buffer[*tailp] = op; + *tailp = (*tailp + 1) % bufp->size; + bufp->count++; + + return 0; +} + +static inline int +eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, + uint8_t cdev_id, uint16_t qp_id, + uint16_t *nb_ops_flushed) +{ + uint16_t n = 0; + uint16_t *headp = &bufp->head; + uint16_t *tailp = &bufp->tail; + struct rte_crypto_op **ops = bufp->op_buffer; + + if (*tailp > *headp) + n = *tailp - *headp; + else if (*tailp < *headp) + n = bufp->size - *headp; + else { + *nb_ops_flushed = 0; + return 0; /* buffer empty */ + } + + *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, + &ops[*headp], n); + bufp->count -= *nb_ops_flushed; + if (!bufp->count) { + *headp = 0; + *tailp = 0; + } else + *headp = (*headp + *nb_ops_flushed) % bufp->size; + + return *nb_ops_flushed == n ? 
0 : -1; +} + static inline struct event_crypto_adapter * eca_id_to_adapter(uint8_t id) { @@ -237,10 +326,19 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id, return -ENOMEM; } + if (eca_circular_buffer_init("eca_edev_circular_buffer", +&adapter->ebuf, +CRYPTO_ADAPTER_BUFFER_SZ)) { + RTE_EDEV_LOG_ERR("Failed to get mem for edev buffer"); + rte_free(adapter); + return -ENOMEM; + } + ret = rte_event_dev_info_get(dev_id, &dev_in
Re: [dpdk-dev] [PATCH ] common/cnxk: fix nibble parsing order when dumping MCAM
On Wed, Nov 24, 2021 at 1:43 PM wrote: > > From: Satheesh Paul > > Fix the order in which layer flags and layer type fields > are parsed when dumping the MCAM data. > > Fixes: 9869c39918 ("common/cnxk: support flow entry dump") > Cc: sta...@dpdk.org > > Signed-off-by: Satheesh Paul Acked-by: Jerin Jacob Applied to dpdk-next-net-mrvl/for-next-net. Thanks > --- > drivers/common/cnxk/roc_npc_mcam_dump.c | 40 - > 1 file changed, 20 insertions(+), 20 deletions(-) > > diff --git a/drivers/common/cnxk/roc_npc_mcam_dump.c > b/drivers/common/cnxk/roc_npc_mcam_dump.c > index 19b4901a52..278056591e 100644 > --- a/drivers/common/cnxk/roc_npc_mcam_dump.c > +++ b/drivers/common/cnxk/roc_npc_mcam_dump.c > @@ -159,6 +159,12 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > + if (rx_parse->laflags) { > + data = npc_get_nibbles(flow, 2, offset); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LA_FLAGS:%#02X\n", data); > + offset += 8; > + } > + > if (rx_parse->latype) { > data = npc_get_nibbles(flow, 1, offset); > fprintf(file, "\tNPC_PARSE_NIBBLE_LA_LTYPE:%s\n", > @@ -166,9 +172,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->laflags) { > + if (rx_parse->lbflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LA_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LB_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -179,9 +185,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->lbflags) { > + if (rx_parse->lcflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LB_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LC_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -192,9 +198,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->lcflags) { > + if (rx_parse->ldflags) { > 
data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LC_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LD_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -205,9 +211,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->ldflags) { > + if (rx_parse->leflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LD_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LE_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -218,9 +224,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->leflags) { > + if (rx_parse->lfflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LE_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LF_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -231,9 +237,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->lfflags) { > + if (rx_parse->lgflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LF_FLAGS:%#02X\n", data); > + fprintf(file, "\tNPC_PARSE_NIBBLE_LG_FLAGS:%#02X\n", data); > offset += 8; > } > > @@ -244,10 +250,9 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > offset += 4; > } > > - if (rx_parse->lgflags) { > + if (rx_parse->lhflags) { > data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LG_FLAGS:%#02X\n", data); > - offset += 8; > + fprintf(file, "\tNPC_PARSE_NIBBLE_LH_FLAGS:%#02X\n", data); > } > > if (rx_parse->lhtype) { > @@ -256,11 +261,6 @@ npc_flow_print_parse_nibbles(FILE *file, struct > roc_npc_flow *flow, > ltype_str[NPC_LID_LH][data]); > offset += 4; > } > - > - if (rx_parse->lhflags) { > - data = npc_get_nibbles(flow, 2, offset); > - fprintf(file, "\tNPC_PARSE_NIBBLE_LH_FLAGS:%#02X\n", data); > - } > } > > static void > -- > 2.25.4 >
[PATCH v1 2/2] eventdev: update crypto caps get to return SW cap
update rte_event_crypto_adapter_caps_get() to return SW_CAP if PMD callback is not registered. Signed-off-by: Ganapati Kundapura diff --git a/lib/eventdev/rte_eventdev.c b/lib/eventdev/rte_eventdev.c index 79b9ea3..6988bf1 100644 --- a/lib/eventdev/rte_eventdev.c +++ b/lib/eventdev/rte_eventdev.c @@ -176,11 +176,15 @@ rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, if (caps == NULL) return -EINVAL; - *caps = 0; + + if (dev->dev_ops->crypto_adapter_caps_get == NULL) + *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP; + else + *caps = 0; return dev->dev_ops->crypto_adapter_caps_get ? (*dev->dev_ops->crypto_adapter_caps_get) - (dev, cdev, caps) : -ENOTSUP; + (dev, cdev, caps) : 0; } int -- 2.6.4
[PATCH v1 1/2] eventdev/crypto_adapter: move crypto ops to circular buffer
Move crypto ops to circular buffer to retain crypto ops when cryptodev/eventdev are temporarily full Signed-off-by: Ganapati Kundapura diff --git a/lib/eventdev/rte_event_crypto_adapter.c b/lib/eventdev/rte_event_crypto_adapter.c index d840803..4469a89 100644 --- a/lib/eventdev/rte_event_crypto_adapter.c +++ b/lib/eventdev/rte_event_crypto_adapter.c @@ -25,11 +25,27 @@ #define CRYPTO_ADAPTER_MEM_NAME_LEN 32 #define CRYPTO_ADAPTER_MAX_EV_ENQ_RETRIES 100 +#define CRYPTO_ADAPTER_OPS_BUFFER_SZ (BATCH_SIZE + BATCH_SIZE) +#define CRYPTO_ADAPTER_BUFFER_SZ 1024 + /* Flush an instance's enqueue buffers every CRYPTO_ENQ_FLUSH_THRESHOLD * iterations of eca_crypto_adapter_enq_run() */ #define CRYPTO_ENQ_FLUSH_THRESHOLD 1024 +struct crypto_ops_circular_buffer { + /* index of head element in circular buffer */ + uint16_t head; + /* index of tail element in circular buffer */ + uint16_t tail; + /* number elements in buffer */ + uint16_t count; + /* size of circular buffer */ + uint16_t size; + /* Pointer to hold rte_crypto_ops for batching */ + struct rte_crypto_op **op_buffer; +} __rte_cache_aligned; + struct event_crypto_adapter { /* Event device identifier */ uint8_t eventdev_id; @@ -47,6 +63,8 @@ struct event_crypto_adapter { struct crypto_device_info *cdevs; /* Loop counter to flush crypto ops */ uint16_t transmit_loop_count; + /* Circular buffer for batching crypto ops to eventdev */ + struct crypto_ops_circular_buffer ebuf; /* Per instance stats structure */ struct rte_event_crypto_adapter_stats crypto_stats; /* Configuration callback for rte_service configuration */ @@ -93,8 +111,8 @@ struct crypto_device_info { struct crypto_queue_pair_info { /* Set to indicate queue pair is enabled */ bool qp_enabled; - /* Pointer to hold rte_crypto_ops for batching */ - struct rte_crypto_op **op_buffer; + /* Circular buffer for batching crypto ops to cdev */ + struct crypto_ops_circular_buffer cbuf; /* No of crypto ops accumulated */ uint8_t len; } __rte_cache_aligned; @@ -141,6 
+159,77 @@ eca_init(void) return 0; } +static inline bool +eca_circular_buffer_batch_ready(struct crypto_ops_circular_buffer *bufp) +{ + return bufp->count >= BATCH_SIZE; +} + +static inline void +eca_circular_buffer_free(struct crypto_ops_circular_buffer *bufp) +{ + rte_free(bufp->op_buffer); +} + +static inline int +eca_circular_buffer_init(const char *name, +struct crypto_ops_circular_buffer *bufp, +uint16_t sz) +{ + bufp->op_buffer = rte_zmalloc(name, + sizeof(struct rte_crypto_op *) * sz, + 0); + if (bufp->op_buffer == NULL) + return -ENOMEM; + + bufp->size = sz; + return 0; +} + +static inline int +eca_circular_buffer_add(struct crypto_ops_circular_buffer *bufp, + struct rte_crypto_op *op) +{ + uint16_t *tailp = &bufp->tail; + + bufp->op_buffer[*tailp] = op; + *tailp = (*tailp + 1) % bufp->size; + bufp->count++; + + return 0; +} + +static inline int +eca_circular_buffer_flush_to_cdev(struct crypto_ops_circular_buffer *bufp, + uint8_t cdev_id, uint16_t qp_id, + uint16_t *nb_ops_flushed) +{ + uint16_t n = 0; + uint16_t *headp = &bufp->head; + uint16_t *tailp = &bufp->tail; + struct rte_crypto_op **ops = bufp->op_buffer; + + if (*tailp > *headp) + n = *tailp - *headp; + else if (*tailp < *headp) + n = bufp->size - *headp; + else { + *nb_ops_flushed = 0; + return 0; /* buffer empty */ + } + + *nb_ops_flushed = rte_cryptodev_enqueue_burst(cdev_id, qp_id, + &ops[*headp], n); + bufp->count -= *nb_ops_flushed; + if (!bufp->count) { + *headp = 0; + *tailp = 0; + } else + *headp = (*headp + *nb_ops_flushed) % bufp->size; + + return *nb_ops_flushed == n ? 
0 : -1; +} + static inline struct event_crypto_adapter * eca_id_to_adapter(uint8_t id) { @@ -237,10 +326,19 @@ rte_event_crypto_adapter_create_ext(uint8_t id, uint8_t dev_id, return -ENOMEM; } + if (eca_circular_buffer_init("eca_edev_circular_buffer", +&adapter->ebuf, +CRYPTO_ADAPTER_BUFFER_SZ)) { + RTE_EDEV_LOG_ERR("Failed to get mem for edev buffer"); + rte_free(adapter); + return -ENOMEM; + } + ret = rte_event_dev_info_get(dev_id, &dev_info); if (ret < 0) { RTE_EDEV_LOG_ERR("Failed to get info fo
[PATCH v3 0/2] Add config file support for l3fwd
This patchset introduces config file support for l3fwd and its lookup methods LPM, FIB, and EM, similar to that of l3fwd-acl. This allows for route rules to be defined in configuration files and edited there instead of in each of the lookup methods hardcoded route tables. Sean Morrissey (2): examples/l3fwd: add config file support for LPM/FIB examples/l3fwd: add config file support for EM doc/guides/sample_app_ug/l3_forward.rst | 89 +++-- examples/l3fwd/em_default_v4.cfg| 17 + examples/l3fwd/em_default_v6.cfg| 17 + examples/l3fwd/l3fwd.h | 35 ++ examples/l3fwd/l3fwd_em.c | 479 ++-- examples/l3fwd/l3fwd_fib.c | 52 +-- examples/l3fwd/l3fwd_lpm.c | 281 +- examples/l3fwd/l3fwd_route.h| 49 ++- examples/l3fwd/lpm_default_v4.cfg | 17 + examples/l3fwd/lpm_default_v6.cfg | 17 + examples/l3fwd/main.c | 99 ++--- 11 files changed, 847 insertions(+), 305 deletions(-) create mode 100644 examples/l3fwd/em_default_v4.cfg create mode 100644 examples/l3fwd/em_default_v6.cfg create mode 100644 examples/l3fwd/lpm_default_v4.cfg create mode 100644 examples/l3fwd/lpm_default_v6.cfg -- 2.25.1
[PATCH v3 1/2] examples/l3fwd: add config file support for LPM/FIB
Add support to define ipv4 and ipv6 forwarding tables from reading from a config file for LPM and FIB, with format similar to l3fwd-acl one. With the removal of the hardcoded route tables for IPv4 and IPv6, these routes have been moved to a separate default config file for use with LPM and FIB. Signed-off-by: Sean Morrissey Signed-off-by: Ravi Kerur Acked-by: Konstantin Ananyev --- examples/l3fwd/l3fwd.h| 35 examples/l3fwd/l3fwd_em.c | 7 + examples/l3fwd/l3fwd_fib.c| 52 +++--- examples/l3fwd/l3fwd_lpm.c| 281 +++--- examples/l3fwd/l3fwd_route.h | 17 +- examples/l3fwd/lpm_default_v4.cfg | 17 ++ examples/l3fwd/lpm_default_v6.cfg | 17 ++ examples/l3fwd/main.c | 99 ++- 8 files changed, 438 insertions(+), 87 deletions(-) create mode 100644 examples/l3fwd/lpm_default_v4.cfg create mode 100644 examples/l3fwd/lpm_default_v6.cfg diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h index 38ca19133c..d8b1f971e1 100644 --- a/examples/l3fwd/l3fwd.h +++ b/examples/l3fwd/l3fwd.h @@ -58,6 +58,30 @@ #endif #define HASH_ENTRY_NUMBER_DEFAULT 16 +/*Log file related character defs. 
*/ +#define COMMENT_LEAD_CHAR ('#') +#define ROUTE_LEAD_CHAR('R') + +#defineIPV6_ADDR_LEN 16 +#defineIPV6_ADDR_U16 (IPV6_ADDR_LEN / sizeof(uint16_t)) +#defineIPV6_ADDR_U32 (IPV6_ADDR_LEN / sizeof(uint32_t)) + +#define GET_CB_FIELD(in, fd, base, lim, dlm) do {\ + unsigned long val; \ + char *end; \ + errno = 0; \ + val = strtoul((in), &end, (base)); \ + if (errno != 0 || end[0] != (dlm) || val > (lim)) \ + return -EINVAL; \ + (fd) = (typeof(fd))val; \ + (in) = end + 1; \ +} while (0) + +struct parm_cfg { + const char *rule_ipv4_name; + const char *rule_ipv6_name; +}; + struct mbuf_table { uint16_t len; struct rte_mbuf *m_table[MAX_PKT_BURST]; @@ -96,6 +120,8 @@ extern xmm_t val_eth[RTE_MAX_ETHPORTS]; extern struct lcore_conf lcore_conf[RTE_MAX_LCORE]; +extern struct parm_cfg parm_config; + /* Send burst of packets on an output interface */ static inline int send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port) @@ -183,6 +209,12 @@ int init_mem(uint16_t portid, unsigned int nb_mbuf); /* Function pointers for LPM, EM or FIB functionality. */ +void +read_config_files_lpm(void); + +void +read_config_files_em(void); + void setup_lpm(const int socketid); @@ -286,4 +318,7 @@ fib_get_ipv4_l3fwd_lookup_struct(const int socketid); void * fib_get_ipv6_l3fwd_lookup_struct(const int socketid); +int +is_bypass_line(const char *buff); + #endif /* __L3_FWD_H__ */ diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c index 5cc4a4d979..4953cdae4e 100644 --- a/examples/l3fwd/l3fwd_em.c +++ b/examples/l3fwd/l3fwd_em.c @@ -972,6 +972,13 @@ em_event_main_loop_tx_q_burst_vector(__rte_unused void *dummy) return 0; } +/* Load rules from the input file */ +void +read_config_files_em(void) +{ + /* Empty till config file support added to EM */ +} + /* Initialize exact match (hash) parameters. 
8< */ void setup_hash(const int socketid) diff --git a/examples/l3fwd/l3fwd_fib.c b/examples/l3fwd/l3fwd_fib.c index 2110459cc3..003721c908 100644 --- a/examples/l3fwd/l3fwd_fib.c +++ b/examples/l3fwd/l3fwd_fib.c @@ -583,7 +583,7 @@ setup_fib(const int socketid) struct rte_eth_dev_info dev_info; struct rte_fib6_conf config; struct rte_fib_conf config_ipv4; - unsigned int i; + int i; int ret; char s[64]; char abuf[INET6_ADDRSTRLEN]; @@ -603,37 +603,39 @@ setup_fib(const int socketid) "Unable to create the l3fwd FIB table on socket %d\n", socketid); + /* Populate the fib ipv4 table. */ - for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) { + for (i = 0; i < route_num_v4; i++) { struct in_addr in; /* Skip unused ports. */ - if ((1 << ipv4_l3fwd_route_array[i].if_out & + if ((1 << route_base_v4[i].if_out & enabled_port_mask) == 0) continue; - rte_eth_dev_info_get(ipv4_l3fwd_route_array[i].if_out, + rte_eth_dev_info_get(route_base_v4[i].if_out, &dev_info); ret = rte_fib_add(ipv4_l3fwd_fib_lookup_struct[socketid], - ipv4_l3fwd_route_array[i].ip, - ipv4_l3fwd_route_array[i].depth, - ipv4_l3fwd_route_array[i].if_o
[PATCH v3 2/2] examples/l3fwd: add config file support for EM
Add support to define ipv4 and ipv6 forwarding tables from reading from a config file for EM with a format similar to l3fwd-acl one. With the removal of the hardcoded route tables for IPv4 and IPv6 from 'l3fwd_em', these routes have been moved to a separate default config file for use with EM. Related l3fwd docs have been updated to reflect these changes. Signed-off-by: Sean Morrissey Signed-off-by: Ravi Kerur --- doc/guides/sample_app_ug/l3_forward.rst | 89 +++-- examples/l3fwd/em_default_v4.cfg| 17 + examples/l3fwd/em_default_v6.cfg| 17 + examples/l3fwd/l3fwd_em.c | 474 ++-- examples/l3fwd/l3fwd_route.h| 38 +- 5 files changed, 413 insertions(+), 222 deletions(-) create mode 100644 examples/l3fwd/em_default_v4.cfg create mode 100644 examples/l3fwd/em_default_v6.cfg diff --git a/doc/guides/sample_app_ug/l3_forward.rst b/doc/guides/sample_app_ug/l3_forward.rst index 6d7d7c5cc1..01d86db95d 100644 --- a/doc/guides/sample_app_ug/l3_forward.rst +++ b/doc/guides/sample_app_ug/l3_forward.rst @@ -47,6 +47,7 @@ and loaded into the LPM or FIB object at initialization time. In the sample application, hash-based and FIB-based forwarding supports both IPv4 and IPv6. LPM-based forwarding supports IPv4 only. +During the initialization phase route rules for IPv4 and IPv6 are read from rule files. Compiling the Application - @@ -61,6 +62,8 @@ Running the Application The application has a number of command line options:: ./dpdk-l3fwd [EAL options] -- -p PORTMASK + --rule_ipv4=FILE + --rule_ipv6=FILE [-P] [--lookup LOOKUP_METHOD] --config(port,queue,lcore)[,(port,queue,lcore)] @@ -82,6 +85,11 @@ Where, * ``-p PORTMASK:`` Hexadecimal bitmask of ports to configure +* ``--rule_ipv4=FILE:`` specify the ipv4 rules entries file. + Each rule occupies one line. + +* ``--rule_ipv6=FILE:`` specify the ipv6 rules entries file. + * ``-P:`` Optional, sets all ports to promiscuous mode so that packets are accepted regardless of the packet's Ethernet MAC destination address.
Without this option, only packets with the Ethernet MAC destination address set to the Ethernet address of the port are accepted. @@ -135,7 +143,7 @@ To enable L3 forwarding between two ports, assuming that both ports are in the s .. code-block:: console -.//examples/dpdk-l3fwd -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" +.//examples/dpdk-l3fwd -l 1,2 -n 4 -- -p 0x3 --config="(0,0,1),(1,0,2)" --rule_ipv4="rule_ipv4.cfg" --rule_ipv6="rule_ipv6.cfg" In this command: @@ -157,19 +165,23 @@ In this command: | | | | | +--+---+---+-+ +* The -rule_ipv4 option specifies the reading of IPv4 rules sets from the rule_ipv4.cfg file + +* The -rule_ipv6 option specifies the reading of IPv6 rules sets from the rule_ipv6.cfg file. + To use eventdev mode with sync method **ordered** on above mentioned environment, Following is the sample command: .. code-block:: console -.//examples/dpdk-l3fwd -l 0-3 -n 4 -a -- -p 0x3 --eventq-sched=ordered +.//examples/dpdk-l3fwd -l 0-3 -n 4 -a -- -p 0x3 --eventq-sched=ordered --rule_ipv4="rule_ipv4.cfg" --rule_ipv6="rule_ipv6.cfg" or .. code-block:: console .//examples/dpdk-l3fwd -l 0-3 -n 4 -a \ - -- -p 0x03 --mode=eventdev --eventq-sched=ordered + -- -p 0x03 --mode=eventdev --eventq-sched=ordered --rule_ipv4="rule_ipv4.cfg" --rule_ipv6="rule_ipv6.cfg" In this command: @@ -192,7 +204,7 @@ scheduler. Following is the sample command: .. code-block:: console -.//examples/dpdk-l3fwd -l 0-7 -s 0xf -n 4 --vdev event_sw0 -- -p 0x3 --mode=eventdev --eventq-sched=ordered +.//examples/dpdk-l3fwd -l 0-7 -s 0xf -n 4 --vdev event_sw0 -- -p 0x3 --mode=eventdev --eventq-sched=ordered --rule_ipv4="rule_ipv4.cfg" --rule_ipv6="rule_ipv6.cfg" In case of eventdev mode, *--config* option is not used for ethernet port configuration. Instead each ethernet port will be configured with mentioned @@ -216,6 +228,49 @@ The following sections provide some explanation of the sample application code. 
the initialization and run-time paths are very similar to those of the :doc:`l2_forward_real_virtual` and :doc:`l2_forward_event`. The following sections describe aspects that are specific to the L3 Forwarding sample application. +Parse Rules from File +~ + +The application parses the rules from the file and adds them to the appropriate route table by calling the appropriate function. +It ignores empty and comment lines, and parses and validates the rules it reads. +If errors are detected, the application exits with messages to
[PATCH v1] gpu/cuda: fix memory list cleanup
From: Elena Agostini Memory list cleanup (called by cuda_mem_free) was not properly set the new head of the list when deleting an entry. Fixes: 1306a73b1958 ("gpu/cuda: introduce CUDA driver") Signed-off-by: Elena Agostini --- drivers/gpu/cuda/cuda.c | 6 -- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/cuda/cuda.c b/drivers/gpu/cuda/cuda.c index 49da215af4..afd4b92a93 100644 --- a/drivers/gpu/cuda/cuda.c +++ b/drivers/gpu/cuda/cuda.c @@ -447,9 +447,11 @@ mem_list_del_item(cuda_ptr_key pk) return -EINVAL; /* if key is in head */ - if (mem_alloc_list_cur->prev == NULL) + if (mem_alloc_list_cur->prev == NULL) { mem_alloc_list_head = mem_alloc_list_cur->next; - else { + if (mem_alloc_list_head != NULL) + mem_alloc_list_head->prev = NULL; + } else { mem_alloc_list_cur->prev->next = mem_alloc_list_cur->next; if (mem_alloc_list_cur->next != NULL) mem_alloc_list_cur->next->prev = mem_alloc_list_cur->prev; -- 2.17.1
Re: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select is supported before write
On 12/20/21 20:15, Wang, Haiyue wrote: -Original Message- From: Stephen Douthit Sent: Tuesday, December 21, 2021 05:33 To: Wang, Haiyue ; Lu, Wenzhuo ; Changchun Ouyang ; Zhang, Helin Cc: dev@dpdk.org; Wang, Wen ; sta...@dpdk.org Subject: Re: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select is supported before write On 12/20/21 02:53, Wang, Haiyue wrote: -Original Message- From: Stephen Douthit Sent: Tuesday, December 7, 2021 06:19 To: Wang, Haiyue ; Lu, Wenzhuo ; Changchun Ouyang ; Zhang, Helin Cc: dev@dpdk.org; Wen Wang ; Stephen Douthit ; sta...@dpdk.org Subject: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select is supported before write Make sure an SFP is really a SFF-8472 device that supports the optional soft rate select feature before just blindly poking those I2C registers. Skip all I2C traffic if we know there's no SFP. Fixes: f3430431aba ("ixgbe/base: add SFP+ dual-speed support") Cc: sta...@dpdk.org Signed-off-by: Stephen Douthit --- /* Set RS0 */ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, IXGBE_I2C_EEPROM_DEV_ADDR2, diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h index ceefbb3e68..cd57ce040f 100644 --- a/drivers/net/ixgbe/base/ixgbe_phy.h +++ b/drivers/net/ixgbe/base/ixgbe_phy.h @@ -21,6 +21,7 @@ #define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 #define IXGBE_SFF_CABLE_SPEC_COMP0x3C #define IXGBE_SFF_SFF_8472_SWAP 0x5C +#define IXGBE_SFF_SFF_8472_EOPT 0x5D Looks like this is YOUR platform specific, then this patchset can't be merged. : - ( This isn't anything unique to our hardware, these values are coming from the SFF-8472 SFP+ I2C specification. The ability to do a soft rate select via I2C is an optional feature, and modules that support it are supposed to set bit 3 in byte 93 (0x5d), the "Enhanced Options" register, to advertise the functionality. Please see section 8.10 and Table 8-6 in the SFF-8472 spec. 
Checking the RATE_SELECT bit flag may be overkill since the transceiver is supposed to ignore writes to rate select control bits if the feature isn't implemented. I can drop that check if you like, but the other checks for a 8472 device (vs 8079) aren't anything different than what already happens in the driver elsewhere[1]. I'd argue that testing that a feature is supported in hardware before trying to use it is normal driver behavior. If instead you mean that the entire series is somehow applicable only to our hardware, I'm not sure why. That hotplug issue isn't seen on the same hardware when using the Linux driver; so it's a dpdk problem (at least on C3000 ixgbe devs), and not a I can't find your related fix in two official Linux drivers: There's no submission from me on the hotplug issue for the mainline, because the issue isn't present in Linux, only in DPDK. https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/intel/ixgbe https://www.intel.com/content/www/us/en/download/14302/14687/intel-network-adapter-driver-for-pcie-intel-10-gigabit-ethernet-network-connections-under-linux.html? Normally, DPDK keeps sync with this kind of release. hardware problem. Fixing the hotplug/rateswap issue was my primary goal, the other patches fix problems I found along the way while debugging. I can also reproduce the hotplug/rateswap issue on the PLCC-B, an Intel reference design for the C3000 family, so again, not unique to this platform. I guess this is just in C3000 reference board SDK ? It's the board covered by Intel Doc # 574437. I recommend you submit the fix to kernel firstly, you will get more experts' reviews and fully test: Since patch 3 isn't directly related to the hotplug issue should I pull it from the series for v3 to keep the hotplug fixes moving forward here, and in parallel submit just that one to Linux? 
Thanks, Steve https://patchwork.ozlabs.org/project/intel-wired-lan/list/ https://lists.osuosl.org/mailman/listinfo/intel-wired-lan Please let me know if that addresses your concerns, or if I've missed your point. Thanks, Steve [1] https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/net/ethernet/intel/ixg be/ixgbe_ethtool.c?h=v5.16-rc6 #define IXGBE_SFF_SFF_8472_COMP 0x5E #define IXGBE_SFF_SFF_8472_OSCB 0x6E #define IXGBE_SFF_SFF_8472_ESCB 0x76 @@ -48,6 +49,8 @@ #define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 -- 2.31.1
Re: [PATCH 3/7] net/bonding: change mbuf pool and ring allocation
Hi Connor, On 12/20/21, 9:03 PM, "Min Hu (Connor)" wrote: > Hi, Sanford, > > There is *NO* benefit for the consumer thread (interrupt thread > > executing tx_machine()) to have caches on per-slave LACPDU pools. > > The interrupt thread is a control thread, i.e., a non-EAL thread. > > Its lcore_id is LCORE_ID_ANY, so it has no "default cache" in any > > mempool. > Well, sorry, I forgot that interrupt thread is non-EAL thread. No problem. (I added a temporary rte_log statement in tx_machine to make sure lcore_id == LCORE_ID_ANY.) > > There is little or no benefit for active data-plane threads to have > > caches on per-slave LACPDU pools, because on each pool, the producer > > thread puts back, at most, one mbuf per second. There is not much > > contention with the consumer (interrupt thread). > > > > I contend that caches are not necessary for these private LACPDU > I agree with you. Thanks. > > I believe there is a mistake in the ring comments (in 3 places). > > It would be better if they replace "free" with "full": > > "... to differentiate a *full* ring from an empty ring." > > > Well, I still can not understand it. I think the ring size is N, it > should store N items, why "N - 1" items.? > Hope for your description, thanks. Here is an excellent article that describes ring buffers, empty vs full, N-1, etc. https://embedjournal.com/implementing-circular-buffer-embedded-c/#the-full-vs-empty-problem > >> To fix the bug, how about just setting the flags "RING_F_EXACT_SZ" > > > > Yes, this is a good idea. I will look for examples or test code that > > use this flag. > Yes, if fixed, LGTM. I will use RING_F_EXACT_SZ flag in the next version of the patchset. I did not know about that flag. rte_ring_create(... N_PKTS ... RING_F_EXACT_SZ) ... is equivalent to, and looks cleaner than ... rte_ring_create(... rte_align32pow2(N_PKTS + 1) ... 0) I plan to create a separate patchset to update the comments in rte_ring.h, re RING_F_EXACT_SZ and "free" vs "full". 
-- Regards, Robert Sanford
[PATCH v2 0/8] net/bonding: fixes and LACP short timeout
This patchset makes the following changes to net/bonding: - Clean up minor errors in spelling, whitespace, C++ wrappers, and comments. - Replace directly overwriting of slave port's rte_eth_conf by copying it, but only updating it via rte_eth_dev_configure(). - Make minor changes to allocation of mbuf pool and rx/tx rings. - Add support for enabling LACP short timeout, i.e., link partner can use fast periodic time interval between transmits. - Include bond_8023ad and bond_alb in doxygen. - Remove self from Timers maintainers. - Add API stubs to net/ring PMD. - Add LACP short timeout to tests. V2 changes: - Additional typo and whitespace corrections. - Minor changes to LACP private rings creation. - Add net/ring API stubs patch. - Insert extra "bond_handshake" to LACP short timeout autotest. Robert Sanford (8): net/bonding: fix typos and whitespace net/bonding: fix bonded dev configuring slave dev net/bonding: change mbuf pool and ring creation net/bonding: support enabling LACP short timeout net/bonding: add bond_8023ad and bond_alb to doc Remove self from Timers maintainers. net/ring: add promiscuous and allmulticast API stubs net/bonding: add LACP short timeout to tests MAINTAINERS | 1 - app/test-pmd/cmdline.c| 81 +- app/test/test_link_bonding_mode4.c| 98 ++- doc/api/doxy-api-index.md | 2 + drivers/net/bonding/eth_bond_8023ad_private.h | 15 ++-- drivers/net/bonding/rte_eth_bond_8023ad.c | 58 ++-- drivers/net/bonding/rte_eth_bond_8023ad.h | 18 +++-- drivers/net/bonding/rte_eth_bond_pmd.c| 43 ++-- drivers/net/ring/rte_eth_ring.c | 28 9 files changed, 272 insertions(+), 72 deletions(-) -- 2.7.4
[PATCH v2 1/8] net/bonding: fix typos and whitespace
- Clean up minor typos in comments, strings, and private names. - Fix whitespace in log messages and function formatting (new line before open brace). - Move closing C++ wrapper to the end of rte_eth_bond_8023ad.h. Signed-off-by: Robert Sanford --- app/test-pmd/cmdline.c| 4 ++-- app/test/test_link_bonding_mode4.c| 28 +-- drivers/net/bonding/eth_bond_8023ad_private.h | 12 ++-- drivers/net/bonding/rte_eth_bond_8023ad.c | 22 ++--- drivers/net/bonding/rte_eth_bond_8023ad.h | 15 +++--- drivers/net/bonding/rte_eth_bond_pmd.c| 13 - 6 files changed, 49 insertions(+), 45 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index 6e10afe..9fd2c2a 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -630,8 +630,8 @@ static void cmd_help_long_parsed(void *parsed_result, "set bonding mac_addr (port_id) (address)\n" " Set the MAC address of a bonded device.\n\n" - "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)" - " Set Aggregation mode for IEEE802.3AD (mode 4)" + "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)\n" + " Set Aggregation mode for IEEE802.3AD (mode 4)\n\n" "set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n" " Set the transmit balance policy for bonded device running in balance mode.\n\n" diff --git a/app/test/test_link_bonding_mode4.c b/app/test/test_link_bonding_mode4.c index 351129d..2be86d5 100644 --- a/app/test/test_link_bonding_mode4.c +++ b/app/test/test_link_bonding_mode4.c @@ -58,11 +58,11 @@ static const struct rte_ether_addr slave_mac_default = { { 0x00, 0xFF, 0x00, 0xFF, 0x00, 0x00 } }; -static const struct rte_ether_addr parnter_mac_default = { +static const struct rte_ether_addr partner_mac_default = { { 0x22, 0xBB, 0xFF, 0xBB, 0x00, 0x00 } }; -static const struct rte_ether_addr parnter_system = { +static const struct rte_ether_addr partner_system = { { 0x33, 0xFF, 0xBB, 0xFF, 0x00, 0x00 } }; @@ -76,7 +76,7 @@ struct slave_conf { uint16_t port_id; uint8_t bonded : 1; - 
uint8_t lacp_parnter_state; + uint8_t lacp_partner_state; }; struct ether_vlan_hdr { @@ -258,7 +258,7 @@ add_slave(struct slave_conf *slave, uint8_t start) TEST_ASSERT_EQUAL(rte_is_same_ether_addr(&addr, &addr_check), 1, "Slave MAC address is not as expected"); - RTE_VERIFY(slave->lacp_parnter_state == 0); + RTE_VERIFY(slave->lacp_partner_state == 0); return 0; } @@ -288,7 +288,7 @@ remove_slave(struct slave_conf *slave) test_params.bonded_port_id); slave->bonded = 0; - slave->lacp_parnter_state = 0; + slave->lacp_partner_state = 0; return 0; } @@ -501,20 +501,20 @@ make_lacp_reply(struct slave_conf *slave, struct rte_mbuf *pkt) slow_hdr = rte_pktmbuf_mtod(pkt, struct slow_protocol_frame *); /* Change source address to partner address */ - rte_ether_addr_copy(&parnter_mac_default, &slow_hdr->eth_hdr.src_addr); + rte_ether_addr_copy(&partner_mac_default, &slow_hdr->eth_hdr.src_addr); slow_hdr->eth_hdr.src_addr.addr_bytes[RTE_ETHER_ADDR_LEN - 1] = slave->port_id; lacp = (struct lacpdu *) &slow_hdr->slow_protocol; /* Save last received state */ - slave->lacp_parnter_state = lacp->actor.state; + slave->lacp_partner_state = lacp->actor.state; /* Change it into LACP replay by matching parameters. 
*/ memcpy(&lacp->partner.port_params, &lacp->actor.port_params, sizeof(struct port_params)); lacp->partner.state = lacp->actor.state; - rte_ether_addr_copy(&parnter_system, &lacp->actor.port_params.system); + rte_ether_addr_copy(&partner_system, &lacp->actor.port_params.system); lacp->actor.state = STATE_LACP_ACTIVE | STATE_SYNCHRONIZATION | STATE_AGGREGATION | @@ -580,7 +580,7 @@ bond_handshake_done(struct slave_conf *slave) const uint8_t expected_state = STATE_LACP_ACTIVE | STATE_SYNCHRONIZATION | STATE_AGGREGATION | STATE_COLLECTING | STATE_DISTRIBUTING; - return slave->lacp_parnter_state == expected_state; + return slave->lacp_partner_state == expected_state; } static unsigned @@ -1165,7 +1165,7 @@ init_marker(struct rte_mbuf *pkt, struct slave_conf *slave) &marker_hdr->eth_hdr.dst_addr); /* Init source address */ - rte_ether_addr_copy(&parnter_mac_default, + rte_ether_addr_copy(&partner_mac_defaul
[PATCH v2 2/8] net/bonding: fix bonded dev configuring slave dev
- Replace directly overwriting of slave port's private rte_eth_conf by copying it, and then updating it via rte_eth_dev_configure(). Signed-off-by: Robert Sanford --- drivers/net/bonding/rte_eth_bond_pmd.c | 30 -- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index f6003b0..b9e7439 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1691,6 +1691,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, struct rte_flow_error flow_error; struct bond_dev_private *internals = bonded_eth_dev->data->dev_private; + struct rte_eth_conf dev_conf; /* Stop slave */ errval = rte_eth_dev_stop(slave_eth_dev->data->port_id); @@ -1698,34 +1699,36 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)", slave_eth_dev->data->port_id, errval); + /* Start with a copy of slave's current rte_eth_conf. */ + dev_conf = slave_eth_dev->data->dev_conf; + dev_conf.rx_adv_conf.rss_conf.rss_key = NULL; + /* Enable interrupts on slave device if supported */ - if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) - slave_eth_dev->data->dev_conf.intr_conf.lsc = 1; + dev_conf.intr_conf.lsc = + (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) ? 
1 : 0; /* If RSS is enabled for bonding, try to enable it for slaves */ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) { /* rss_key won't be empty if RSS is configured in bonded dev */ - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = - internals->rss_key_len; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = - internals->rss_key; + dev_conf.rx_adv_conf.rss_conf.rss_key_len = + internals->rss_key_len; + dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key; - slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = + dev_conf.rx_adv_conf.rss_conf.rss_hf = bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf; - slave_eth_dev->data->dev_conf.rxmode.mq_mode = + dev_conf.rxmode.mq_mode = bonded_eth_dev->data->dev_conf.rxmode.mq_mode; } if (bonded_eth_dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) - slave_eth_dev->data->dev_conf.rxmode.offloads |= + dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; else - slave_eth_dev->data->dev_conf.rxmode.offloads &= + dev_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER; - slave_eth_dev->data->dev_conf.rxmode.mtu = - bonded_eth_dev->data->dev_conf.rxmode.mtu; + dev_conf.rxmode.mtu = bonded_eth_dev->data->dev_conf.rxmode.mtu; nb_rx_queues = bonded_eth_dev->data->nb_rx_queues; nb_tx_queues = bonded_eth_dev->data->nb_tx_queues; @@ -1747,8 +1750,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev, /* Configure device */ errval = rte_eth_dev_configure(slave_eth_dev->data->port_id, - nb_rx_queues, nb_tx_queues, - &(slave_eth_dev->data->dev_conf)); + nb_rx_queues, nb_tx_queues, &dev_conf); if (errval != 0) { RTE_BOND_LOG(ERR, "Cannot configure slave device: port %u, err (%d)", slave_eth_dev->data->port_id, errval); -- 2.7.4
[PATCH v2 3/8] net/bonding: change mbuf pool and ring creation
- Turn off mbuf pool caching to avoid mbufs lingering in pool caches. At most, we transmit one LACPDU per second, per port. LACP tx_machine() performs the "get", and runs in the context of the interrupt thread (no default cache). PMD typically "puts" no more than one LACPDU per second, on average. - Create rings with RING_F_EXACT_SZ flag, so that they are the desired size, and not one less than requested. Signed-off-by: Robert Sanford --- drivers/net/bonding/rte_eth_bond_8023ad.c | 8 +++- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index 43231bc..9ed2a46 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -1101,9 +1101,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, } snprintf(mem_name, RTE_DIM(mem_name), "slave_port%u_pool", slave_id); - port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc, - RTE_MEMPOOL_CACHE_MAX_SIZE >= 32 ? - 32 : RTE_MEMPOOL_CACHE_MAX_SIZE, + port->mbuf_pool = rte_pktmbuf_pool_create(mem_name, total_tx_desc, 0, 0, element_size, socket_id); /* Any memory allocation failure in initialization is critical because @@ -1115,7 +1113,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_rx", slave_id); port->rx_ring = rte_ring_create(mem_name, - rte_align32pow2(BOND_MODE_8023AX_SLAVE_RX_PKTS), socket_id, 0); + BOND_MODE_8023AX_SLAVE_RX_PKTS, socket_id, RING_F_EXACT_SZ); if (port->rx_ring == NULL) { rte_panic("Slave %u: Failed to create rx ring '%s': %s\n", slave_id, @@ -1125,7 +1123,7 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, /* TX ring is at least one pkt longer to make room for marker packet. 
*/ snprintf(mem_name, RTE_DIM(mem_name), "slave_%u_tx", slave_id); port->tx_ring = rte_ring_create(mem_name, - rte_align32pow2(BOND_MODE_8023AX_SLAVE_TX_PKTS + 1), socket_id, 0); + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1, socket_id, RING_F_EXACT_SZ); if (port->tx_ring == NULL) { rte_panic("Slave %u: Failed to create tx ring '%s': %s\n", slave_id, -- 2.7.4
[PATCH v2 4/8] net/bonding: support enabling LACP short timeout
- Add support for enabling LACP short timeout, i.e., link partner can use fast periodic time interval between transmits. Signed-off-by: Robert Sanford --- drivers/net/bonding/eth_bond_8023ad_private.h | 3 ++- drivers/net/bonding/rte_eth_bond_8023ad.c | 28 +++ drivers/net/bonding/rte_eth_bond_8023ad.h | 3 +++ 3 files changed, 29 insertions(+), 5 deletions(-) diff --git a/drivers/net/bonding/eth_bond_8023ad_private.h b/drivers/net/bonding/eth_bond_8023ad_private.h index 60db31e..bfde03c 100644 --- a/drivers/net/bonding/eth_bond_8023ad_private.h +++ b/drivers/net/bonding/eth_bond_8023ad_private.h @@ -159,7 +159,6 @@ struct mode8023ad_private { uint64_t rx_marker_timeout; uint64_t update_timeout_us; rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb; - uint8_t external_sm; struct rte_ether_addr mac_addr; struct rte_eth_link slave_link; @@ -178,6 +177,8 @@ struct mode8023ad_private { uint16_t tx_qid; } dedicated_queues; enum rte_bond_8023ad_agg_selection agg_selection; + uint8_t short_timeout_enabled : 1; + uint8_t short_timeout_updated : 1; }; /** diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index 9ed2a46..5c175e7 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -868,10 +868,10 @@ bond_mode_8023ad_periodic_cb(void *arg) struct rte_eth_link link_info; struct rte_ether_addr slave_addr; struct rte_mbuf *lacp_pkt = NULL; + uint8_t short_timeout_updated = internals->mode4.short_timeout_updated; uint16_t slave_id; uint16_t i; - /* Update link status on each port */ for (i = 0; i < internals->active_slave_count; i++) { uint16_t key; @@ -916,6 +916,13 @@ bond_mode_8023ad_periodic_cb(void *arg) slave_id = internals->active_slaves[i]; port = &bond_mode_8023ad_ports[slave_id]; + if (short_timeout_updated) { + if (internals->mode4.short_timeout_enabled) + ACTOR_STATE_SET(port, LACP_SHORT_TIMEOUT); + else + ACTOR_STATE_CLR(port, LACP_SHORT_TIMEOUT); + } + if ((port->actor.key & 
rte_cpu_to_be_16(BOND_LINK_FULL_DUPLEX_KEY)) == 0) { @@ -960,6 +967,9 @@ bond_mode_8023ad_periodic_cb(void *arg) show_warnings(slave_id); } + if (short_timeout_updated) + internals->mode4.short_timeout_updated = 0; + rte_eal_alarm_set(internals->mode4.update_timeout_us, bond_mode_8023ad_periodic_cb, arg); } @@ -1054,7 +1064,6 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, /* Given slave must not be in active list. */ RTE_ASSERT(find_slave_by_id(internals->active_slaves, internals->active_slave_count, slave_id) == internals->active_slave_count); - RTE_SET_USED(internals); /* used only for assert when enabled */ memcpy(&port->actor, &initial, sizeof(struct port_params)); /* Standard requires that port ID must be greater than 0. @@ -1065,7 +1074,9 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, memcpy(&port->partner_admin, &initial, sizeof(struct port_params)); /* default states */ - port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | STATE_DEFAULTED; + port->actor_state = STATE_AGGREGATION | STATE_LACP_ACTIVE | + STATE_DEFAULTED | (internals->mode4.short_timeout_enabled ? 
+ STATE_LACP_SHORT_TIMEOUT : 0); port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION; port->sm_flags = SM_FLAGS_BEGIN; @@ -1209,6 +1220,7 @@ bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, struct mode8023ad_private *mode4 = &internals->mode4; uint64_t ms_ticks = rte_get_tsc_hz() / 1000; + memset(conf, 0, sizeof(*conf)); conf->fast_periodic_ms = mode4->fast_periodic_timeout / ms_ticks; conf->slow_periodic_ms = mode4->slow_periodic_timeout / ms_ticks; conf->short_timeout_ms = mode4->short_timeout / ms_ticks; @@ -1219,6 +1231,7 @@ bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks; conf->slowrx_cb = mode4->slowrx_cb; conf->agg_selection = mode4->agg_selection; + conf->lacp_timeout_control = mode4->short_timeout_enabled; } static void @@ -1234,6 +1247,7 @@ bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf) conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS; conf->slowrx_cb = NULL; conf->agg_selection = AGG_STABLE; + conf->lacp_timeout_control = 0; } static void @@ -1274,6 +1288,11 @@ bond_mo
[PATCH v2 5/8] net/bonding: add bond_8023ad and bond_alb to doc
- Add bond_8023ad and bond_alb to API documentation. Signed-off-by: Robert Sanford --- doc/api/doxy-api-index.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md index 4245b96..830235c 100644 --- a/doc/api/doxy-api-index.md +++ b/doc/api/doxy-api-index.md @@ -39,6 +39,8 @@ The public API headers are grouped by topics: - **device specific**: [softnic](@ref rte_eth_softnic.h), [bond] (@ref rte_eth_bond.h), + [bond_8023ad](@ref rte_eth_bond_8023ad.h), + [bond_alb] (@ref rte_eth_bond_alb.h), [vhost] (@ref rte_vhost.h), [vdpa] (@ref rte_vdpa.h), [KNI](@ref rte_kni.h), -- 2.7.4
[PATCH v2 6/8] remove self from timers maintainers
Remove self from Timers maintainers. Signed-off-by: Robert Sanford --- MAINTAINERS | 1 - 1 file changed, 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 18d9eda..32663b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1613,7 +1613,6 @@ F: examples/vm_power_manager/ F: doc/guides/sample_app_ug/vm_power_management.rst Timers -M: Robert Sanford M: Erik Gabriel Carrillo F: lib/timer/ F: doc/guides/prog_guide/timer_lib.rst -- 2.7.4
[PATCH v2 7/8] net/ring: add promisc and all-MC stubs
Add promiscuous_enable, promiscuous_disable, allmulticast_enable, and allmulticast_disable API stubs. This helps clean up errors in dpdk-test link_bonding_mode4_autotest. Signed-off-by: Robert Sanford --- drivers/net/ring/rte_eth_ring.c | 28 1 file changed, 28 insertions(+) diff --git a/drivers/net/ring/rte_eth_ring.c b/drivers/net/ring/rte_eth_ring.c index db10f03..cfb81da 100644 --- a/drivers/net/ring/rte_eth_ring.c +++ b/drivers/net/ring/rte_eth_ring.c @@ -226,6 +226,30 @@ eth_mac_addr_add(struct rte_eth_dev *dev __rte_unused, } static int +eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int +eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused) +{ + return 0; +} + +static int eth_link_update(struct rte_eth_dev *dev __rte_unused, int wait_to_complete __rte_unused) { return 0; } @@ -275,6 +299,10 @@ static const struct eth_dev_ops ops = { .stats_reset = eth_stats_reset, .mac_addr_remove = eth_mac_addr_remove, .mac_addr_add = eth_mac_addr_add, + .promiscuous_enable = eth_promiscuous_enable, + .promiscuous_disable = eth_promiscuous_disable, + .allmulticast_enable = eth_allmulticast_enable, + .allmulticast_disable = eth_allmulticast_disable, }; static int -- 2.7.4
[PATCH v2 8/8] net/bonding: add LACP short timeout tests
- Add "set bonding lacp timeout_ctrl on|off" to testpmd. - Add "test_mode4_lacp_timeout_control" to dpdk-test. - Remove call to rte_eth_dev_mac_addr_remove from add_slave, as it always fails and prints an error. Signed-off-by: Robert Sanford --- app/test-pmd/cmdline.c | 77 ++ app/test/test_link_bonding_mode4.c | 70 +- 2 files changed, 145 insertions(+), 2 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index 9fd2c2a..b0c2fb4 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -633,6 +633,9 @@ static void cmd_help_long_parsed(void *parsed_result, "set bonding mode IEEE802.3AD aggregator policy (port_id) (agg_name)\n" " Set Aggregation mode for IEEE802.3AD (mode 4)\n\n" + "set bonding lacp timeout_ctrl (port_id) (on|off)\n" + "Configure LACP partner to use fast|slow periodic tx interval.\n\n" + "set bonding balance_xmit_policy (port_id) (l2|l23|l34)\n" " Set the transmit balance policy for bonded device running in balance mode.\n\n" @@ -6192,6 +6195,7 @@ static void lacp_conf_show(struct rte_eth_bond_8023ad_conf *conf) printf("\taggregation mode: invalid\n"); break; } + printf("\tlacp timeout control: %u\n", conf->lacp_timeout_control); printf("\n"); } @@ -6863,6 +6867,78 @@ cmdline_parse_inst_t cmd_set_bonding_agg_mode_policy = { }; +/* *** SET LACP TIMEOUT CONTROL ON BONDED DEVICE *** */ +struct cmd_set_lacp_timeout_control_result { + cmdline_fixed_string_t set; + cmdline_fixed_string_t bonding; + cmdline_fixed_string_t lacp; + cmdline_fixed_string_t timeout_ctrl; + uint16_t port_id; + cmdline_fixed_string_t on_off; +}; + +static void +cmd_set_lacp_timeout_control_parsed(void *parsed_result, + __rte_unused struct cmdline *cl, + __rte_unused void *data) +{ + struct cmd_set_lacp_timeout_control_result *res = parsed_result; + struct rte_eth_bond_8023ad_conf port_conf; + uint8_t on_off = 0; + int ret; + + if (!strcmp(res->on_off, "on")) + on_off = 1; + + ret = rte_eth_bond_8023ad_conf_get(res->port_id, &port_conf); + if (ret != 
0) { + fprintf(stderr, "\tGet bonded device %u lacp conf failed\n", + res->port_id); + return; + } + + port_conf.lacp_timeout_control = on_off; + ret = rte_eth_bond_8023ad_setup(res->port_id, &port_conf); + if (ret != 0) + fprintf(stderr, "\tSetup bonded device %u lacp conf failed\n", + res->port_id); +} + +cmdline_parse_token_string_t cmd_set_lacp_timeout_control_set = + TOKEN_STRING_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + set, "set"); +cmdline_parse_token_string_t cmd_set_lacp_timeout_control_bonding = + TOKEN_STRING_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + bonding, "bonding"); +cmdline_parse_token_string_t cmd_set_lacp_timeout_control_lacp = + TOKEN_STRING_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + lacp, "lacp"); +cmdline_parse_token_string_t cmd_set_lacp_timeout_control_timeout_ctrl = + TOKEN_STRING_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + timeout_ctrl, "timeout_ctrl"); +cmdline_parse_token_num_t cmd_set_lacp_timeout_control_port_id = + TOKEN_NUM_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + port_id, RTE_UINT16); +cmdline_parse_token_string_t cmd_set_lacp_timeout_control_on_off = + TOKEN_STRING_INITIALIZER(struct cmd_set_lacp_timeout_control_result, + on_off, "on#off"); + +cmdline_parse_inst_t cmd_set_lacp_timeout_control = { + .f = cmd_set_lacp_timeout_control_parsed, + .data = (void *) 0, + .help_str = "set bonding lacp timeout_ctrl on|off: " + "Configure partner to use fast|slow periodic tx interval", + .tokens = { + (void *)&cmd_set_lacp_timeout_control_set, + (void *)&cmd_set_lacp_timeout_control_bonding, + (void *)&cmd_set_lacp_timeout_control_lacp, + (void *)&cmd_set_lacp_timeout_control_timeout_ctrl, + (void *)&cmd_set_lacp_timeout_control_port_id, + (void *)&cmd_set_lacp_timeout_control_on_off, + NULL + } +}; + #endif /* RTE_NET_BOND */ /* *** SET FORWARDING MODE *** */ @@ -17728,6 +17804,7 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *) 
&cmd_set_bond_mon_period, (cmdline_parse_inst_t *) &cmd_set_lacp_dedicated_queues, (
Re: [PATCH v1 01/25] drivers/net: introduce a new PMD driver
On Sun, 19 Dec 2021 11:40:31 -0800 Stephen Hemminger wrote: > On Sat, 18 Dec 2021 10:51:28 +0800 > Yanling Song wrote: > > > +#ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */ > > +#define CLOCK_TYPE CLOCK_MONOTONIC_RAW > > +#else > > +#define CLOCK_TYPE CLOCK_MONOTONIC > > +#endif > > CLOCK_MONOTONIC_RAW was defined in Linux.2.6.28 > DPDK does not support any kernels that old, so the #ifdef is not > needed. > OK. #ifdef will be removed in the next version. > > + > +static inline unsigned long clock_gettime_ms(void) > +{ > + struct timespec tv; > + > + (void)clock_gettime(CLOCK_TYPE, &tv); > + > + return (unsigned long)tv.tv_sec * SPNIC_S_TO_MS_UNIT + > +(unsigned long)tv.tv_nsec / SPNIC_S_TO_NS_UNIT; > +} > > If all you want is jiffie accuracy, you could use > CLOCK_MONOTONIC_COARSE. > I did not get your point: CLOCK_MONOTONIC is more accurate than CLOCK_MONOTONIC_COARSE, right? > > +#define jiffies clock_gettime_ms() > +#define msecs_to_jiffies(ms) (ms) > > +#define time_before(now, end)((now) < (end)) > > Does that simple version of the macro work right if jiffies wraps > around? Less of an issue on 64 bit platforms... > > The kernel version is effectively. > #define time_before(now, end) ((long)((now) - (end)) < 0) OK. Will be changed in the next version.
Re: [PATCH v1 16/25] net/spnic: add device configure/version/info
On Mon, 20 Dec 2021 08:23:56 +0800 Stephen Hemminger wrote: > On Sat, 18 Dec 2021 10:51:43 +0800 > Yanling Song wrote: > > > +static int spnic_dev_configure(struct rte_eth_dev *dev) > > +{ > > + struct spnic_nic_dev *nic_dev = > > SPNIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev); + > > + nic_dev->num_sqs = dev->data->nb_tx_queues; > > + nic_dev->num_rqs = dev->data->nb_rx_queues; > > + > > + if (nic_dev->num_sqs > nic_dev->max_sqs || > > + nic_dev->num_rqs > nic_dev->max_rqs) { > > + PMD_DRV_LOG(ERR, "num_sqs: %d or num_rqs: %d > > larger than max_sqs: %d or max_rqs: %d", > > + nic_dev->num_sqs, nic_dev->num_rqs, > > + nic_dev->max_sqs, nic_dev->max_rqs); > > + return -EINVAL; > > + } > > + > > This should already be covered by checks in ethedev:dev_configure. OK. The check will be removed in the next version. > > > + /* The range of mtu is 384~9600 */ > > + if (SPNIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) < > > + SPNIC_MIN_FRAME_SIZE || > > + SPNIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) > > > + SPNIC_MAX_JUMBO_FRAME_SIZE) { > > + PMD_DRV_LOG(ERR, "Max rx pkt len out of range, > > mtu: %d, expect between %d and %d", > > + dev->data->dev_conf.rxmode.mtu, > > + SPNIC_MIN_FRAME_SIZE, > > SPNIC_MAX_JUMBO_FRAME_SIZE); > > + return -EINVAL; > > + } > > Already covered by eth_dev_validate_mtu called from ethdev > dev_configure. > OK. The check will be removed in the next version.
RE: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select is supported before write
> -Original Message- > From: Morten Brørup > Sent: Tuesday, December 21, 2021 16:58 > To: Wang, Haiyue ; steph...@silicom-usa.com; Lu, > Wenzhuo ; > Changchun Ouyang ; Zhang, Helin > > Cc: dev@dpdk.org; Wang, Wen ; sta...@dpdk.org > Subject: RE: [PATCH v2 3/7] net/ixgbe: Check that SFF-8472 soft rate select > is supported before write > > > From: Wang, Haiyue [mailto:haiyue.w...@intel.com] > > Sent: Tuesday, 21 December 2021 02.15 > > > > > -Original Message- > > > From: Stephen Douthit > > > Sent: Tuesday, December 21, 2021 05:33 > > > > > > On 12/20/21 02:53, Wang, Haiyue wrote: > > > >> -Original Message- > > > >> From: Stephen Douthit > > > >> Sent: Tuesday, December 7, 2021 06:19 > > > >> > > > >> Make sure an SFP is really a SFF-8472 device that supports the > > optional > > > >> soft rate select feature before just blindly poking those I2C > > registers. > > > >> > > > >> Skip all I2C traffic if we know there's no SFP. > > > >> > > > >> Fixes: f3430431aba ("ixgbe/base: add SFP+ dual-speed support") > > > >> Cc: sta...@dpdk.org > > > >> > > > >> Signed-off-by: Stephen Douthit > > > >> --- > > > > > > > > Normally, DPDK keeps sync with this kind of release. > > > > Working with the Linux kernel mainline drivers is good advice. > > The official Intel Linux drivers seem to be ages behind the Kernel mainline, > and they don't fully No, the "ixgbe" drivers is updated on "7/8/2021". https://www.intel.com/content/www/us/en/download/14302/14687/intel-network-adapter-driver-for-pcie-intel-10-gigabit-ethernet-network-connections-under-linux.html > support the C3000 NICs, so don’t waste any time there! We recently tried > using the official Intel > Linux drivers for a C3338 based project (using Kernel 3.19 in 32 bit mode > with x2APIC disabled), and > they didn't work at all. We ended up backporting the necessary changes from > the kernel mainline > instead. From Steve's response: ME: "I guess this is just in C3000 reference board SDK ?" 
Steve: "It's the board covered by Intel Doc # 574437." I checked the doc "Last Updated: 11/07/2018" It should be some kind of customer release, that's why they are not in the official *open source* Linux driver, so keep your patch set as private.
[PATCH] eventdev/rx_adapter: add event port get api
This patch introduces new api for retrieving event port id of eth rx adapter. Signed-off-by: Naga Harish K S V --- lib/eventdev/rte_event_eth_rx_adapter.c | 20 lib/eventdev/rte_event_eth_rx_adapter.h | 20 lib/eventdev/version.map| 1 + 3 files changed, 41 insertions(+) diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c index 809416d9b7..fca2e38690 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.c +++ b/lib/eventdev/rte_event_eth_rx_adapter.c @@ -3119,6 +3119,26 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id) return rx_adapter->service_inited ? 0 : -ESRCH; } +int +rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id) +{ + struct event_eth_rx_adapter *rx_adapter; + + if (rxa_memzone_lookup()) + return -ENOMEM; + + RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL); + + rx_adapter = rxa_id_to_adapter(id); + if (rx_adapter == NULL || event_port_id == NULL) + return -EINVAL; + + if (rx_adapter->service_inited) + *event_port_id = rx_adapter->event_port_id; + + return rx_adapter->service_inited ? 0 : -ESRCH; +} + int rte_event_eth_rx_adapter_cb_register(uint8_t id, uint16_t eth_dev_id, diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h b/lib/eventdev/rte_event_eth_rx_adapter.h index 9546d792e9..1364eafe38 100644 --- a/lib/eventdev/rte_event_eth_rx_adapter.h +++ b/lib/eventdev/rte_event_eth_rx_adapter.h @@ -37,6 +37,7 @@ * - rte_event_eth_rx_adapter_queue_conf_get() * - rte_event_eth_rx_adapter_queue_stats_get() * - rte_event_eth_rx_adapter_queue_stats_reset() + * - rte_event_eth_rx_adapter_event_port_get() * * The application creates an ethernet to event adapter using * rte_event_eth_rx_adapter_create_ext() or rte_event_eth_rx_adapter_create() @@ -684,6 +685,25 @@ rte_event_eth_rx_adapter_queue_stats_reset(uint8_t id, uint16_t eth_dev_id, uint16_t rx_queue_id); +/** + * Retrieve the event port ID of an adapter. 
If the adapter doesn't use + * a rte_service function, this function returns -ESRCH. + * + * @param id + * Adapter identifier. + * + * @param [out] event_port_id + * A pointer to a uint8_t, to be filled in with the port id. + * + * @return + * - 0: Success + * - <0: Error code on failure, if the adapter doesn't use a rte_service + * function, this function returns -ESRCH. + */ +__rte_experimental +int +rte_event_eth_rx_adapter_event_port_get(uint8_t id, uint8_t *event_port_id); + #ifdef __cplusplus } #endif diff --git a/lib/eventdev/version.map b/lib/eventdev/version.map index ade1f1182e..91d2b5723b 100644 --- a/lib/eventdev/version.map +++ b/lib/eventdev/version.map @@ -102,6 +102,7 @@ EXPERIMENTAL { # added in 21.11 rte_event_eth_rx_adapter_create_with_params; + rte_event_eth_rx_adapter_event_port_get; rte_event_eth_rx_adapter_queue_conf_get; rte_event_eth_rx_adapter_queue_stats_get; rte_event_eth_rx_adapter_queue_stats_reset; -- 2.25.1