Re: [PATCH] version: 22.11-rc0
19/07/2022 23:02, Thomas Monjalon: > 18/07/2022 12:09, David Marchand: > > --- a/devtools/libabigail.abignore > > +++ b/devtools/libabigail.abignore > > - > > -; Temporary exceptions till next major ABI version ; > > - > > +;;; > > +; Temporary exceptions till next major ABI version; > > +; NOTE: there may be some removed libaries or drivers in check-abi.sh ; > > +;;; > > This note is not clear to me. I think it is better to not add this note which can be confusing. For the rest, Acked-by: Thomas Monjalon
Re: [v3 01/24] eal/loongarch: add atomic operations for LoongArch
Ping for review or feedback for this new arch support. Thanks, Min On 2022年06月06日 21:10, Min Zhou wrote: This patch adds architecture specific atomic operations for LoongArch architecture. These implementations use standard atomics of toolchain and heavily reference generic atomics codes. Signed-off-by: Min Zhou --- lib/eal/loongarch/include/rte_atomic.h | 253 + 1 file changed, 253 insertions(+) create mode 100644 lib/eal/loongarch/include/rte_atomic.h diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h new file mode 100644 index 00..8e007e7f76 --- /dev/null +++ b/lib/eal/loongarch/include/rte_atomic.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Loongson Technology Corporation Limited + */ + +#ifndef _RTE_ATOMIC_LOONGARCH_H_ +#define _RTE_ATOMIC_LOONGARCH_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include "generic/rte_atomic.h" + +/** + * LoongArch Synchronize + */ +static inline void synchronize(void) +{ + __asm__ __volatile__("dbar 0":::"memory"); +} + +/** + * General memory barrier. + * + * Guarantees that the LOAD and STORE operations generated before the + * barrier occur before the LOAD and STORE operations generated after. + * This function is architecture dependent. + */ +#define rte_mb() synchronize() + +/** + * Write memory barrier. + * + * Guarantees that the STORE operations generated before the barrier + * occur before the STORE operations generated after. + * This function is architecture dependent. + */ +#define rte_wmb() synchronize() + +/** + * Read memory barrier. + * + * Guarantees that the LOAD operations generated before the barrier + * occur before the LOAD operations generated after. + * This function is architecture dependent. 
+ */ +#define rte_rmb() synchronize() + +#define rte_smp_mb() rte_mb() + +#define rte_smp_wmb() rte_mb() + +#define rte_smp_rmb() rte_mb() + +#define rte_io_mb() rte_mb() + +#define rte_io_wmb() rte_mb() + +#define rte_io_rmb() rte_mb() + +static __rte_always_inline void +rte_atomic_thread_fence(int memorder) +{ + __atomic_thread_fence(memorder); +} + +#ifndef RTE_FORCE_INTRINSICS +/*- 16 bit atomic operations -*/ +static inline int +rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} + +static inline uint16_t +rte_atomic16_exchange(volatile uint16_t *dst, uint16_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_2(dst, val, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +rte_atomic16_inc(rte_atomic16_t *v) +{ + rte_atomic16_add(v, 1); +} + +static inline void +rte_atomic16_dec(rte_atomic16_t *v) +{ + rte_atomic16_sub(v, 1); +} + +static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v) +{ + return __sync_add_and_fetch(&v->cnt, 1) == 0; +} + +static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v) +{ + return __sync_sub_and_fetch(&v->cnt, 1) == 0; +} + +static inline int rte_atomic16_test_and_set(rte_atomic16_t *v) +{ + return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1); +} + +/*- 32 bit atomic operations -*/ +static inline int +rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} + +static inline uint32_t +rte_atomic32_exchange(volatile uint32_t *dst, uint32_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_4(dst, val, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +rte_atomic32_inc(rte_atomic32_t *v) +{ + rte_atomic32_add(v, 1); +} + +static inline void +rte_atomic32_dec(rte_atomic32_t *v) +{ + rte_atomic32_sub(v, 1); +} + 
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v) +{ + return __sync_add_and_fetch(&v->cnt, 1) == 0; +} + +static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v) +{ + return __sync_sub_and_fetch(&v->cnt, 1) == 0; +} + +static inline int rte_atomic32_test_and_set(rte_atomic32_t *v) +{ + return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1); +} + +/*- 64 bit atomic operations -*/ +static inline int +rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src) +{ + return __sync_bool_compare_and_swap(dst, exp, src); +} + +static inline uint64_t +rte_atomic64_exchange(volatile uint64_t *dst, uint64_t val) +{ +#if defined(__clang__) + return __atomic_exchange_n(dst, val, __ATOMIC_SEQ_CST); +#else + return __atomic_exchange_8(dst, val, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +rte_atomic64_init(rte_atomic64_t *v) +{ + v->cnt = 0; +} + +st
Re: autotest system requirements.
On Tue, Jul 19, 2022 at 01:00:07PM -0700, Stephen Hemminger wrote: > With 22.07 release, the test requirements seemed to have changed. > Can't run tests unless root now. > > And the table test fails if only 1G of hugepages is setup. > > This doesn't match the documentation: > https://doc.dpdk.org/guides/prog_guide/meson_ut.html > > which shows running tests as non-root. Can you clarify a bit more what you are seeing? I can certainly run a suite of unit tests as non-root and have the majority of them pass. There are failures and timeouts though, but I haven't root-caused why exactly. /Bruce $ meson test --suite DPDK:fast-tests ninja: Entering directory `/home/bruce/dpdk.org/build' ninja: no work to do. 1/102 DPDK:fast-tests / acl_autotestOK 3.05s 2/102 DPDK:fast-tests / atomic_autotest TIMEOUT 10.01s killed by signal 15 SIGTERM >>> DPDK_TEST=atomic_autotest MALLOC_PERTURB_=252 >>> /home/bruce/dpdk.org/build/app/test/dpdk-test --file-prefix=atomic_autotest [3/102] ð 3/102 DPDK:fast-tests / bitmap_autotest OK 0.14s 4/102 DPDK:fast-tests / bpf_autotestOK 0.12s 5/102 DPDK:fast-tests / bpf_convert_autotestOK 0.14s ... 100/102 DPDK:fast-tests / pdump_autotest OK 5.46s 101/102 DPDK:fast-tests / vdev_autotest OK 0.16s 102/102 DPDK:fast-tests / compressdev_autotestSKIP 0.15s exit status 77 Ok: 88 Expected Fail: 0 Fail: 1 Unexpected Pass:0 Skipped:5 Timeout:8 Full log written to /home/bruce/dpdk.org/build/meson-logs/testlog.txt
[Bug 1054] Bugzilla test
https://bugs.dpdk.org/show_bug.cgi?id=1054 Bug ID: 1054 Summary: Bugzilla test Product: DPDK Version: 22.03 Hardware: All OS: All Status: UNCONFIRMED Severity: normal Priority: Normal Component: vhost/virtio Assignee: dev@dpdk.org Reporter: maxime.coque...@redhat.com CC: maxime.coque...@redhat.com Target Milestone: --- This is a test Bz. -- You are receiving this mail because: You are the assignee for the bug.
Re: [PATCH] crypto/ccp: Check for the NULL pointer after calling rte_malloc
On Wed, Jul 20, 2022 at 8:29 AM Namburu, Chandu-babu wrote: > From: Shiqi Liu <835703...@qq.com> > > As the possible failure of the rte_malloc(), the not_checked and checked > could be NULL pointer. > Therefore, it should be better to check it in order to avoid the dereference > of the NULL pointer. > > Fixes: 09a0fd736a0 ("crypto/ccp: enable IOMMU") > Signed-off-by: Shiqi Liu <835703...@qq.com> This sha_ctx variable and its accesses are suspicious. It seems to be used as some kind of intermediate buffer, but I fail to see the need. Can't the existing code rely on sess->auth.ctx ? There is also a suspicious mention (in ccp_perform_sha) of sha_ctx, but with no call to rte_mem_virt2iova(). -- David Marchand
Re: [PATCH] crypto/ccp: Check for the NULL pointer after calling rte_malloc
On Wed, 20 Jul 2022 06:29:06 + "Namburu, Chandu-babu" wrote: > sha_ctx = (void *)rte_malloc(NULL, SHA512_DIGEST_SIZE, 64); > + if (sha_ctx == NULL) { > + return -ENOMEM; > + } There is unnecessary cast here (pre-existing). rte_malloc() already returns void *
Re: autotest system requirements.
On Wed, 20 Jul 2022 12:07:59 +0100 Bruce Richardson wrote: > On Tue, Jul 19, 2022 at 01:00:07PM -0700, Stephen Hemminger wrote: > > With 22.07 release, the test requirements seemed to have changed. > > Can't run tests unless root now. > > > > And the table test fails if only 1G of hugepages is setup. > > > > This doesn't match the documentation: > > https://doc.dpdk.org/guides/prog_guide/meson_ut.html > > > > which shows running tests as non-root. > > Can you clarify a bit more what you are seeing? I can certainly run a suite > of unit tests as non-root and have the majority of them pass. There are > failures and timeouts though, but I haven't root-caused why exactly. > > /Bruce > > $ meson test --suite DPDK:fast-tests > ninja: Entering directory `/home/bruce/dpdk.org/build' > ninja: no work to do. > 1/102 DPDK:fast-tests / acl_autotestOK > 3.05s > 2/102 DPDK:fast-tests / atomic_autotest TIMEOUT >10.01s killed by signal 15 SIGTERM > >>> DPDK_TEST=atomic_autotest MALLOC_PERTURB_=252 > >>> /home/bruce/dpdk.org/build/app/test/dpdk-test > >>> --file-prefix=atomic_autotest > > [3/102] ð 3/102 DPDK:fast-tests / bitmap_autotest OK > 0.14s > 4/102 DPDK:fast-tests / bpf_autotestOK > 0.12s > 5/102 DPDK:fast-tests / bpf_convert_autotestOK > 0.14s > ... > 100/102 DPDK:fast-tests / pdump_autotest OK > 5.46s > 101/102 DPDK:fast-tests / vdev_autotest OK > 0.16s > 102/102 DPDK:fast-tests / compressdev_autotestSKIP > 0.15s exit status 77 > > Ok: 88 > Expected Fail: 0 > Fail: 1 > Unexpected Pass:0 > Skipped:5 > Timeout:8 > > Full log written to /home/bruce/dpdk.org/build/meson-logs/testlog.txt > Turns out the default for hugepages setup is making it inaccessible. Using new --user flag fixed that. Probably should add that to docs about running tests. With 1G of hugepages: Ok: 96 Expected Fail: 0 Fail: 1 Unexpected Pass:0 Skipped:5 Timeout:0
RE: [PATCH v2 1/3] power: add uncore API to power library
> -Original Message- > From: Kearney, Tadhg > diff --git a/config/x86/meson.build b/config/x86/meson.build index > +dpdk_conf.set('RTE_MAX_NUMA_DIE', 1) > +dpdk_conf.set('RTE_MAX_UNCORE_FREQS', 32) Check if these flags should be added to other platforms. > +Abstract > + > +Up to 60% power saving can be achieved by reducing the uncore frequency > to its lowest value. It might be good to give some intro on this before directly specifying the power saving. Would be good to say power savings can be achieved instead of quoting a specific %. Also > +With later kernels, there is now a sysfs entry to allow adjustment of uncore > frequency. The kernel provides the driver "intel-uncore-frequency" to control the uncore frequency limits for x86 platforms. The driver is available from kernel version 5.6 and above. > diff --git a/lib/power/rte_power_uncore.c b/lib/power/rte_power_uncore.c > +static int > +set_uncore_freq_internal(struct uncore_power_info *ui, uint32_t idx) { > + open_core_sysfs_file(&ui->f_cur_max, "rw+", > POWER_UNCORE_SYSFILE_MAX_FREQ, You already have this f_cur_max opened; you can re-use it instead of opening it again. Thanks, Reshma
Re: [v3 00/24] Support LoongArch architecture
Hello, On Mon, Jun 6, 2022 at 3:11 PM Min Zhou wrote: > > Dear team, > The following patch set is intended to support DPDK running on LoongArch > architecture. > > LoongArch is the general processor architecture of Loongson and is a new > RISC ISA, which is a bit like MIPS or RISC-V. > > The online documents of LoongArch are here: > https://loongson.github.io/LoongArch-Documentation/README-EN.html > > The latest cross compile tool chain can be downloaded from: > https://github.com/loongson/build-tools > > v3: > - add URL for cross compile tool chain > - remove rte_lpm_lsx.h which was a dummy vector implementation > because there is already a scalar implementation, thanks to > Michal Mazurek > - modify the name of compiler for cross compiling > - remove useless variable in meson.build > > v2: > - use standard atomics of toolchain to implement > atomic operations > - implement spinlock based on standard atomics Thanks for porting DPDK to a new architecture. I am unsure of what this architecture status is wrt to the upstream Linux kernel and wrt to main distributions support. Could you give some details? Otherwise, I did not look at the series yet, but it needs to be rebased on the main repository, there have been quite some changes since this original submission. How will this architecture be integrated wrt CI: GHA? sending your hw to UNH lab? or maybe do you have plans for your own CI servers? -- David Marchand
RE: [EXT] Re: [PATCH] examples/l2fwd: add check of Rx packets count
Ping > -Original Message- > From: Thomas Monjalon > Sent: Monday, June 27, 2022 2:42 AM > To: dev@dpdk.org > Cc: Bruce Richardson ; ferruh.yi...@xilinx.com; > ajit.khapa...@broadcom.com; abo...@pensando.io; > andrew.rybche...@oktetlabs.ru; beilei.x...@intel.com; ch...@att.com; > chenbo@intel.com; ciara.lof...@intel.com; Devendra Singh Rawat > ; ed.cz...@atomicrules.com; > evge...@amazon.com; gr...@u256.net; g.si...@nxp.com; > zhouguoy...@huawei.com; haiyue.w...@intel.com; Harman Kalra > ; heinrich.k...@corigine.com; > hemant.agra...@nxp.com; hyon...@cisco.com; igo...@amazon.com; Igor > Russkikh ; jgraj...@cisco.com; > jasvinder.si...@intel.com; jianw...@trustnetic.com; > jiawe...@trustnetic.com; jingjing...@intel.com; johnd...@cisco.com; > john.mil...@atomicrules.com; linvi...@tuxdriver.com; keith.wi...@intel.com; > Kiran Kumar Kokkilagadda ; ouli...@huawei.com; > Liron Himi ; lon...@microsoft.com; m...@semihalf.com; > spin...@cesnet.cz; ma...@nvidia.com; matt.pet...@windriver.com; > maxime.coque...@redhat.com; m...@semihalf.com; humi...@huawei.com; > Pradeep Kumar Nalla ; Nithin Kumar Dabilpuram > ; qiming.y...@intel.com; qi.z.zh...@intel.com; > Radha Chintakuntla ; rahul.lakkire...@chelsio.com; > Rasesh Mody ; rosen...@intel.com; > sachin.sax...@oss.nxp.com; Satha Koteswara Rao Kottidi > ; Shahed Shaikh ; > shaib...@amazon.com; shepard.sie...@atomicrules.com; > asoma...@amd.com; somnath.ko...@broadcom.com; > sthem...@microsoft.com; steven.webs...@windriver.com; Sunil Kumar Kori > ; mtetsu...@gmail.com; Veerasenareddy Burru > ; viachesl...@nvidia.com; xiao.w.w...@intel.com; > cloud.wangxiao...@huawei.com; yisen.zhu...@huawei.com; > yongw...@vmware.com; xuanziya...@huawei.com; Rahul Bhansali > > Subject: [EXT] Re: [PATCH] examples/l2fwd: add check of Rx packets count > > External Email > > -- > Please could we have some tests on other hardware with this improvement? 
> > > 25/05/2022 11:13, Rahul Bhansali: > > An additional check is added to avoid extra processing if receive > > packets are 0. > > > > Performance impact: with Marvell OCTEON TX2 platform, observed an > > improvement by ~14%. > > > > Signed-off-by: Rahul Bhansali > > --- > > nb_rx = rte_eth_rx_burst(portid, 0, > > pkts_burst, MAX_PKT_BURST); > > > > + if (unlikely(nb_rx == 0)) > > + continue; > > + > > >
Re: [PATCH] examples/l2fwd: add check of Rx packets count
On Wed, 25 May 2022 14:43:27 +0530 Rahul Bhansali wrote: > An additional check is added to avoid extra processing if > receive packets are 0. > > Performance impact: with Marvell OCTEON TX2 platform, observed an > improvement by ~14%. > > Signed-off-by: Rahul Bhansali This makes sense. Could drop the unlikely() since in general unlikely should be reserved for error cases. But doubt it matters at all. Did a quick check the other examples do similar thing already. Acked-by: Stephen Hemminger
RE: [PATCH v9 2/4] ethdev: introduce protocol hdr based buffer split
Hi Andrew, > -Original Message- > From: Andrew Rybchenko > Sent: 2022年7月8日 23:01 > To: Wu, WenxuanX ; tho...@monjalon.net; Li, > Xiaoyun ; ferruh.yi...@xilinx.com; Singh, Aman Deep > ; dev@dpdk.org; Zhang, Yuying > ; Zhang, Qi Z ; > jerinjac...@gmail.com > Cc: step...@networkplumber.org; Ding, Xuan ; Wang, > YuanX ; Ray Kinsella > Subject: Re: [PATCH v9 2/4] ethdev: introduce protocol hdr based buffer split > > On 6/13/22 13:25, wenxuanx...@intel.com wrote: > > From: Wenxuan Wu > > > > Currently, Rx buffer split supports length based split. With Rx queue > > offload RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT enabled and Rx packet > segment > > configured, PMD will be able to split the received packets into > > multiple segments. > > > > However, length based buffer split is not suitable for NICs that do > > split based on protocol headers. Given an arbitrarily variable length > > in Rx packet segment, it is almost impossible to pass a fixed protocol > > header to driver. Besides, the existence of tunneling results in the > > composition of a packet is various, which makes the situation even worse. > > > > This patch extends current buffer split to support protocol header > > based buffer split. A new proto_hdr field is introduced in the > > reserved field of rte_eth_rxseg_split structure to specify protocol > > header. The proto_hdr field defines the split position of packet, > > splitting will always happens after the protocol header defined in the > > Rx packet segment. When Rx queue offload > > RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is enabled and corresponding protocol > > header is configured, driver will split the ingress packets into multiple > segments. 
> > > > struct rte_eth_rxseg_split { > > > > struct rte_mempool *mp; /* memory pools to allocate segment from */ > > uint16_t length; /* segment maximal data length, > > configures "split point" */ > > uint16_t offset; /* data offset from beginning > > of mbuf data buffer */ > > uint32_t proto_hdr; /* inner/outer L2/L3/L4 protocol header, > >configures "split point" */ > > There is a big problem here that using RTE_PTYPE_* defines I can't request > split > after either TCP or UDP header. Sorry, for some reason I missed your reply. Current RTE_PTYPE_* list all the tunnel and L2/L3/L4 protocol headers (both outer and inner). Do you mean that we should support higher layer protocols after L4? I think tunnel and L2/L3/L4 protocol headers are enough. In DPDK, we don't parse higher level protocols after L4. And the higher layer protocols are richer, we can't list all of them. What do you think? > > > }; > > > > If both inner and outer L2/L3/L4 level protocol header split can be > > supported by a PMD. Corresponding protocol header capability is > > RTE_PTYPE_L2_ETHER, RTE_PTYPE_L3_IPV4, RTE_PTYPE_L3_IPV6, > > RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP, > > RTE_PTYPE_INNER_L2_ETHER, RTE_PTYPE_INNER_L3_IPV4, > > RTE_PTYPE_INNER_L3_IPV6, RTE_PTYPE_INNER_L4_TCP, > RTE_PTYPE_INNER_L4_UDP, RTE_PTYPE_INNER_L4_SCTP. > > I think there is no point to list above defines here if it is not the only > supported > defines. Yes, since we use a API to return the protocol header driver supported to split, there is no need to list the incomplete RTE_PTYPE* here. Please see next version. 
> > > > > For example, let's suppose we configured the Rx queue with the > > following segments: > > seg0 - pool0, proto_hdr0=RTE_PTYPE_L3_IPV4, off0=2B > > seg1 - pool1, proto_hdr1=RTE_PTYPE_L4_UDP, off1=128B > > seg2 - pool2, off1=0B > > > > The packet consists of MAC_IPV4_UDP_PAYLOAD will be split like > > following: > > seg0 - ipv4 header @ RTE_PKTMBUF_HEADROOM + 2 in mbuf from pool0 > > seg1 - udp header @ 128 in mbuf from pool1 > > seg2 - payload @ 0 in mbuf from pool2 > > Sorry, but I still see no definition what should happen with, for example, ARP > packet with above config. Thanks, because the following reply was not answered in v8, the definition has not been added in v9 yet. " Our NIC only supports to split the packets into two segments, so there will be an exact match for the only one protocol header configured. Back to this question, for the set of proto_hdrs configured, it can have two behaviors: 1. The aggressive way is to split on longest match you mentioned, E.g. we configure split on ETH-IPV4-TCP, when receives ETH-IPV4-UDP or ETH-IPV6, it can also split on ETH-IPV4 or ETH. 2. A more conservative way is to split only when the packets meet the all protocol headers in the Rx packet segment. In the above situation, it will not do split for ETH-IPV4-UDP and ETH-IPV6. I prefer the second behavior, because the split is usually for the inner most header and payload, if it does not meet, the rest of the headers have no actual value. " Hope to get your insights. And we will update the doc to defi
[PATCH] app/crypto-perf: remove redundant function return
Remove redundant function return value. The function is used in datapath and the return value is not checked in any of the existing callers. Signed-off-by: Anoob Joseph --- app/test-crypto-perf/cperf_ops.c | 36 +--- app/test-crypto-perf/cperf_ops.h | 2 +- 2 files changed, 11 insertions(+), 27 deletions(-) diff --git a/app/test-crypto-perf/cperf_ops.c b/app/test-crypto-perf/cperf_ops.c index 0417af2d5e..d746d51082 100644 --- a/app/test-crypto-perf/cperf_ops.c +++ b/app/test-crypto-perf/cperf_ops.c @@ -9,7 +9,7 @@ #include "cperf_ops.h" #include "cperf_test_vectors.h" -static int +static void cperf_set_ops_asym(struct rte_crypto_op **ops, uint32_t src_buf_offset __rte_unused, uint32_t dst_buf_offset __rte_unused, uint16_t nb_ops, @@ -33,7 +33,6 @@ cperf_set_ops_asym(struct rte_crypto_op **ops, asym_op->modex.result.length = options->modex_data->result.len; rte_crypto_op_attach_asym_session(ops[i], asym_sess); } - return 0; } #ifdef RTE_LIB_SECURITY @@ -52,7 +51,7 @@ test_ipsec_vec_populate(struct rte_mbuf *m, const struct cperf_options *options, } } -static int +static void cperf_set_ops_security(struct rte_crypto_op **ops, uint32_t src_buf_offset __rte_unused, uint32_t dst_buf_offset __rte_unused, @@ -120,11 +119,9 @@ cperf_set_ops_security(struct rte_crypto_op **ops, RTE_SET_USED(tsc_start); RTE_SET_USED(test_vector); - - return 0; } -static int +static void cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, uint32_t src_buf_offset __rte_unused, uint32_t dst_buf_offset __rte_unused, @@ -166,7 +163,7 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, } if (options->test_file != NULL) - return 0; + return; tsc_start_temp = rte_rdtsc_precise(); @@ -179,13 +176,11 @@ cperf_set_ops_security_ipsec(struct rte_crypto_op **ops, tsc_end_temp = rte_rdtsc_precise(); *tsc_start += tsc_end_temp - tsc_start_temp; - - return 0; } #endif -static int +static void cperf_set_ops_null_cipher(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, 
uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -221,11 +216,9 @@ cperf_set_ops_null_cipher(struct rte_crypto_op **ops, sym_op->cipher.data.length = options->test_buffer_size; sym_op->cipher.data.offset = 0; } - - return 0; } -static int +static void cperf_set_ops_null_auth(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -261,11 +254,9 @@ cperf_set_ops_null_auth(struct rte_crypto_op **ops, sym_op->auth.data.length = options->test_buffer_size; sym_op->auth.data.offset = 0; } - - return 0; } -static int +static void cperf_set_ops_cipher(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -318,11 +309,9 @@ cperf_set_ops_cipher(struct rte_crypto_op **ops, } } - - return 0; } -static int +static void cperf_set_ops_auth(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -420,10 +409,9 @@ cperf_set_ops_auth(struct rte_crypto_op **ops, } } } - return 0; } -static int +static void cperf_set_ops_cipher_auth(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -539,11 +527,9 @@ cperf_set_ops_cipher_auth(struct rte_crypto_op **ops, } } - - return 0; } -static int +static void cperf_set_ops_aead(struct rte_crypto_op **ops, uint32_t src_buf_offset, uint32_t dst_buf_offset, uint16_t nb_ops, struct rte_cryptodev_sym_session *sess, @@ -652,8 +638,6 @@ cperf_set_ops_aead(struct rte_crypto_op **ops, } } } - - return 0; } static struct rte_cryptodev_sym_session * diff --git a/app/test-crypto-perf/cperf_ops.h b/app/test-crypto-perf/cperf_ops.h index 30d38f90e3..1d2fbb4e30 100644 --- a/app/test-crypto-perf/cperf_ops.h +++ b/app/test-crypto-perf/cperf_ops.h @@ -18,7 +18,7 @@ typedef struct rte_cryptodev_sym_session 
*(*cperf_sessions_create_t)( const struct cperf_test_vector *test_vector, uint16_t iv_offset); -typedef int (*cperf_populate_ops_t)(struct rte_crypto_op **ops, +typedef