RE: [PATCH v3] ethdev: add flow rule actions update API

2023-05-23 Thread Ori Kam
Hi 

> -Original Message-
> From: Ferruh Yigit 
> Sent: Monday, May 22, 2023 1:28 PM
> 
> On 5/18/2023 10:48 PM, Alexander Kozyrev wrote:
> > Introduce the new rte_flow_update() API allowing users
> > to update the action list in the already existing rule.
> 
> If the API is only to update actions, does it make sense to rename it to
> explicitly state this, like:
> `rte_flow_action_update()`
> 
> Same for async version of the API.
> 

I'm O.K with the suggested name.
Maybe just change action to actions?

Best,
Ori

> > Flow rules can be updated now without the need to destroy
> > the rule first and create a new one instead.
> > A single API call ensures that no packets are lost by
> > guaranteeing atomicity and flow state correctness.
> > The rte_flow_async_update() is added as well.
> > The matcher is not updated, only the action list is.
> >
> > Signed-off-by: Alexander Kozyrev 
> 
> <...>
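
For context, a minimal usage sketch of the API under discussion. The final
name (rte_flow_actions_update(), following the rename suggested above) and
the exact signature are assumptions based on the v3 patch, not the merged
API:

    #include <rte_flow.h>

    /* Hypothetical usage of the proposed actions-update API: atomically
     * replace a rule's action list; the rule's match pattern is left
     * untouched, so no packets are dropped during the swap. */
    static int
    update_rule_actions(uint16_t port_id, struct rte_flow *flow)
    {
            struct rte_flow_action_mark mark = { .id = 42 };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error error;

            return rte_flow_actions_update(port_id, flow, actions, &error);
    }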


Minutes of Technical Board Meeting, 2023-April-19

2023-05-23 Thread Maxime Coquelin

Minutes of Technical Board Meeting, 2023-April-19

Members Attending
-----------------
-Aaron
-Bruce
-Hemant
-Honnappa
-Jerin
-Kevin
-Konstantin
-Maxime (Chair)
-Stephen
-Thomas


NOTE: The technical board meets every second Wednesday at
https://meet.jit.si/DPDK at 3 pm UTC.

Meetings are public, and DPDK community members are welcome to attend.

NOTE: Next meeting will be on Wednesday 2023-May-3 @3pm UTC, and will be 
chaired by Stephen.


1) Welcoming Dave Young, our new technical writer
- Dave will start his new role on June 12th.
- He lives in Georgia (US) and has a Master's degree in Professional
Writing.

2) Direct-rearm/Buffer-recycle patch set discussions
- Honnappa and Ferruh put together a slide deck presenting the feature
- Goal of the series is to provide a new mechanism to free the buffers
directly into the Tx software ring, and so avoid free/alloc from the
lcore cache.
- Gains up to 17% with l3fwd on Ampere Altra
- No major objections from the Technical board, but suggestion to
provide a new forward mode to testpmd to exercise the new API, and also
to add a namespace to the API names to highlight they belong to the same
feature.

3) Technical board updates for the Governing board meeting
- Kevin asked if Techboard members had updates to share at the Governing
board meeting.

- He will send a list of updates to the Techboard mailing list.

4) Marketing group updates
- Thomas invites the Techboard to join the Marketing group meetings.



[RFC] net/iavf: handle iavf reset gracefully

2023-05-23 Thread Shiyang He
Originally, when the VF received a PF-to-VF reset event, the iavf PMD did
not perform any special actions, leaving the VF offline and unavailable.
This commit handles the PF-to-VF reset event by performing all necessary
actions to bring the VF back online and available.

Signed-off-by: Shiyang He 
---
 drivers/net/iavf/iavf.h|  29 +++
 drivers/net/iavf/iavf_ethdev.c | 144 +
 drivers/net/iavf/iavf_rxtx.c   |  25 ++
 drivers/net/iavf/iavf_rxtx.h   |   1 +
 drivers/net/iavf/iavf_vchnl.c  |  12 +++
 5 files changed, 211 insertions(+)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index aa18650ffa..79030944f7 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -95,6 +95,24 @@
 
 #define IAVF_L2TPV2_FLAGS_LEN  0x4000
 
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (   \
+{  \
+   const typeof(d) __d = d;\
+   (((n) + (__d) - 1) / (__d));\
+}  \
+)
+#endif
+#ifndef DELAY
+#define DELAY(x) rte_delay_us(x)
+#endif
+#ifndef msleep
+#define msleep(x) DELAY(1000 * (x))
+#endif
+#ifndef usleep_range
+#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000))
+#endif
+
 struct iavf_adapter;
 struct iavf_rx_queue;
 struct iavf_tx_queue;
@@ -279,6 +297,8 @@ struct iavf_info {
 
uint32_t ptp_caps;
rte_spinlock_t phc_time_aq_lock;
+
+   bool auto_reset_enabled;
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -423,6 +443,14 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum 
virtchnl_ops ops)
 
return !ret;
 }
+
+static inline bool
+iavf_is_reset(struct iavf_hw *hw)
+{
+   return !(IAVF_READ_REG(hw, IAVF_VF_ARQLEN1) &
+IAVF_VF_ARQLEN1_ARQENABLE_MASK);
+}
+
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
 void iavf_dev_event_handler_fini(void);
@@ -498,4 +526,5 @@ int iavf_flow_unsub(struct iavf_adapter *adapter,
struct iavf_fsub_conf *filter);
 int iavf_flow_sub_check(struct iavf_adapter *adapter,
struct iavf_fsub_conf *filter);
+int iavf_handle_hw_reset(struct rte_eth_dev *dev);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index f6d68403ce..f421febcac 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -337,6 +337,12 @@ iavf_dev_watchdog_disable(struct iavf_adapter *adapter 
__rte_unused)
 #endif
 }
 
+static void
+iavf_dev_auto_reset_enable(struct iavf_adapter *adapter)
+{
+   adapter->vf.auto_reset_enabled = true;
+}
+
 static int
 iavf_set_mc_addr_list(struct rte_eth_dev *dev,
struct rte_ether_addr *mc_addrs,
@@ -2687,6 +2693,8 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
iavf_dev_watchdog_enable(adapter);
adapter->closed = false;
 
+   iavf_dev_auto_reset_enable(adapter);
+
return 0;
 
 flow_init_err:
@@ -2814,6 +2822,142 @@ iavf_dev_reset(struct rte_eth_dev *dev)
return iavf_dev_init(dev);
 }
 
+static bool
+iavf_is_reset_detected(struct iavf_adapter *adapter)
+{
+   struct iavf_hw *hw = &adapter->hw;
+   int i;
+
+   /* poll until we see the reset actually happen */
+   for (i = 0; i < IAVF_RESET_WAIT_CNT; i++) {
+   if (iavf_is_reset(hw))
+   return true;
+   usleep_range(5000, 1);
+   }
+
+   return false;
+}
+
+static int
+iavf_uninit_hw(struct rte_eth_dev *dev, struct iavf_hw *hw)
+{
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+   struct rte_intr_handle *intr_handle = dev->intr_handle;
+   struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+   struct iavf_adapter *adapter = dev->data->dev_private;
+
+   iavf_reset_queues(dev);
+
+   /* Disable the interrupt for Rx */
+   rte_intr_efd_disable(intr_handle);
+   /* Rx interrupt vector mapping free */
+   rte_intr_vec_list_free(intr_handle);
+
+   adapter->stopped = 1;
+   dev->data->dev_started = 0;
+
+   adapter->closed = true;
+
+   /* free iAVF security device context all related resources */
+   iavf_security_ctx_destroy(adapter);
+
+   iavf_flow_flush(dev, NULL);
+   iavf_flow_uninit(adapter);
+
+   if (vf->promisc_unicast_enabled || vf->promisc_multicast_enabled)
+   iavf_config_promisc(adapter, false, false);
+
+   iavf_shutdown_adminq(hw);
+   if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
+   /* disable uio intr before callback unregister */
+   rte_intr_disable(intr_handle);
+
+   /* unregister callback func from eal lib */
+   rte_intr_callback_unregister(intr_handle,
+iavf_dev_interrupt_handler, dev);
+   }
+   

Re: [RFC v2 1/2] dts: add smoke tests

2023-05-23 Thread Juraj Linkeš
Hi Jeremy, first, a few general points:

1. Send patches to maintainers (Thomas, me, Honnappa, Lijuan and
anyone else involved with DTS or who might be interested) and add the
devlist to cc.
2. Run the linter script before submitting.
3. The use of the various nested objects breaks the current
abstractions. The basic idea is that the test suite developers should
ideally only use the sut/tg node objects and those objects should
delegate logic further to their nested objects. More below.

I have many comments about the implementation, but I haven't run it
yet. I'm going to do that after this round of comments and I may have
more ideas.

On Fri, May 12, 2023 at 9:28 PM  wrote:
>
> From: Jeremy Spewock 
>
> Adds a new test suite for running smoke tests that verify general
> configuration aspects of the system under test. If any of these tests
> fail, the DTS execution terminates as part of a "fail-fast" model.
>
> Signed-off-by: Jeremy Spewock 
> ---
>  dts/conf.yaml |  9 ++
>  dts/framework/config/__init__.py  | 21 +
>  dts/framework/config/conf_yaml_schema.json| 32 ++-
>  dts/framework/dts.py  | 19 +++-
>  dts/framework/exception.py| 11 +++
>  dts/framework/remote_session/os_session.py|  6 +-
>  .../remote_session/remote/__init__.py | 28 ++
>  dts/framework/test_result.py  | 13 ++-
>  dts/framework/test_suite.py   | 24 -
>  dts/framework/testbed_model/__init__.py   |  5 +
>  .../interactive_apps/__init__.py  |  6 ++
>  .../interactive_apps/interactive_command.py   | 57 +++
>  .../interactive_apps/testpmd_driver.py| 24 +
>  dts/framework/testbed_model/node.py   |  2 +
>  dts/framework/testbed_model/sut_node.py   |  6 ++
>  dts/tests/TestSuite_smoke_tests.py| 94 +++
>  16 files changed, 348 insertions(+), 9 deletions(-)
>  create mode 100644 dts/framework/testbed_model/interactive_apps/__init__.py
>  create mode 100644 
> dts/framework/testbed_model/interactive_apps/interactive_command.py
>  create mode 100644 
> dts/framework/testbed_model/interactive_apps/testpmd_driver.py

Let's not add any more levels. I don't like even the current hw
subdirectory (which I want to remove in the docs patch) and we don't
need it. I'd also like to move this functionality into remote_session,
as it's handling a type of remote session.

>  create mode 100644 dts/tests/TestSuite_smoke_tests.py
>
> diff --git a/dts/conf.yaml b/dts/conf.yaml
> index a9bd8a3e..042ef954 100644
> --- a/dts/conf.yaml
> +++ b/dts/conf.yaml
> @@ -10,13 +10,22 @@ executions:
>  compiler_wrapper: ccache
>  perf: false
>  func: true
> +nics: #physical devices to be used for testing
> +  - addresses:
> +  - ":11:00.0"
> +  - ":11:00.1"
> +driver: "i40e"
> +vdevs: #names of virtual devices to be used for testing
> +  - "crypto_openssl"

I believe we specified the NICs under SUTs in the original DTS, just
as Owen did in his internal GitLab patch. If you can access it, have a
look at how he did it.
This brings an interesting question of where we want to specify which
NICs/vdevs to test. It could be on the SUT level, but also on the
execution or even the build target level. This should be informed by
testing needs. What makes the most sense? We could specify NIC details
per SUT/TG and then just reference the NICs on the execution/build
target level.

>  test_suites:
> +  - smoke_tests
>- hello_world
>  system_under_test: "SUT 1"
>  nodes:
>- name: "SUT 1"
>  hostname: sut1.change.me.localhost
>  user: root
> +password: ""

This was deliberately left out to discourage the use of passwords.

>  arch: x86_64
>  os: linux
>  lcores: ""



> diff --git a/dts/framework/dts.py b/dts/framework/dts.py
> index 05022845..0d03e158 100644
> --- a/dts/framework/dts.py
> +++ b/dts/framework/dts.py
> @@ -5,6 +5,8 @@
>
>  import sys
>
> +from .exception import BlockingTestSuiteError
> +
>  from .config import CONFIGURATION, BuildTargetConfiguration, 
> ExecutionConfiguration
>  from .logger import DTSLOG, getLogger
>  from .test_result import BuildTargetResult, DTSResult, ExecutionResult, 
> Result
> @@ -49,6 +51,7 @@ def run_all() -> None:
>  nodes[sut_node.name] = sut_node
>
>  if sut_node:
> +#SMOKE TEST EXECUTION GOES HERE!
>  _run_execution(sut_node, execution, result)
>
>  except Exception as e:
> @@ -118,7 +121,7 @@ def _run_build_target(
>
>  try:
>  sut_node.set_up_build_target(build_target)
> -result.dpdk_version = sut_node.dpdk_version
> +# result.dpdk_version = sut_node.dpdk_version
>  build_target_result.update_setup(Result.PASS)
>  except Exception as e:
>  dts_logger.exception("Build target setup failed.")
> @@ -146

RE: [EXT] Re: [PATCH v2] lib/cryptodev: fix assertion to remove GCC compilation warning

2023-05-23 Thread Akhil Goyal
> On Mon, 22 May 2023 15:04:52 -0400
> Kamil Godzwon  wrote:
> 
> > /home/vagrant/dpdk/build/include/rte_crypto_sym.h:1009:4: \
> > warning: Value stored to 'left' is never read [deadcode.DeadStores]
> >   left = 0;
> >   ^  ~
> >   1 warning generated.
> >
> > The compiler sees that the variable 'left' is never read after
> > being assigned '0'. To get rid of this warning message, use an 'if'
> > condition to verify the 'left' value before RTE_ASSERT.
> >
> > Signed-off-by: Kamil Godzwon 
> > ---
> > v2:
> > Changed commit message as the line was too long
> > Removed braces
> > ---
> >  lib/cryptodev/rte_crypto_sym.h | 4 +++-
> >  1 file changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/lib/cryptodev/rte_crypto_sym.h b/lib/cryptodev/rte_crypto_sym.h
> > index b43174dbec..dcef1a5049 100644
> > --- a/lib/cryptodev/rte_crypto_sym.h
> > +++ b/lib/cryptodev/rte_crypto_sym.h
> > @@ -1016,7 +1016,9 @@ rte_crypto_mbuf_to_vec(const struct rte_mbuf
> *mb, uint32_t ofs, uint32_t len,
> > left -= seglen;
> > }
> >
> > -   RTE_ASSERT(left == 0);
> > +   if (left != 0)
> > +   RTE_ASSERT(false);
> > +
> > return i;
> >  }
> >
> 
> This could happen if the passed in length to this routine was larger than
> the amount of data in the mbuf. Should the function check and return an error?
> 
> Panic should only be reserved for seriously corrupted input (like invalid 
> mbuf).
> 
> Also, this is a big enough function that it really should not be inlined.

This is a datapath API. RTE_ASSERT is normally not enabled in release builds,
so this assert does not perform any check in the normal scenario.
We normally avoid this type of error check in the datapath.
And while building in debug mode, we need these asserts to also give a
backtrace to debug the root cause of the issue.

I would suggest fixing the assert itself instead of adding a check.
Current patch will affect performance.

Agreed that the function is big for an inline function,
but that is what all the datapath APIs are, and
we keep them inline to improve performance.
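
As an aside, one hedged sketch of "fixing the assert itself" (an
illustration, not necessarily the merged fix): keep RTE_ASSERT for debug
builds and mark the variable as used, so release builds see neither a dead
store nor an extra datapath branch:

    #include <rte_common.h>
    #include <rte_debug.h>

    static inline uint32_t
    consume_all(uint32_t left)
    {
            /* ... datapath loop that decrements 'left' elided ... */
            RTE_ASSERT(left == 0);  /* checked only when RTE_ENABLE_ASSERT is set */
            RTE_SET_USED(left);     /* no-op; silences 'value never read'
                                     * in release builds, adds no branch */
            return 0;
    }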




RE: [PATCH v6 01/15] graph: rename rte_graph_work as common

2023-05-23 Thread Yan, Zhirun


> -Original Message-
> From: Jerin Jacob 
> Sent: Monday, May 22, 2023 4:26 PM
> To: Yan, Zhirun 
> Cc: dev@dpdk.org; jer...@marvell.com; kirankum...@marvell.com;
> ndabilpu...@marvell.com; step...@networkplumber.org;
> pbhagavat...@marvell.com; Liang, Cunming ; Wang,
> Haiyue 
> Subject: Re: [PATCH v6 01/15] graph: rename rte_graph_work as common
> 
> On Tue, May 9, 2023 at 11:34 AM Zhirun Yan  wrote:
> >
> > Rename rte_graph_work.h to rte_graph_work_common.h for supporting
> > multiple graph worker model.
> 
> 
> I have requested to check the performance with dpdk-test and l3fwd graph in
> the last series.
> Have you checked the performance? In my testing, there is a regression.
> Please check the performance with dpdk-test and l3fwd graph; there should not
> be any regression in RTC mode.
> 
> There is around a -300% regression on arm64 and x86.
> Command to measure:
> ./build/app/test/dpdk-test -c 0xf0 -- graph_perf_autotest
> 
> There is around a ~-2% regression in l3fwd-graph. I don't think there should
> be any reason for regression, as the models are in separate header files.
> Please check the common header file in the fastpath and fix the regression
> to get this series accepted.
> 
> ./build/examples/dpdk-l3fwd-graph -a 0002:02:00.0 -c 0xc0  -- -p
> 0x1 --config="(0, 0, 23)" -P (edited)
> Old
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> |Node           |calls      |objs         |realloc_count  |objs/call  |objs/sec(10E6)  |cycles/call  |
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> |ip4_lookup     |7282757    |1864385584   |1              |256.000    |38.704896       |1770.        |
> |ip4_rewrite    |7282758    |1864385840   |1              |256.000    |38.704896       |1431.        |
> |ethdev_tx-0    |7282758    |1864385840   |1              |256.000    |38.704896       |922.         |
> |ethdev_rx-0-0  |14882133   |1864386096   |2              |256.000    |38.704896       |2015.        |
> |pkt_cls        |7282760    |1864386352   |1              |256.000    |38.704896       |392.         |
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> 
> New
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> |Node           |calls      |objs         |realloc_count  |objs/call  |objs/sec(10E6)  |cycles/call  |
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> |ip4_lookup     |3002135    |768546560    |2              |256.000    |38.402048       |1770.        |
> |ip4_rewrite    |3002136    |768546816    |1              |256.000    |38.402048       |1425.        |
> |ethdev_tx-0    |3002137    |768547072    |2              |256.000    |38.402048       |949.         |
> |ethdev_rx-0-0  |3002138    |768547328    |2              |256.000    |38.402048       |1966.        |
> |pkt_cls        |3002138    |768547328    |1              |256.000    |38.402048       |408.         |
> +---------------+-----------+-------------+---------------+-----------+----------------+-------------+
> 
> NAK for this series until the performance issues are fixed.
> 

The root cause comes from the V5->V6 change from rte_rdtsc() to
rte_rdtsc_precise() in the node processing path in patch 03.

rte_rdtsc_precise() is heavier than rte_rdtsc(), and the graph walk calls
__rte_node_process() for each node.

I will revert this change.
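
For context, the cost difference is visible from the generic definitions (a
paraphrased sketch of DPDK's rte_cycles.h; treat the exact costs as rough
assumptions):

    #include <rte_cycles.h>
    #include <rte_atomic.h>

    /* rte_rdtsc() is a plain counter read; rte_rdtsc_precise() issues a
     * full memory barrier first so the read cannot be reordered: */
    static inline uint64_t
    rdtsc_precise_sketch(void)
    {
            rte_mb();            /* serializing barrier: the expensive part */
            return rte_rdtsc();  /* cheap counter read */
    }

Issued once per node per graph walk, that extra barrier cost multiplies
across the whole graph, which matches the regression seen above.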


> 
> 
> >
> > Signed-off-by: Haiyue Wang 
> > Signed-off-by: Cunming Liang 
> > Signed-off-by: Zhirun Yan 
> > ---
> 
> > diff --git a/MAINTAINERS b/MAINTAINERS index 8df23e5099..cc11328242
> > 100644
> > --- a/MAINTAINERS
> > +++ b/MAINTAINERS
> > @@ -1714,6 +1714,7 @@ F: doc/guides/prog_guide/bpf_lib.rst  Graph -
> > EXPERIMENTAL
> >  M: Jerin Jacob 
> >  M: Kiran Kumar K 
> > +M: Zhirun Yan 
> 
> Thanks for adding as maintainer.
> Since you are at this change, could you move "Nithin Dabilpuram
> " up two lines and group them all together?


Yes, I will do in next version.
> 
> >  F: lib/graph/
> >  F: doc/guides/prog_guide/graph_lib.rst
> >  F: app/test/test_graph*


Re: [RFC v2 1/2] dts: add smoke tests

2023-05-23 Thread Juraj Linkeš
On Tue, May 23, 2023 at 10:05 AM Juraj Linkeš
 wrote:
>
> Hi Jeremy, first, a few general points:
>

One more general point - don't forget to add licenses to all new files
and possibly update licenses in other affected files.

Juraj


[PATCH v1] common/qat: fix qat_dev_cmd_param corruption

2023-05-23 Thread Vikash Poddar
Add a fix to address the memory corruption issue for the
qat_dev_cmd_param structure on QAT GEN3.

The slice configuration read from QAT capabilities is wider than the
16-bit storage previously used, so this fix widens the storage to a
4-byte unsigned integer to match the size of the value read.

Fixes: b3cbbcdffa4f ("common/qat: read HW slice configuration")
Cc: arkadiuszx.kusz...@intel.com

Signed-off-by: Vikash Poddar 
---
 drivers/common/qat/dev/qat_dev_gen1.c | 2 +-
 drivers/common/qat/dev/qat_dev_gen2.c | 2 +-
 drivers/common/qat/dev/qat_dev_gen3.c | 2 +-
 drivers/common/qat/dev/qat_dev_gen4.c | 2 +-
 drivers/common/qat/qat_device.c   | 2 +-
 drivers/common/qat/qat_device.h   | 2 +-
 6 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/common/qat/dev/qat_dev_gen1.c 
b/drivers/common/qat/dev/qat_dev_gen1.c
index cf480dcba8..dd2e878e90 100644
--- a/drivers/common/qat/dev/qat_dev_gen1.c
+++ b/drivers/common/qat/dev/qat_dev_gen1.c
@@ -242,7 +242,7 @@ qat_dev_get_extra_size_gen1(void)
 }
 
 static int
-qat_get_dev_slice_map_gen1(uint16_t *map __rte_unused,
+qat_get_dev_slice_map_gen1(uint32_t *map __rte_unused,
const struct rte_pci_device *pci_dev __rte_unused)
 {
return 0;
diff --git a/drivers/common/qat/dev/qat_dev_gen2.c 
b/drivers/common/qat/dev/qat_dev_gen2.c
index f51be46eb0..061dfdb698 100644
--- a/drivers/common/qat/dev/qat_dev_gen2.c
+++ b/drivers/common/qat/dev/qat_dev_gen2.c
@@ -22,7 +22,7 @@ static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen2 = {
 };
 
 static int
-qat_dev_get_slice_map_gen2(uint16_t *map __rte_unused,
+qat_dev_get_slice_map_gen2(uint32_t *map __rte_unused,
const struct rte_pci_device *pci_dev __rte_unused)
 {
return 0;
diff --git a/drivers/common/qat/dev/qat_dev_gen3.c 
b/drivers/common/qat/dev/qat_dev_gen3.c
index e4197f3c0f..f01b98ff86 100644
--- a/drivers/common/qat/dev/qat_dev_gen3.c
+++ b/drivers/common/qat/dev/qat_dev_gen3.c
@@ -68,7 +68,7 @@ static struct qat_qp_hw_spec_funcs qat_qp_hw_spec_gen3 = {
 };
 
 static int
-qat_dev_get_slice_map_gen3(uint16_t *map,
+qat_dev_get_slice_map_gen3(uint32_t *map,
const struct rte_pci_device *pci_dev)
 {
if (rte_pci_read_config(pci_dev, map,
diff --git a/drivers/common/qat/dev/qat_dev_gen4.c 
b/drivers/common/qat/dev/qat_dev_gen4.c
index 1b3a5deabf..1ce262f715 100644
--- a/drivers/common/qat/dev/qat_dev_gen4.c
+++ b/drivers/common/qat/dev/qat_dev_gen4.c
@@ -283,7 +283,7 @@ qat_dev_get_misc_bar_gen4(struct rte_mem_resource 
**mem_resource,
 }
 
 static int
-qat_dev_get_slice_map_gen4(uint16_t *map __rte_unused,
+qat_dev_get_slice_map_gen4(uint32_t *map __rte_unused,
const struct rte_pci_device *pci_dev __rte_unused)
 {
return 0;
diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c
index 8bce2ac073..ed75b66041 100644
--- a/drivers/common/qat/qat_device.c
+++ b/drivers/common/qat/qat_device.c
@@ -361,7 +361,7 @@ static int qat_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
 {
int sym_ret = 0, asym_ret = 0, comp_ret = 0;
int num_pmds_created = 0;
-   uint16_t capa = 0;
+   uint32_t capa = 0;
struct qat_pci_device *qat_pci_dev;
struct qat_dev_hw_spec_funcs *ops_hw;
struct qat_dev_cmd_param qat_dev_cmd_param[] = {
diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h
index bc3da04238..4a79cdded3 100644
--- a/drivers/common/qat/qat_device.h
+++ b/drivers/common/qat/qat_device.h
@@ -37,7 +37,7 @@ typedef int (*qat_dev_get_misc_bar_t)
 typedef int (*qat_dev_read_config_t)
(struct qat_pci_device *);
 typedef int (*qat_dev_get_extra_size_t)(void);
-typedef int (*qat_dev_get_slice_map_t)(uint16_t *map,
+typedef int (*qat_dev_get_slice_map_t)(uint32_t *map,
const struct rte_pci_device *pci_dev);
 
 struct qat_dev_hw_spec_funcs {
-- 
2.25.1




[PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs

2023-05-23 Thread Ashwin Sekhar T K
Add ROC APIs which allow creating NPA auras independently and
attaching them to an existing NPA pool. Also add an API to destroy
NPA auras independently.

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/common/cnxk/roc_npa.c   | 219 
 drivers/common/cnxk/roc_npa.h   |   4 +
 drivers/common/cnxk/version.map |   2 +
 3 files changed, 225 insertions(+)

diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 20637fbf65..e3c925ddd1 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, 
struct npa_aura_s *aura
return rc;
 }
 
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+   struct npa_aq_enq_req *aura_init_req;
+   struct npa_aq_enq_rsp *aura_init_rsp;
+   struct mbox *mbox;
+   int rc = -ENOSPC;
+
+   mbox = mbox_get(m_box);
+   aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+   if (aura_init_req == NULL)
+   goto exit;
+   aura_init_req->aura_id = aura_id;
+   aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+   aura_init_req->op = NPA_AQ_INSTOP_INIT;
+   mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+   rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+   if (rc < 0)
+   goto exit;
+
+   if (aura_init_rsp->hdr.rc == 0)
+   rc = 0;
+   else
+   rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
 static int
 npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 {
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, 
uint64_t aura_handle)
return rc;
 }
 
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+   struct npa_aq_enq_req *aura_req;
+   struct npa_aq_enq_rsp *aura_rsp;
+   struct ndc_sync_op *ndc_req;
+   struct mbox *mbox;
+   int rc = -ENOSPC;
+
+   /* Procedure for disabling an aura/pool */
+   plt_delay_us(10);
+
+   mbox = mbox_get(m_box);
+   aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+   if (aura_req == NULL)
+   goto exit;
+   aura_req->aura_id = aura_id;
+   aura_req->ctype = NPA_AQ_CTYPE_AURA;
+   aura_req->op = NPA_AQ_INSTOP_WRITE;
+   aura_req->aura.ena = 0;
+   aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+   rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+   if (rc < 0)
+   goto exit;
+
+   if (aura_rsp->hdr.rc != 0)
+   return NPA_ERR_AURA_POOL_FINI;
+
+   /* Sync NDC-NPA for LF */
+   ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+   if (ndc_req == NULL) {
+   rc = -ENOSPC;
+   goto exit;
+   }
+   ndc_req->npa_lf_sync = 1;
+   rc = mbox_process(mbox);
+   if (rc) {
+   plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+   rc = NPA_ERR_AURA_POOL_FINI;
+   goto exit;
+   }
+   rc = 0;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
 int
 roc_npa_pool_op_pc_reset(uint64_t aura_handle)
 {
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t 
block_size,
return rc;
 }
 
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+  struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+   int rc, aura_id;
+
+   /* Sanity check */
+   if (!lf || !aura || !aura_handle)
+   return NPA_ERR_PARAM;
+
+   roc_npa_dev_lock();
+   /* Get aura_id from resource bitmap */
+   aura_id = find_free_aura(lf, flags);
+   if (aura_id < 0) {
+   roc_npa_dev_unlock();
+   return NPA_ERR_AURA_ID_ALLOC;
+   }
+
+   /* Mark aura as reserved */
+   plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+   roc_npa_dev_unlock();
+   rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+   NPA_ERR_AURA_ID_ALLOC :
+   0;
+   if (rc)
+   goto exit;
+
+   /* Update aura fields */
+   aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+   aura->ena = 1;
+   aura->shift = plt_log2_u32(block_count);
+   aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+   aura->limit = block_count;
+   aura->pool_caching = 1;
+   aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+   aura->avg_con = 0;
+   /* Many to one reduction */
+   aura->err_qint_idx = aura_id % lf->qints;
+
+   /* Issue AURA_INIT and POOL_INIT op */
+   rc = npa_aura_init(lf->mbox, aura_

[PATCH v2 1/5] mempool/cnxk: use pool config to pass flags

2023-05-23 Thread Ashwin Sekhar T K
Use the lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/mempool/cnxk/cnxk_mempool.h | 24 
 drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++---
 drivers/net/cnxk/cnxk_ethdev_sec.c  | 25 ++---
 3 files changed, 40 insertions(+), 26 deletions(-)
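
The packing scheme mirrors the CNXK_MEMPOOL_FLAGS/CONFIG macros added
below; here is a standalone sketch of the pointer-tagging idea (names are
hypothetical), which relies on the config object being aligned so its low
four bits are always zero:

    #include <stdint.h>

    #define POOL_CFG_FLAG_MASK 0xFUL  /* low 4 bits carry the flags */

    /* 'cfg' must be at least 16-byte aligned for this to be safe */
    static void *
    pool_cfg_pack(void *cfg, uint64_t flags)
    {
            return (void *)((uintptr_t)cfg | (flags & POOL_CFG_FLAG_MASK));
    }

    static uint64_t
    pool_cfg_flags(const void *packed)
    {
            return (uintptr_t)packed & POOL_CFG_FLAG_MASK;
    }

    static void *
    pool_cfg_ptr(const void *packed)
    {
            return (void *)((uintptr_t)packed & ~(uintptr_t)POOL_CFG_FLAG_MASK);
    }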

diff --git a/drivers/mempool/cnxk/cnxk_mempool.h 
b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
 
 #include 
 
+enum cnxk_mempool_flags {
+   /* This flag is used to ensure that only aura zero is allocated.
+* If aura zero is not available, then mempool creation fails.
+*/
+   CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+   /* Here the pool create will use the npa_aura_s structure passed
+* as pool config to create the pool.
+*/
+   CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m)                                         \
+   (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m)                                        \
+   (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f)                                 \
+   do {   \
+   void *_c = CNXK_MEMPOOL_CONFIG(_m);\
+   uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f);   \
+   (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags);   \
+   } while (0)
+
 unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
 ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
   uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c 
b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, 
uint32_t obj_num,
 int
 cnxk_mempool_alloc(struct rte_mempool *mp)
 {
-   uint32_t block_count, flags = 0;
+   uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
 
-   /* Use driver specific mp->pool_config to override aura config */
-   if (mp->pool_config != NULL)
-   memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+   flags = CNXK_MEMPOOL_FLAGS(mp);
+   if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+   roc_flags = ROC_NPA_ZERO_AURA_F;
+   } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+   struct npa_aura_s *paura;
 
-   if (aura.ena && aura.pool_addr == 0)
-   flags = ROC_NPA_ZERO_AURA_F;
+   paura = CNXK_MEMPOOL_CONFIG(mp);
+   memcpy(&aura, paura, sizeof(struct npa_aura_s));
+   }
 
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
-&pool, flags);
+&pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c 
b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
  */
 
 #include 
+#include 
 
 #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
 
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
 {
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
-   struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
return -EINVAL;
}
 
-   plt_free(mp->pool_config);
rte_mempool_free(mp);
 
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
return -EIO;
}
 
-   /* Indicate to allocate zero aura */
-   aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
-   if (!aura) {
-   rc = -ENOMEM;
-   goto free_mp;
-   }
-   aura->ena = 1;
-   if (!mempool_name)
-   aura->pool_addr = 0;
-   else
-   a

[PATCH v2] crypto/qat: support to enable insecure algorithms

2023-05-23 Thread Vikash Poddar
All the insecure algorithms are disabled by default in
cryptodev Gen 1, 2, 3 and 4.
Use qat_legacy_capa to enable all the legacy
algorithms.
These changes affect both sym and asym insecure crypto
algorithms.

Signed-off-by: Vikash Poddar 
---
v2:
Extend the support to enable the insecure algorithm in
QAT Gen 1,3 and 4 for sym as well as asym.
---
 app/test/test_cryptodev_asym.c   |  28 +++--
 doc/guides/cryptodevs/qat.rst|  13 +++
 drivers/common/qat/qat_device.c  |   1 +
 drivers/common/qat/qat_device.h  |   3 +-
 drivers/crypto/qat/dev/qat_crypto_pmd_gen2.c |  88 ---
 drivers/crypto/qat/dev/qat_crypto_pmd_gen3.c | 113 +++
 drivers/crypto/qat/dev/qat_crypto_pmd_gen4.c |  64 ++-
 drivers/crypto/qat/dev/qat_sym_pmd_gen1.c|  90 ---
 drivers/crypto/qat/qat_asym.c|  16 ++-
 drivers/crypto/qat/qat_crypto.h  |   1 +
 drivers/crypto/qat/qat_sym.c |   3 +
 11 files changed, 255 insertions(+), 165 deletions(-)

diff --git a/app/test/test_cryptodev_asym.c b/app/test/test_cryptodev_asym.c
index 9236817650..bb32d81e57 100644
--- a/app/test/test_cryptodev_asym.c
+++ b/app/test/test_cryptodev_asym.c
@@ -453,11 +453,14 @@ test_cryptodev_asym_op(struct 
crypto_testsuite_params_asym *ts_params,
ret = rte_cryptodev_asym_session_create(dev_id, &xform_tc,
ts_params->session_mpool, &sess);
if (ret < 0) {
-   snprintf(test_msg, ASYM_TEST_MSG_LEN,
-   "line %u "
-   "FAILED: %s", __LINE__,
-   "Session creation failed");
status = (ret == -ENOTSUP) ? TEST_SKIPPED : TEST_FAILED;
+   if (status == TEST_SKIPPED)
+   snprintf(test_msg, ASYM_TEST_MSG_LEN, 
"SKIPPED");
+   else
+   snprintf(test_msg, ASYM_TEST_MSG_LEN,
+   "line %u "
+   "FAILED: %s", __LINE__,
+   "Session creation failed");
goto error_exit;
}
 
@@ -489,6 +492,11 @@ test_cryptodev_asym_op(struct crypto_testsuite_params_asym 
*ts_params,
}
 
if (test_cryptodev_asym_ver(op, &xform_tc, data_tc, result_op) != 
TEST_SUCCESS) {
+   if (result_op->status == RTE_CRYPTO_OP_STATUS_INVALID_ARGS) {
+   snprintf(test_msg, ASYM_TEST_MSG_LEN, "SESSIONLESS 
SKIPPED");
+   status = TEST_SKIPPED;
+   goto error_exit;
+   }
snprintf(test_msg, ASYM_TEST_MSG_LEN,
"line %u FAILED: %s",
__LINE__, "Verification failed ");
@@ -619,13 +627,19 @@ test_one_by_one(void)
/* Go through all test cases */
test_index = 0;
for (i = 0; i < test_vector.size; i++) {
-   if (test_one_case(test_vector.address[i], 0) != TEST_SUCCESS)
+   status = test_one_case(test_vector.address[i], 0);
+   if (status == TEST_SUCCESS || status == TEST_SKIPPED)
+   status = TEST_SUCCESS;
+   else
status = TEST_FAILED;
}
+
if (sessionless) {
for (i = 0; i < test_vector.size; i++) {
-   if (test_one_case(test_vector.address[i], 1)
-   != TEST_SUCCESS)
+   status = test_one_case(test_vector.address[i], 1);
+   if (status == TEST_SUCCESS || status == TEST_SKIPPED)
+   status = TEST_SUCCESS;
+   else
status = TEST_FAILED;
}
}
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index ef754106a8..f91a5e19e5 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -294,6 +294,19 @@ by comma. When the same parameter is used more than once 
first occurrence of the
 is used.
 Maximum threshold that can be set is 32.
 
+Running QAT PMD with insecure crypto algorithms
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A few insecure crypto algorithms are deprecated from QAT drivers. This needs
+to be reflected in DPDK QAT PMD.
+DPDK QAT PMD has by default disabled all the insecure crypto algorithms from
+Gen 1,2,3 and 4.
+A PMD parameter is used to enable the capability.
+
+- qat_legacy_capa
+
+To use this feature the user must set the parameter on process start as a
+device additional parameter::
+
+  -a b1:01.2,qat_legacy_capa=1
+
 Running QAT PMD with Intel IPSEC MB library for symmetric precomputes function
 


Re: [PATCH] ci: switch to Fedora 37

2023-05-23 Thread Thomas Monjalon
25/04/2023 15:13, David Marchand:
> Hello Aaron,
> 
> On Fri, Apr 21, 2023 at 11:06 PM Aaron Conole  wrote:
> > David Marchand  writes:
> > > Fedora 35 has been declared EOL in 2022/12 (see [1]).
> > > Fedora 36 will soon be EOL too.
> > >
> > > Move to Fedora 37.
> > > Fedora 37 libbpf does not support AF_XDP anymore, now provided by
> > > libxdp.
> > >
> > > 1: https://docs.fedoraproject.org/en-US/releases/eol/
> > >
> > > Signed-off-by: David Marchand 
> > > ---
> >
> > Acked-by: Aaron Conole 
> >
> > FYI, Fedora 38 also just got released.  Perhaps that can be a candidate
> > as well, but I didn't try it out.
> 
> At a first glance, gcc 13 raises some new warnings, at least for
> examples (ip-pipeline and ntb).
> We can switch to f38 once builds are fine with gcc 13.

Let's switch to Fedora 37 as a first step (we skipped Fedora 36).

Applied, thanks.




Re: [PATCH] test/mbuf: fix the forked process segment fault

2023-05-23 Thread Burakov, Anatoly

On 5/23/2023 4:45 AM, Ruifeng Wang wrote:

-Original Message-
From: Burakov, Anatoly 
Sent: Monday, May 22, 2023 6:19 PM
To: Ruifeng Wang ; olivier.m...@6wind.com
Cc: dev@dpdk.org; sta...@dpdk.org; tho...@monjalon.net; 
step...@networkplumber.org; Justin
He ; Honnappa Nagarahalli ; nd

Subject: Re: [PATCH] test/mbuf: fix the forked process segment fault

On 5/22/2023 10:55 AM, Ruifeng Wang wrote:

-Original Message-
From: Burakov, Anatoly 
Sent: Monday, May 22, 2023 5:24 PM
To: Ruifeng Wang ; olivier.m...@6wind.com
Cc: dev@dpdk.org; sta...@dpdk.org; tho...@monjalon.net;
step...@networkplumber.org; Justin He ; Honnappa
Nagarahalli ; nd 
Subject: Re: [PATCH] test/mbuf: fix the forked process segment fault

On 5/22/2023 7:01 AM, Ruifeng Wang wrote:

Accessing any memory in the hugepage shared file-backed area triggers an
unexpected segmentation fault in the forked child process. The root
cause is that DPDK doesn't support the fork model [1] (calling
rte_eal_init() before fork()).
A forked child process can't be treated as a secondary process.

Hence, fix it by avoiding fork and doing the verification in the main process.

[1] https://mails.dpdk.org/archives/dev/2018-July/108106.html
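
A minimal sketch of the unsupported pattern, for illustration (a generic
EAL application, not the test code itself):

    #include <rte_eal.h>
    #include <unistd.h>

    int
    main(int argc, char **argv)
    {
            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            /* Unsupported: the child inherits the hugepage mappings but is
             * not a registered secondary process, so touching hugepage
             * shared memory (e.g. an mbuf) in it may segfault. */
            if (fork() == 0)
                    _exit(0);  /* do any verification in the parent instead */

            return 0;
    }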

Fixes: af75078fece3 ("first public release")
Cc: sta...@dpdk.org

Signed-off-by: Jia He 
Signed-off-by: Ruifeng Wang 
---


Would this be something that a secondary process-based test could test?
That's how we test rte_panic() and other calls.


This case validates mbuf. IMO there is no need to do validation in a secondary 
process.
Unit test for rte_panic() also uses fork() and could have the same issue.



In that case, rte_panic() test should be fixed as well.

My concern is that ideally, we shouldn't intentionally crash the test app if 
something
goes wrong, and calling rte_panic() accomplishes just that - which is why I 
suggested
running them in secondary processes instead, so that any call into rte_panic 
happens
inside a secondary process, and the main test process doesn't crash even if the 
test has
failed.


I agree that intentionally crashing the test app is bad.
In this patch, verification of the mbuf is changed to use another API that
does not call rte_panic().
The verification can then be done directly in the primary process, and the
indirection of using a secondary process is removed, because the
verification will not crash the process.



Oh,

My apologies, I did not notice that. In that case,

Acked-by: Anatoly Burakov 

--
Thanks,
Anatoly



Re: [dpdk-dev] [PATCH v3] ring: fix use after free in ring release

2023-05-23 Thread Thomas Monjalon
05/05/2023 08:48, Yunjian Wang:
> After the memzone is freed, it is not removed from the 'rte_ring_tailq'.
> If rte_ring_lookup is called at this time, it will cause a use-after-free
> problem. This change prevents that from happening.
> 
> Fixes: 4e32101f9b01 ("ring: support freeing")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Yunjian Wang 
> Acked-by: Konstantin Ananyev 
> Reviewed-by: Honnappa Nagarahalli 

Applied, thanks.
That's a real pleasure to see reliability improved :)
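
For illustration, a hedged sketch of the window the fix closes (the ring
name is hypothetical): before this change, a freed ring could still be
found on the tailq by name:

    #include <rte_ring.h>

    static void
    demo_use_after_free(struct rte_ring *r)
    {
            /* assume 'r' was created as rte_ring_create("demo_ring", ...) */
            rte_ring_free(r);  /* memzone released */

            /* Before the fix, the ring stayed on 'rte_ring_tailq', so this
             * lookup could return a dangling pointer: */
            struct rte_ring *stale = rte_ring_lookup("demo_ring");
            if (stale != NULL) {
                    /* any dereference here is a use-after-free */
            }
    }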





Re: [v4] net/gve: check driver compatibility

2023-05-23 Thread Ferruh Yigit
On 5/22/2023 4:45 PM, Rushil Gupta wrote:
> 1. This is the excerpt from the google's virtual nic spec: 
> "In addition to the device-owned register file, vector table, and
> doorbells, the gVNIC device uses *DMA* (which in most cases amounts to
> ordinary memory access by host software since we're dealing with a
> virtual device, but guests must assume the device could be backed by
> actual hardware) to access physical memory. The following are all
> located in physical memory: Admin queue - 4096-byte command queue used
> for configuring gVNIC. 
> Some commands require an additional dma memory region to be passed to
> the device. These memory regions are allocated to execute the command
> and freed when the command completes."
> calloc() by default doesn't provide memory that can be shared between the
> DPDK process and the hypervisor (where the virtual device lives); that's
> the reason it doesn't work.
> 

Thanks Rushil for the info.
So, I expect gVNIC requires physical address to be passed in the admin
command, as 'driver_info_mem.iova'.

What confuses me is, latest version passes another virtual address
'driver_info' ('driver_info_mem->addr').
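
For reference, a hedged sketch of the distinction being discussed (the zone
name and size are placeholders): a memzone carries both a virtual address
for the CPU and an IOVA for the device, while calloc() memory has no IOVA
registered with DPDK:

    #include <rte_memzone.h>
    #include <rte_lcore.h>

    static const struct rte_memzone *
    reserve_dma_buf(size_t len)
    {
            const struct rte_memzone *mz = rte_memzone_reserve_aligned(
                    "dma_buf", len, rte_socket_id(),
                    RTE_MEMZONE_IOVA_CONTIG, 4096);

            if (mz != NULL) {
                    void *va = mz->addr;         /* CPU-side virtual address */
                    rte_iova_t iova = mz->iova;  /* address to hand to the device */
                    (void)va;
                    (void)iova;
            }
            return mz;
    }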

> 2. I also have a query: RHEL8 compilation in ci/Intel-compilation
> context fails due to; is this because of if `not is_linux`
> 
> meson.build:67:0: ERROR: Include dir lib/eal/linux/include does not exist.
> 

This error shouldn't be related to `not is_linux`, but I am not sure
about its root cause. If it still exists in the next version we can
communicate with the CI team for details. For now I assume this is an
infrastructure issue.

> Passes:
> http://patchwork.dpdk.org/project/dpdk/patch/20230508191552.104540-1-rush...@google.com/
>  
> 
> 
> Fails:
> http://patchwork.dpdk.org/project/dpdk/patch/20230519204618.1507956-1-rush...@google.com/
>  
> 
> 
> 
> On Mon, May 22, 2023 at 1:52 AM Ferruh Yigit  > wrote:
> 
> On 5/19/2023 9:46 PM, Rushil Gupta wrote:
> > +static int
> > +gve_verify_driver_compatibility(struct gve_priv *priv)
> > +{
> > +     const struct rte_memzone *driver_info_mem;
> > +     struct gve_driver_info *driver_info;
> > +     int err;
> > +
> > +     driver_info_mem =
> rte_memzone_reserve_aligned("verify_driver_compatibility",
> > +                     sizeof(struct gve_driver_info),
> > +                     rte_socket_id(),
> > +                     RTE_MEMZONE_IOVA_CONTIG, PAGE_SIZE);
> > +
> > +     if (driver_info_mem == NULL) {
> > +             PMD_DRV_LOG(ERR,
> > +                         "Could not alloc memzone for driver
> compatibility");
> > +             return -ENOMEM;
> > +     }
> > +     driver_info = (struct gve_driver_info *)driver_info_mem->addr;
> > +
> > +     *driver_info = (struct gve_driver_info) {
> > +             .os_type = 5, /* DPDK */
> > +             .driver_major = GVE_VERSION_MAJOR,
> > +             .driver_minor = GVE_VERSION_MINOR,
> > +             .driver_sub = GVE_VERSION_SUB,
> > +             .os_version_major = cpu_to_be32(DPDK_VERSION_MAJOR),
> > +             .os_version_minor = cpu_to_be32(DPDK_VERSION_MINOR),
> > +             .os_version_sub = cpu_to_be32(DPDK_VERSION_SUB),
> > +             .driver_capability_flags = {
> > +                     cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS1),
> > +                     cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS2),
> > +                     cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS3),
> > +                     cpu_to_be64(GVE_DRIVER_CAPABILITY_FLAGS4),
> > +             },
> > +     };
> > +
> > +     populate_driver_version_strings((char
> *)driver_info->os_version_str1,
> > +                     (char *)driver_info->os_version_str2);
> > +
> > +     err = gve_adminq_verify_driver_compatibility(priv,
> > +             sizeof(struct gve_driver_info),
> (dma_addr_t)driver_info);
> 
> Back to the previous discussion, other commands pass a physical address to
> the admin command, but this one passes a virtual address.
> To follow the same semantic, shouldn't above be 'driver_info_mem.iova'?
> 
> I asked before but not able to get an answer, what is the memory type
> requirement for device?
> Why virtual address obtained via 'calloc()' is not working, but virtual
> address from hugepages are working?
> 



Re: [v4] net/gve: check driver compatibility

2023-05-23 Thread Ferruh Yigit
On 5/19/2023 9:46 PM, Rushil Gupta wrote:
> diff --git a/drivers/net/gve/base/gve_osdep.h 
> b/drivers/net/gve/base/gve_osdep.h
> index abf3d379ae..5e8ae1eac6 100644
> --- a/drivers/net/gve/base/gve_osdep.h
> +++ b/drivers/net/gve/base/gve_osdep.h
> @@ -21,9 +21,14 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #include "../gve_logs.h"
>  
> +#ifdef __linux__
> +#include 
> +#endif
> +

Can you please use 'RTE_EXEC_ENV_LINUX' macro instead of '__linux__'?


Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread

2023-05-23 Thread Ferruh Yigit
On 5/23/2023 2:45 AM, Deng, KaiwenX wrote:
> 
> 
>> -Original Message-
>> From: Deng, KaiwenX
>> Sent: Friday, May 5, 2023 10:31 AM
>> To: Ferruh Yigit ; dev@dpdk.org
>> Cc: sta...@dpdk.org; Yang, Qiming ; Zhou, YidingX
>> ; Chas Williams ; Min Hu (Connor)
>> ; Wu, Jingjing ; Xing, Beilei
>> ; Mike Pattrick ; Zhang, Qi Z
>> ; Doherty, Declan ;
>> Mrzyglod, Daniel T ; Dapeng Yu
>> ; Zhang, Helin ;
>> Mcnamara, John ; Thomas Monjalon
>> 
>> Subject: RE: [PATCH v3] net/iavf: fix iavf query stats in intr thread
>>
>>
>>
>>> -Original Message-
>>> From: Ferruh Yigit 
>>> Sent: Monday, March 27, 2023 8:38 PM
>>> To: Deng, KaiwenX ; dev@dpdk.org
>>> Cc: sta...@dpdk.org; Yang, Qiming ; Zhou,
>>> YidingX ; Chas Williams ; Min
>>> Hu (Connor) ; Wu, Jingjing
>>> ; Xing, Beilei ; Mike
>>> Pattrick ; Zhang, Qi Z ;
>>> Doherty, Declan ; Mrzyglod, Daniel T
>>> ; Dapeng Yu ;
>>> Zhang, Helin ; Mcnamara, John
>>> ; Thomas Monjalon 
>>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr thread
>>>
>>> On 3/27/2023 1:31 PM, Ferruh Yigit wrote:
 On 3/27/2023 6:31 AM, Deng, KaiwenX wrote:
>
>
>> -Original Message-
>> From: Ferruh Yigit 
>> Sent: Thursday, March 23, 2023 11:39 PM
>> To: Deng, KaiwenX ; dev@dpdk.org
>> Cc: sta...@dpdk.org; Yang, Qiming ; Zhou,
>> YidingX ; Chas Williams ;
>> Min Hu (Connor) ; Wu, Jingjing
>> ; Xing, Beilei ;
>> Mike Pattrick ; Zhang, Qi Z
>> ; Doherty, Declan
>> ; Mrzyglod, Daniel T
>> ; Dapeng Yu 
>> Subject: Re: [PATCH v3] net/iavf: fix iavf query stats in intr
>> thread
>>
>> On 3/22/2023 7:26 AM, Kaiwen Deng wrote:
>>> When iavf send query-stats command in eal-intr-thread through
>>> virtual channel, there will be no response received from
>>> iavf_dev_virtchnl_handler for this command during block and wait.
>>> Because iavf_dev_virtchnl_handler is also registered in eal-intr-
>> thread.
>>>
>>> When vf device is bonded as BONDING_MODE_TLB mode, the slave
>>> device
>>> update callback will registered in alarm and called by
>>> eal-intr-thread, it would also raise the above issue.
>>>
>>> This commit add to poll the response for VIRTCHNL_OP_GET_STATS
>>> when
>> it
>>> is called by eal-intr-thread to fix this issue.
>>>
>>> Fixes: 91bf37d250aa ("net/iavf: add lock for VF commands")
>>> Fixes: 22b123a36d07 ("net/avf: initialize PMD")
>>> Fixes: 7c76a747e68c ("bond: add mode 5")
>>> Fixes: 435d523112cc ("net/iavf: fix multi-process shared data")
>>> Fixes: cb5c1b91f76f ("net/iavf: add thread for event callbacks")
>>
>>
>> Hi Kaiwen,
>>
>> Above commit already seems trying to address same issue, it
>> creates
>> "iavf- event-thread" control thread to asyncroniously handle the
>> interrupts, in non- interrupt context, why it is not working?
>>
>> Instead of adding 'rte_thread_is_intr()' checks, can't you make
>> sure all interrupts handled in control tread?
>>
>> And can you please provide a stack trace in commit log, to
>> describe the issue better?
> Hi Ferru,
> Sorry for my late reply, And thanks for your review.
>
> The above commit does not fix this issue when we need to get the
>>> returned data.
> If we call iavf_query_stats and wait for response statistics in the
> intr-
>>> thread.
> iavf_handle_virtchnl_msg is also registered in the intr_thread and
> will not be executed while waiting.
>

 Got it, since return value is required, API can't be called asyncroniously.



 I think 'rte_thread_is_intr()' checks may cause more trouble for you
 in long term,

 - why 'iavf_query_stats()' is called in the iterrupt thread, can it
 be prevented?

 - does it make sense to allways poll messages from PF (for simplification)?


 If answer to both are 'No', I am OK to continue with current
 proposal if you are happy with it.

>>>
>>>
>>> btw, how critical is this issue?
>>>
>>> If it is critical, I am OK to get it as it is for this release and
>>> investigate it further for next release, since only a few days left for this
>> release.
>>>
>> Hi Ferruh,
>>
>> I didn't find a more suitable solution after consideration, if you have a 
>> better
>> suggestion, please let me know, thanks.
> Hi Ferruh,
> Can you please take a look at this patch.
>

Hi Kaiwen,

Sorry for delay, lets continue with this solution.

I thought calling from "iavf-event-thread" can be an option but your
description clarifies why it is not an option, and I also don't have
better solution, so I think can continue as it is.


> Thanks.
>>>

> In this commit I changed it to poll for replies to commands executed in
> the interrupt thread.
>
> main thread                                   interrupt thread

Re: [PATCH 1/1] vfio: Make buildable with MUSL runtime

2023-05-23 Thread Thomas Monjalon
22/05/2023 11:27, Burakov, Anatoly:
> On 5/20/2023 7:07 PM, Philip Prindeville wrote:
> > From: Philip Prindeville 
> > 
> > pread64() and pwrite64() are declared in  in MUSL and
> > other (i.e. not glibc) C runtimes.
> > 
> > Signed-off-by: Philip Prindeville 
> Acked-by: Anatoly Burakov 

It was "buildable" already, and I don't understand how.
In case of doubt, I've added Cc: sta...@dpdk.org for backports.

Applied, thanks.




Re: [PATCH v3] ethdev: add flow rule actions update API

2023-05-23 Thread Ferruh Yigit
On 5/23/2023 7:59 AM, Ori Kam wrote:
> Hi 
> 
>> -Original Message-
>> From: Ferruh Yigit 
>> Sent: Monday, May 22, 2023 1:28 PM
>>
>> On 5/18/2023 10:48 PM, Alexander Kozyrev wrote:
>>> Introduce the new rte_flow_update() API allowing users
>>> to update the action list in the already existing rule.
>>
>> If the API is only to update actions, does it make sense to rename it to
>> explicitly state this, like:
>> `rte_flow_action_update()`
>>
>> Same for async version of the API.
>>
> 
> I'm O.K with the suggested name.
> Maybe just change action to actions?
> 

Both are OK for me. Existing APIs have mixed usage of 'action' vs 'actions';
is there a clear distinction for when to use one or the other?

> Best,
> Ori
> 
>>> Flow rules can be updated now without the need to destroy
>>> the rule first and create a new one instead.
>>> A single API call ensures that no packets are lost by
>>> guaranteeing atomicity and flow state correctness.
>>> The rte_flow_async_update() is added as well.
>>> The matcher is not updated, only the action list is.
>>>
>>> Signed-off-by: Alexander Kozyrev 
>>
>> <...>



RE: [PATCH v3 0/5] ethdev: modify field API for multiple headers

2023-05-23 Thread Ori Kam
Hi Michael,

> -Original Message-
> From: Michael Baum 
> Sent: Monday, May 22, 2023 10:28 PM
> To: dev@dpdk.org
> Cc: Ori Kam ; Aman Singh ;
> Yuying Zhang ; Ferruh Yigit
> ; NBU-Contact-Thomas Monjalon (EXTERNAL)
> 
> Subject: [PATCH v3 0/5] ethdev: modify field API for multiple headers
> 
> This patch-set extend the modify field action API to support both
> multiple MPLS and GENEVE option headers.
> 
> In current API, the header type is provided by rte_flow_field_id
> enumeration and the encapsulation level (inner/outer/tunnel) is
> specified by data.level field.
> However, there is no way to specify header inside encapsulation level.
> 
> For example, for this packet:
> 
> eth / mpls / mpls / mpls / ipv4 / udp
> the both second and third MPLS headers cannot be modified using this
> API.
> 
> RFC:
> https://patchwork.dpdk.org/project/dpdk/cover/20230420092145.522389-1-
> michae...@nvidia.com/
> 
> v2:
>  - Change "sub_level" name to "tag_index".
>  - Squash PMD changes into API changes patch.
>  - Remove PMD private patch from the patch-set.
> 
> v3:
>  - Add TAG array API change to release notes.
>  - Improve comment and documentation.
> 
> Michael Baum (5):
>   doc: fix blank lines in modify field action description
>   doc: fix blank line in asynchronous operations description
>   doc: fix wrong indentation in RSS action description
>   ethdev: add GENEVE TLV option modification support
>   ethdev: add MPLS header modification support
> 
>  app/test-pmd/cmdline_flow.c| 70 +++-
>  doc/guides/prog_guide/rte_flow.rst | 59 -
>  doc/guides/rel_notes/release_23_07.rst |  7 +++
>  drivers/net/mlx5/mlx5_flow_hw.c| 22 
>  lib/ethdev/rte_flow.h  | 73 --
>  5 files changed, 203 insertions(+), 28 deletions(-)
> 
> --
> 2.25.1

Series-acked-by:  Ori Kam 
Best,
Ori




Re: [PATCH v7] enhance NUMA affinity heuristic

2023-05-23 Thread Burakov, Anatoly

On 5/23/2023 3:50 AM, Kaisen You wrote:

When a DPDK application is started on only one NUMA node, memory is
allocated for only one socket. When interrupt threads use memory, the
memory may not be found on the socket where the interrupt thread
is currently located, and memory has to be reallocated from the hugepages;
this operation leads to performance degradation.

Fixes: 705356f0811f ("eal: simplify control thread creation")
Fixes: 770d41bf3309 ("malloc: fix allocation with unknown socket ID")
Cc: sta...@dpdk.org

Signed-off-by: Kaisen You 


Hi You,

I've suggested comment rewordings based on my understanding of the issue.


---
Changes since v6:
- New explanation for easy understanding,

Changes since v5:
- Add comments to the code,

Changes since v4:
- mod the patch title,

Changes since v3:
- add the assignment of socket_id in thread initialization,

Changes since v2:
- add uncommitted local change and fix compilation,

Changes since v1:
- accomodate for configurations with main lcore running on multiples
   physical cores belonging to different numa,
---
  lib/eal/common/eal_common_thread.c | 6 ++
  lib/eal/common/malloc_heap.c   | 9 +
  2 files changed, 15 insertions(+)

diff --git a/lib/eal/common/eal_common_thread.c 
b/lib/eal/common/eal_common_thread.c
index 079a385630..6479b66da1 100644
--- a/lib/eal/common/eal_common_thread.c
+++ b/lib/eal/common/eal_common_thread.c
@@ -252,6 +252,12 @@ static int ctrl_thread_init(void *arg)
struct rte_thread_ctrl_params *params = arg;
  
  	__rte_thread_init(rte_lcore_id(), cpuset);

+   /* set the value of the per-core variable _socket_id to SOCKET_ID_ANY.
+* Satisfy the judgment condition when threads find memory.
+* If SOCKET_ID_ANY is not specified, the thread may go to a node with
+* unallocated memory in a subsequent memory search.


I suggest a different comment wording:

Set control thread socket ID to SOCKET_ID_ANY as control threads may be 
scheduled on any NUMA node.



+*/
+   RTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY;
params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);
if (params->ret != 0) {
__atomic_store_n(¶ms->ctrl_thread_status,
diff --git a/lib/eal/common/malloc_heap.c b/lib/eal/common/malloc_heap.c
index d25bdc98f9..6d37f8afee 100644
--- a/lib/eal/common/malloc_heap.c
+++ b/lib/eal/common/malloc_heap.c
@@ -716,6 +716,15 @@ malloc_get_numa_socket(void)
if (conf->socket_mem[socket_id] != 0)
return socket_id;
}
+   /* Trying to allocate memory on the main lcore numa node.
+* especially when the DPDK application is started only on one numa 
node.
+*/


I suggest the following comment wording:

We couldn't quickly find a NUMA node where memory was available, so
fall back to using the main lcore socket ID.



+   socket_id = rte_lcore_to_socket_id(rte_get_main_lcore());
+   /* When the socket_id obtained in the main lcore numa is SOCKET_ID_ANY,
+* The probability of finding memory on rte_socket_id_by_idx(0) is 
higher.
+*/


I suggest the following comment wording:

Main lcore socket ID may be SOCKET_ID_ANY in cases when main lcore 
thread is affinitized to multiple NUMA nodes.



+   if (socket_id != (unsigned int)SOCKET_ID_ANY)
+   return socket_id;
  


I suggest adding comment here:

Failed to find meaningful socket ID, so just use the first one available.


return rte_socket_id_by_idx(0);
  }


I believe these comments offer a better explanation as to why we are
doing the things we do here.


Whether or not you decide to take these corrections on board,

Acked-by: Anatoly Burakov 

--
Thanks,
Anatoly



[PATCH v2 4/5] mempool/cnxk: add hwpool ops

2023-05-23 Thread Ashwin Sekhar T K
Add hwpool ops which can be used to create a rte_mempool that attaches
to another rte_mempool. The hwpool will not have its own buffers and
will have a dummy populate callback. Only an NPA aura will be allocated
for this rte_mempool. The buffers will be allocated from the NPA pool
of the attached rte_mempool.

Only mbuf objects are supported in hwpool. Generic objects are not
supported. Note that this pool will not have any range check enabled,
so the user will be able to free any pointer into this pool. HW will
not throw error interrupts if invalid buffers are passed, so the user
must be careful when using this pool.
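
A hedged sketch of how an application might attach such a pool, assuming the
master pool pointer is passed via pool_config and that the ops are registered
under the name "cn10k_hwpool_ops" (the ops name is a guess; it is not visible
in this excerpt):

/* Illustrative only: geometry must match the master pool and the
 * cache must be disabled, per the checks in cn10k_hwpool_alloc().
 */
struct rte_mempool *hp;

hp = rte_mempool_create_empty("hwpool", master->size, master->elt_size,
			      0 /* no cache */, master->private_data_size,
			      rte_socket_id(), 0);
if (hp != NULL &&
    rte_mempool_set_ops_byname(hp, "cn10k_hwpool_ops", master) == 0)
	rte_mempool_populate_default(hp); /* dummy populate; only the aura is set up */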

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/mempool/cnxk/cn10k_hwpool_ops.c | 211 
 drivers/mempool/cnxk/cnxk_mempool.h |   4 +
 drivers/mempool/cnxk/meson.build|   1 +
 3 files changed, 216 insertions(+)
 create mode 100644 drivers/mempool/cnxk/cn10k_hwpool_ops.c

diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c 
b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
new file mode 100644
index 00..9238765155
--- /dev/null
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include 
+
+#include "roc_api.h"
+#include "cnxk_mempool.h"
+
+#define CN10K_HWPOOL_MEM_SIZE 128
+
+static int __rte_hot
+cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int 
n)
+{
+   struct rte_mempool *mp;
+   unsigned int index;
+
+   mp = CNXK_MEMPOOL_CONFIG(hp);
+   /* Ensure mbuf init changes are written before the free pointers
+* are enqueued to the stack.
+*/
+   rte_io_wmb();
+   for (index = 0; index < n; index++) {
+   struct rte_mempool_objhdr *hdr;
+   struct rte_mbuf *m;
+
+   m = PLT_PTR_CAST(obj_table[index]);
+   /* Update mempool information in the mbuf */
+   hdr = rte_mempool_get_header(obj_table[index]);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+   if (hdr->mp != m->pool || hdr->mp != hp)
+   plt_err("Pool Header Mismatch");
+#endif
+   m->pool = mp;
+   hdr->mp = mp;
+   roc_npa_aura_op_free(hp->pool_id, 0,
+(uint64_t)obj_table[index]);
+   }
+
+   return 0;
+}
+
+static int __rte_hot
+cn10k_hwpool_deq(struct rte_mempool *hp, void **obj_table, unsigned int n)
+{
+   unsigned int index;
+   uint64_t obj;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+   struct rte_mempool *mp;
+
+   mp = CNXK_MEMPOOL_CONFIG(hp);
+#endif
+
+   for (index = 0; index < n; index++, obj_table++) {
+   struct rte_mempool_objhdr *hdr;
+   struct rte_mbuf *m;
+   int retry = 4;
+
+   /* Retry few times before failing */
+   do {
+   obj = roc_npa_aura_op_alloc(hp->pool_id, 0);
+   } while (retry-- && (obj == 0));
+
+   if (obj == 0) {
+   cn10k_hwpool_enq(hp, obj_table - index, index);
+   return -ENOENT;
+   }
+   /* Update mempool information in the mbuf */
+   hdr = rte_mempool_get_header(PLT_PTR_CAST(obj));
+   m = PLT_PTR_CAST(obj);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+   if (hdr->mp != m->pool || hdr->mp != mp)
+   plt_err("Pool Header Mismatch");
+#endif
+   m->pool = hp;
+   hdr->mp = hp;
+   *obj_table = (void *)obj;
+   }
+
+   return 0;
+}
+
+static unsigned int
+cn10k_hwpool_get_count(const struct rte_mempool *hp)
+{
+   return (unsigned int)roc_npa_aura_op_available(hp->pool_id);
+}
+
+static int
+cn10k_hwpool_alloc(struct rte_mempool *hp)
+{
+   uint64_t aura_handle = 0;
+   struct rte_mempool *mp;
+   uint32_t pool_id;
+   int rc;
+
+   if (hp->cache_size) {
+   plt_err("Hwpool does not support cache");
+   return -EINVAL;
+   }
+
+   if (CNXK_MEMPOOL_FLAGS(hp)) {
+   plt_err("Flags must not be passed to hwpool ops");
+   return -EINVAL;
+   }
+
+   mp = CNXK_MEMPOOL_CONFIG(hp);
+   if (!mp) {
+   plt_err("Invalid rte_mempool passed as pool_config");
+   return -EINVAL;
+   }
+   if (mp->cache_size) {
+   plt_err("Hwpool does not support attaching to pool with cache");
+   return -EINVAL;
+   }
+
+   if (hp->elt_size != mp->elt_size ||
+   hp->header_size != mp->header_size ||
+   hp->trailer_size != mp->trailer_size || hp->size != mp->size) {
+   plt_err("Hwpool parameters matching with master pool");
+   return -EINVAL;
+   }
+
+   /* Create the NPA aura */
+   pool_id = roc_npa_aura_handle_to_aura(mp->pool_id);
+   rc = roc_npa_aura_create(&aura_handle, hp->size, NULL, (int)pool_id, 0);
+

[PATCH v2 5/5] mempool/cnxk: add support for exchanging mbufs between pools

2023-05-23 Thread Ashwin Sekhar T K
Add the following cnxk mempool PMD APIs to facilitate exchanging mbufs
between pools.
 * rte_pmd_cnxk_mempool_is_hwpool() - Allows the user to check whether a pool
   is a hwpool or not.
 * rte_pmd_cnxk_mempool_range_check_disable() - Disables range checking on
   any rte_mempool.
 * rte_pmd_cnxk_mempool_mbuf_exchange() - Exchanges mbufs between any two
   rte_mempools where the range check is disabled.
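
A hedged usage sketch combining the three calls above (error handling
trimmed; pool_a/pool_b and the mbufs m1/m2 are illustrative):

if (rte_pmd_cnxk_mempool_range_check_disable(pool_a) < 0 ||
    rte_pmd_cnxk_mempool_range_check_disable(pool_b) < 0)
	rte_exit(EXIT_FAILURE, "cannot disable range check\n");

if (rte_pmd_cnxk_mempool_is_hwpool(pool_a))
	printf("pool_a is a hwpool\n");

/* Swap pool ownership of an mbuf from each pool. */
rte_pmd_cnxk_mempool_mbuf_exchange(m1, m2);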

Signed-off-by: Ashwin Sekhar T K 
---
 doc/api/doxy-api-index.md   |  1 +
 doc/api/doxy-api.conf.in|  1 +
 drivers/mempool/cnxk/cn10k_hwpool_ops.c | 63 -
 drivers/mempool/cnxk/cnxk_mempool.h |  4 ++
 drivers/mempool/cnxk/meson.build|  1 +
 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h | 56 ++
 drivers/mempool/cnxk/version.map| 10 
 7 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/mempool/cnxk/rte_pmd_cnxk_mempool.h
 create mode 100644 drivers/mempool/cnxk/version.map

diff --git a/doc/api/doxy-api-index.md b/doc/api/doxy-api-index.md
index c709fd48ad..a781b8f408 100644
--- a/doc/api/doxy-api-index.md
+++ b/doc/api/doxy-api-index.md
@@ -49,6 +49,7 @@ The public API headers are grouped by topics:
   [iavf](@ref rte_pmd_iavf.h),
   [bnxt](@ref rte_pmd_bnxt.h),
   [cnxk](@ref rte_pmd_cnxk.h),
+  [cnxk_mempool](@ref rte_pmd_cnxk_mempool.h),
   [dpaa](@ref rte_pmd_dpaa.h),
   [dpaa2](@ref rte_pmd_dpaa2.h),
   [mlx5](@ref rte_pmd_mlx5.h),
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index d230a19e1f..7e68e43c64 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -9,6 +9,7 @@ INPUT   = @TOPDIR@/doc/api/doxy-api-index.md \
   @TOPDIR@/drivers/crypto/scheduler \
   @TOPDIR@/drivers/dma/dpaa2 \
   @TOPDIR@/drivers/event/dlb2 \
+  @TOPDIR@/drivers/mempool/cnxk \
   @TOPDIR@/drivers/mempool/dpaa2 \
   @TOPDIR@/drivers/net/ark \
   @TOPDIR@/drivers/net/bnxt \
diff --git a/drivers/mempool/cnxk/cn10k_hwpool_ops.c 
b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
index 9238765155..b234481ec1 100644
--- a/drivers/mempool/cnxk/cn10k_hwpool_ops.c
+++ b/drivers/mempool/cnxk/cn10k_hwpool_ops.c
@@ -3,11 +3,14 @@
  */
 
 #include 
+#include 
 
 #include "roc_api.h"
 #include "cnxk_mempool.h"
 
-#define CN10K_HWPOOL_MEM_SIZE 128
+#define CN10K_HWPOOL_MEM_SIZE   128
+#define CN10K_NPA_IOVA_RANGE_MIN 0x0
+#define CN10K_NPA_IOVA_RANGE_MAX 0x1fff80
 
 static int __rte_hot
 cn10k_hwpool_enq(struct rte_mempool *hp, void *const *obj_table, unsigned int 
n)
@@ -197,6 +200,64 @@ cn10k_hwpool_populate(struct rte_mempool *hp, unsigned int 
max_objs,
return hp->size;
 }
 
+int
+rte_pmd_cnxk_mempool_mbuf_exchange(struct rte_mbuf *m1, struct rte_mbuf *m2)
+{
+   struct rte_mempool_objhdr *hdr;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+   if (!(CNXK_MEMPOOL_FLAGS(m1->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK) ||
+   !(CNXK_MEMPOOL_FLAGS(m2->pool) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)) {
+   plt_err("Pools must have range check disabled");
+   return -EINVAL;
+   }
+   if (m1->pool->elt_size != m2->pool->elt_size ||
+   m1->pool->header_size != m2->pool->header_size ||
+   m1->pool->trailer_size != m2->pool->trailer_size ||
+   m1->pool->size != m2->pool->size) {
+   plt_err("Parameters of pools involved in exchange does not 
match");
+   return -EINVAL;
+   }
+#endif
+   RTE_SWAP(m1->pool, m2->pool);
+   hdr = rte_mempool_get_header(m1);
+   hdr->mp = m1->pool;
+   hdr = rte_mempool_get_header(m2);
+   hdr->mp = m2->pool;
+   return 0;
+}
+
+int
+rte_pmd_cnxk_mempool_is_hwpool(struct rte_mempool *mp)
+{
+   return !!(CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_IS_HWPOOL);
+}
+
+int
+rte_pmd_cnxk_mempool_range_check_disable(struct rte_mempool *mp)
+{
+   if (rte_pmd_cnxk_mempool_is_hwpool(mp)) {
+   /* Disable only aura range check for hardware pools */
+   roc_npa_aura_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+   CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+   mp = CNXK_MEMPOOL_CONFIG(mp);
+   }
+
+   /* No need to disable again if already disabled */
+   if (CNXK_MEMPOOL_FLAGS(mp) & CNXK_MEMPOOL_F_NO_RANGE_CHECK)
+   return 0;
+
+   /* Disable aura/pool range check */
+   roc_npa_pool_op_range_set(mp->pool_id, CN10K_NPA_IOVA_RANGE_MIN,
+ CN10K_NPA_IOVA_RANGE_MAX);
+   if (roc_npa_pool_range_update_check(mp->pool_id) < 0)
+   return -EBUSY;
+
+   CNXK_MEMPOOL_SET_FLAGS(mp, CNXK_MEMPOOL_F_NO_RANGE_CHECK);
+   return 0;
+}
+
 static struct rte_mempool_

RE: [PATCH v3] ethdev: add flow rule actions update API

2023-05-23 Thread Ori Kam
Hi Ferruh,

> -Original Message-
> From: Ferruh Yigit 
> Sent: Tuesday, May 23, 2023 1:34 PM
> 
> On 5/23/2023 7:59 AM, Ori Kam wrote:
> > Hi
> >
> >> -Original Message-
> >> From: Ferruh Yigit 
> >> Sent: Monday, May 22, 2023 1:28 PM
> >>
> >> On 5/18/2023 10:48 PM, Alexander Kozyrev wrote:
> >>> Introduce the new rte_flow_update() API allowing users
> >>> to update the action list in the already existing rule.
> >>
> >> If the API is only to update actions, does make sense to rename it to
> >> explicitly state this, like:
> >> `rte_flow_action_update()`
> >>
> >> Same for async version of the API.
> >>
> >
> > I'm O.K with the suggested name.
> > Maybe just change action to actions?
> >
> 
> Both are OK for me. Existing APIs have mixed usage of 'action' vs 'actions';
> is there a clear distinction on when to use one or the other?
> 
The idea is that if the API handles a single action it is 'action', else
'actions'.
For example,
1. Template create - rte_flow_actions_template_create, since it takes a number
of actions.
2. Indirect action create - rte_flow_async_action_handle_create, since it
creates just one action.
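
A minimal sketch of the candidate prototype, assuming the v3 parameter list
is kept and only the name changes (the name is not final at this point in
the thread):

/* Hypothetical renamed prototype; parameters assumed from the v3
 * rte_flow_update() proposal, only the name differs.
 */
int
rte_flow_actions_update(uint16_t port_id, struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error);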

> > Best,
> > Ori
> >
> >>> Flow rules can be updated now without the need to destroy
> >>> the rule first and create a new one instead.
> >>> A single API call ensures that no packets are lost by
> >>> guaranteeing atomicity and flow state correctness.
> >>> The rte_flow_async_update() is added as well.
> >>> The matcher is not updated, only the action list is.
> >>>
> >>> Signed-off-by: Alexander Kozyrev 
> >>
> >> <...>



Re: [PATCH v7] enhance NUMA affinity heuristic

2023-05-23 Thread Burakov, Anatoly

On 5/23/2023 3:50 AM, Kaisen You wrote:

When a DPDK application is started on only one NUMA node, memory is
allocated for only one socket. When interrupt threads use memory, the
memory may not be found on the socket where the interrupt thread is
currently located, and it has to be reallocated from hugepages; this
operation leads to performance degradation.

Fixes: 705356f0811f ("eal: simplify control thread creation")
Fixes: 770d41bf3309 ("malloc: fix allocation with unknown socket ID")
Cc: sta...@dpdk.org

Signed-off-by: Kaisen You 
---


For the record, I still think that this is a solution for a problem that 
should be fixed elsewhere, because a DPDK lcore (even main lcore!) 
having a specific NUMA node affinity is one of the most fundamental 
assumptions about DPDK, and I feel like we're inviting problems if we 
allow lcores to have multiple NUMA node affinities.


For example, if I run DPDK test app with the following command-line:

--lcores "1@(1,29),2@(30)"

The malloc autotest will fail because the main lcore now returns -1 when
we're calling `rte_socket_id()` from it. Correspondingly, any APIs that
use `rte_socket_id()` internally for various purposes (especially 
indexing arrays!) will now have to account for the fact that 
`rte_socket_id()` can just return -1 and it is not an exceptional situation.
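
A hedged sketch of the kind of caller-side guard this would force
(per_socket_stats and the fallback policy are illustrative, not existing
DPDK code):

/* Guard against SOCKET_ID_ANY before using the value as an index. */
int sid = rte_socket_id();

if (sid == SOCKET_ID_ANY)
	sid = rte_socket_id_by_idx(0); /* illustrative fallback */
per_socket_stats[sid].pkts++;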


IMO if we want to keep this behavior, EAL should at least warn the user 
that a DPDK lcore was assigned SOCKET_ID_ANY on account of multiple NUMA 
nodes being in its cpuset. So, as an unrealted change (so, i'm not 
suggesting doing it in this specific patchset), I would suggest that 
`thread_update_affinity()` should warn about DPDK lcore being assigned 
socket ID like that.


--
Thanks,
Anatoly



[PATCH v2 1/5] mempool/cnxk: use pool config to pass flags

2023-05-23 Thread Ashwin Sekhar T K
Use lower bits of pool_config to pass flags specific to
cnxk mempool PMD ops.
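
A hedged sketch of what this encoding allows on the caller side (the flag
and the aura_cfg pointer are illustrative; the macros are introduced below,
and the config pointer is assumed aligned so its low 4 bits are free):

/* Pack a driver flag into the low bits of the pool_config pointer. */
uint64_t cfg = PLT_U64_CAST(aura_cfg) | CNXK_MEMPOOL_F_CUSTOM_AURA;

rte_mempool_set_ops_byname(mp, "cn10k_mempool_ops", PLT_PTR_CAST(cfg));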

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/mempool/cnxk/cnxk_mempool.h | 24 
 drivers/mempool/cnxk/cnxk_mempool_ops.c | 17 ++---
 drivers/net/cnxk/cnxk_ethdev_sec.c  | 25 ++---
 3 files changed, 40 insertions(+), 26 deletions(-)

diff --git a/drivers/mempool/cnxk/cnxk_mempool.h 
b/drivers/mempool/cnxk/cnxk_mempool.h
index 3405aa7663..fc2e4b5b70 100644
--- a/drivers/mempool/cnxk/cnxk_mempool.h
+++ b/drivers/mempool/cnxk/cnxk_mempool.h
@@ -7,6 +7,30 @@
 
 #include 
 
+enum cnxk_mempool_flags {
+   /* This flag is used to ensure that only aura zero is allocated.
+* If aura zero is not available, then mempool creation fails.
+*/
+   CNXK_MEMPOOL_F_ZERO_AURA = RTE_BIT64(0),
+   /* Here the pool create will use the npa_aura_s structure passed
+* as pool config to create the pool.
+*/
+   CNXK_MEMPOOL_F_CUSTOM_AURA = RTE_BIT64(1),
+};
+
+#define CNXK_MEMPOOL_F_MASK 0xFUL
+
+#define CNXK_MEMPOOL_FLAGS(_m) 
\
+   (PLT_U64_CAST((_m)->pool_config) & CNXK_MEMPOOL_F_MASK)
+#define CNXK_MEMPOOL_CONFIG(_m)
\
+   (PLT_PTR_CAST(PLT_U64_CAST((_m)->pool_config) & ~CNXK_MEMPOOL_F_MASK))
+#define CNXK_MEMPOOL_SET_FLAGS(_m, _f) 
\
+   do {   \
+   void *_c = CNXK_MEMPOOL_CONFIG(_m);\
+   uint64_t _flags = CNXK_MEMPOOL_FLAGS(_m) | (_f);   \
+   (_m)->pool_config = PLT_PTR_CAST(PLT_U64_CAST(_c) | _flags);   \
+   } while (0)
+
 unsigned int cnxk_mempool_get_count(const struct rte_mempool *mp);
 ssize_t cnxk_mempool_calc_mem_size(const struct rte_mempool *mp,
   uint32_t obj_num, uint32_t pg_shift,
diff --git a/drivers/mempool/cnxk/cnxk_mempool_ops.c 
b/drivers/mempool/cnxk/cnxk_mempool_ops.c
index 3769afd3d1..1b6c4591bb 100644
--- a/drivers/mempool/cnxk/cnxk_mempool_ops.c
+++ b/drivers/mempool/cnxk/cnxk_mempool_ops.c
@@ -72,7 +72,7 @@ cnxk_mempool_calc_mem_size(const struct rte_mempool *mp, 
uint32_t obj_num,
 int
 cnxk_mempool_alloc(struct rte_mempool *mp)
 {
-   uint32_t block_count, flags = 0;
+   uint32_t block_count, flags, roc_flags = 0;
uint64_t aura_handle = 0;
struct npa_aura_s aura;
struct npa_pool_s pool;
@@ -96,15 +96,18 @@ cnxk_mempool_alloc(struct rte_mempool *mp)
pool.nat_align = 1;
pool.buf_offset = mp->header_size / ROC_ALIGN;
 
-   /* Use driver specific mp->pool_config to override aura config */
-   if (mp->pool_config != NULL)
-   memcpy(&aura, mp->pool_config, sizeof(struct npa_aura_s));
+   flags = CNXK_MEMPOOL_FLAGS(mp);
+   if (flags & CNXK_MEMPOOL_F_ZERO_AURA) {
+   roc_flags = ROC_NPA_ZERO_AURA_F;
+   } else if (flags & CNXK_MEMPOOL_F_CUSTOM_AURA) {
+   struct npa_aura_s *paura;
 
-   if (aura.ena && aura.pool_addr == 0)
-   flags = ROC_NPA_ZERO_AURA_F;
+   paura = CNXK_MEMPOOL_CONFIG(mp);
+   memcpy(&aura, paura, sizeof(struct npa_aura_s));
+   }
 
rc = roc_npa_pool_create(&aura_handle, block_size, block_count, &aura,
-&pool, flags);
+&pool, roc_flags);
if (rc) {
plt_err("Failed to alloc pool or aura rc=%d", rc);
goto error;
diff --git a/drivers/net/cnxk/cnxk_ethdev_sec.c 
b/drivers/net/cnxk/cnxk_ethdev_sec.c
index aa8a378a00..cd64daacc0 100644
--- a/drivers/net/cnxk/cnxk_ethdev_sec.c
+++ b/drivers/net/cnxk/cnxk_ethdev_sec.c
@@ -3,6 +3,7 @@
  */
 
 #include 
+#include 
 
 #define CNXK_NIX_INL_META_POOL_NAME "NIX_INL_META_POOL"
 
@@ -43,7 +44,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
 {
const char *mp_name = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
-   struct npa_aura_s *aura;
struct rte_mempool *mp;
uint16_t first_skip;
int rc;
@@ -65,7 +65,6 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
return -EINVAL;
}
 
-   plt_free(mp->pool_config);
rte_mempool_free(mp);
 
*aura_handle = 0;
@@ -84,22 +83,12 @@ cnxk_nix_inl_meta_pool_cb(uint64_t *aura_handle, uintptr_t 
*mpool, uint32_t buf_
return -EIO;
}
 
-   /* Indicate to allocate zero aura */
-   aura = plt_zmalloc(sizeof(struct npa_aura_s), 0);
-   if (!aura) {
-   rc = -ENOMEM;
-   goto free_mp;
-   }
-   aura->ena = 1;
-   if (!mempool_name)
-   aura->pool_addr = 0;
-   else
-   a

[PATCH v2 3/5] mempool/cnxk: add NPA aura range get/set APIs

2023-05-23 Thread Ashwin Sekhar T K
The current APIs to set a range on auras modify both the
aura range limits in software and the pool range limits
in NPA hardware.

Newly added ROC APIs allow setting/getting aura range limits
in software alone, without modifying hardware.

The existing aura range set functionality has been moved
to a pool range set API.
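
A hedged sketch of the resulting split, using the prototypes added in this
patch (aura_handle and the IOVA values are illustrative):

uint64_t start, end;

/* Software-only bookkeeping for the aura... */
roc_npa_aura_op_range_set(aura_handle, iova_start, iova_end);
roc_npa_aura_op_range_get(aura_handle, &start, &end);

/* ...while hardware pool limits are programmed separately. */
roc_npa_pool_op_range_set(aura_handle, iova_start, iova_end);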

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/common/cnxk/roc_nix_queue.c |  2 +-
 drivers/common/cnxk/roc_npa.c   | 35 -
 drivers/common/cnxk/roc_npa.h   |  6 +
 drivers/common/cnxk/roc_sso.c   |  2 +-
 drivers/common/cnxk/version.map |  2 ++
 drivers/mempool/cnxk/cnxk_mempool_ops.c |  2 +-
 6 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/drivers/common/cnxk/roc_nix_queue.c 
b/drivers/common/cnxk/roc_nix_queue.c
index 21bfe7d498..ac4d9856c1 100644
--- a/drivers/common/cnxk/roc_nix_queue.c
+++ b/drivers/common/cnxk/roc_nix_queue.c
@@ -1050,7 +1050,7 @@ sqb_pool_populate(struct roc_nix *roc_nix, struct 
roc_nix_sq *sq)
goto npa_fail;
}
 
-   roc_npa_aura_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
+   roc_npa_pool_op_range_set(sq->aura_handle, (uint64_t)sq->sqe_mem, iova);
roc_npa_aura_limit_modify(sq->aura_handle, nb_sqb_bufs);
sq->aura_sqb_bufs = nb_sqb_bufs;
 
diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index e3c925ddd1..3b0f95a304 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -18,7 +18,7 @@ roc_npa_lf_init_cb_register(roc_npa_lf_init_cb_t cb)
 }
 
 void
-roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+roc_npa_pool_op_range_set(uint64_t aura_handle, uint64_t start_iova,
  uint64_t end_iova)
 {
const uint64_t start = roc_npa_aura_handle_to_base(aura_handle) +
@@ -32,6 +32,7 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t 
start_iova,
PLT_ASSERT(lf);
lim = lf->aura_lim;
 
+   /* Change the range bookkeeping in software as well as in hardware */
lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
 
@@ -39,6 +40,38 @@ roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t 
start_iova,
roc_store_pair(lim[reg].ptr_end, reg, end);
 }
 
+void
+roc_npa_aura_op_range_set(uint64_t aura_handle, uint64_t start_iova,
+ uint64_t end_iova)
+{
+   uint64_t reg = roc_npa_aura_handle_to_aura(aura_handle);
+   struct npa_lf *lf = idev_npa_obj_get();
+   struct npa_aura_lim *lim;
+
+   PLT_ASSERT(lf);
+   lim = lf->aura_lim;
+
+   /* Change only the bookkeeping in software */
+   lim[reg].ptr_start = PLT_MIN(lim[reg].ptr_start, start_iova);
+   lim[reg].ptr_end = PLT_MAX(lim[reg].ptr_end, end_iova);
+}
+
+void
+roc_npa_aura_op_range_get(uint64_t aura_handle, uint64_t *start_iova,
+ uint64_t *end_iova)
+{
+   uint64_t aura_id = roc_npa_aura_handle_to_aura(aura_handle);
+   struct npa_aura_lim *lim;
+   struct npa_lf *lf;
+
+   lf = idev_npa_obj_get();
+   PLT_ASSERT(lf);
+
+   lim = lf->aura_lim;
+   *start_iova = lim[aura_id].ptr_start;
+   *end_iova = lim[aura_id].ptr_end;
+}
+
 static int
 npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s 
*aura,
   struct npa_pool_s *pool)
diff --git a/drivers/common/cnxk/roc_npa.h b/drivers/common/cnxk/roc_npa.h
index df15dabe92..21608a40d9 100644
--- a/drivers/common/cnxk/roc_npa.h
+++ b/drivers/common/cnxk/roc_npa.h
@@ -732,6 +732,12 @@ int __roc_api roc_npa_pool_range_update_check(uint64_t 
aura_handle);
 void __roc_api roc_npa_aura_op_range_set(uint64_t aura_handle,
 uint64_t start_iova,
 uint64_t end_iova);
+void __roc_api roc_npa_aura_op_range_get(uint64_t aura_handle,
+uint64_t *start_iova,
+uint64_t *end_iova);
+void __roc_api roc_npa_pool_op_range_set(uint64_t aura_handle,
+uint64_t start_iova,
+uint64_t end_iova);
 int __roc_api roc_npa_aura_create(uint64_t *aura_handle, uint32_t block_count,
  struct npa_aura_s *aura, int pool_id,
  uint32_t flags);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index 4a6a5080f7..c376bd837f 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -523,7 +523,7 @@ sso_hwgrp_init_xaq_aura(struct dev *dev, struct 
roc_sso_xaq_data *xaq,
roc_npa_aura_op_free(xaq->aura_handle, 0, iova);
iova += xaq_buf_size;
}
-   roc_npa_aura_op_range_set(xaq->aura_handle, (uint64_t)xaq->mem, iova);
+   roc_npa_pool_op_range_set(xa

[PATCH v4 0/5] ethdev: modify field API for multiple headers

2023-05-23 Thread Michael Baum
This patch-set extends the modify field action API to support both
multiple MPLS and GENEVE option headers.

In current API, the header type is provided by rte_flow_field_id
enumeration and the encapsulation level (inner/outer/tunnel) is
specified by data.level field.
However, there is no way to specify a header inside an encapsulation level.

For example, for this packet:

eth / mpls / mpls / mpls / ipv4 / udp
both the second and third MPLS headers cannot be modified using this
API.

RFC:
https://patchwork.dpdk.org/project/dpdk/cover/20230420092145.522389-1-michae...@nvidia.com/

v2:
 - Change "sub_level" name to "tag_index".
 - Squash PMD changes into API changes patch.
 - Remove PMD private patch from the patch-set.

v3:
 - Add TAG array API change to release notes.
 - Improve comment and documentation. 

v4:
 - Add "Acked-by" labels.
 - Add PMD adjustment for TAG array API change.

Michael Baum (5):
  doc: fix blank lines in modify field action description
  doc: fix blank line in asynchronous operations description
  doc: fix wrong indentation in RSS action description
  ethdev: add GENEVE TLV option modification support
  ethdev: add MPLS header modification support

 app/test-pmd/cmdline_flow.c| 70 +++-
 doc/guides/prog_guide/rte_flow.rst | 59 -
 doc/guides/rel_notes/release_23_07.rst |  7 +++
 drivers/net/mlx5/mlx5_flow.c   | 34 
 drivers/net/mlx5/mlx5_flow.h   | 23 
 drivers/net/mlx5/mlx5_flow_dv.c| 29 +-
 drivers/net/mlx5/mlx5_flow_hw.c| 43 +--
 lib/ethdev/rte_flow.h  | 73 --
 8 files changed, 288 insertions(+), 50 deletions(-)

-- 
2.25.1



[PATCH v4 1/5] doc: fix blank lines in modify field action description

2023-05-23 Thread Michael Baum
The modify field action description inside the "Generic flow API (rte_flow)"
documentation lists all operations supported for a destination field.
In addition, it lists the values supported for an encapsulation level
field.

In both cases, a blank line is missing before the list, causing it to
render as regular text.

This patch adds the blank lines.

Fixes: 73b68f4c54a0 ("ethdev: introduce generic modify flow action")
Cc: akozy...@nvidia.com
Cc: sta...@dpdk.org

Signed-off-by: Michael Baum 
Acked-by: Ori Kam 
---
 doc/guides/prog_guide/rte_flow.rst | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/doc/guides/prog_guide/rte_flow.rst 
b/doc/guides/prog_guide/rte_flow.rst
index 32fc45516a..e7faa368a1 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -2917,20 +2917,23 @@ The immediate value ``RTE_FLOW_FIELD_VALUE`` (or a 
pointer to it
 ``RTE_FLOW_FIELD_START`` is used to point to the beginning of a packet.
 See ``enum rte_flow_field_id`` for the list of supported fields.
 
-``op`` selects the operation to perform on a destination field.
+``op`` selects the operation to perform on a destination field:
+
 - ``set`` copies the data from ``src`` field to ``dst`` field.
 - ``add`` adds together ``dst`` and ``src`` and stores the result into ``dst``.
-- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst``
+- ``sub`` subtracts ``src`` from ``dst`` and stores the result into ``dst``.
 
 ``width`` defines a number of bits to use from ``src`` field.
 
 ``level`` is used to access any packet field on any encapsulation level
-as well as any tag element in the tag array.
-- ``0`` means the default behaviour. Depending on the packet type, it can
-mean outermost, innermost or anything in between.
+as well as any tag element in the tag array:
+
+- ``0`` means the default behaviour. Depending on the packet type,
+  it can mean outermost, innermost or anything in between.
 - ``1`` requests access to the outermost packet encapsulation level.
 - ``2`` and subsequent values requests access to the specified packet
-encapsulation level, from outermost to innermost (lower to higher values).
+  encapsulation level, from outermost to innermost (lower to higher values).
+
 For the tag array (in case of multiple tags are supported and present)
 ``level`` translates directly into the array index.
 
-- 
2.25.1



[PATCH v4 2/5] doc: fix blank line in asynchronous operations description

2023-05-23 Thread Michael Baum
The asynchronous operations description inside the "Generic flow API
(rte_flow)" documentation adds some bullets to describe asynchronous
operations behavior.

A blank line is missing before the first bullet, causing it to render
as regular text.

This patch adds the blank line.

Fixes: 197e820c6685 ("ethdev: bring in async queue-based flow rules operations")
Cc: akozy...@nvidia.com
Cc: sta...@dpdk.org

Signed-off-by: Michael Baum 
Acked-by: Ori Kam 
---
 doc/guides/prog_guide/rte_flow.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/doc/guides/prog_guide/rte_flow.rst 
b/doc/guides/prog_guide/rte_flow.rst
index e7faa368a1..76e69190fc 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -3702,6 +3702,7 @@ Asynchronous operations
 ---
 
 Flow rules management can be done via special lockless flow management queues.
+
 - Queue operations are asynchronous and not thread-safe.
 
 - Operations can thus be invoked by the app's datapath,
-- 
2.25.1



[PATCH v4 3/5] doc: fix wrong indentation in RSS action description

2023-05-23 Thread Michael Baum
The RSS action description inside the "Generic flow API (rte_flow)"
documentation lists the values supported for an encapsulation level
field.

For the "2" value, it uses 3 spaces of indentation instead of 2 after
the line break, causing the first line to render as bold.

This patch updates the number of spaces in the indentation.

Fixes: 18aee2861a1f ("ethdev: add encap level to RSS flow API action")
Cc: adrien.mazarg...@6wind.com
Cc: sta...@dpdk.org

Signed-off-by: Michael Baum 
Acked-by: Ori Kam 
---
 doc/guides/prog_guide/rte_flow.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/guides/prog_guide/rte_flow.rst 
b/doc/guides/prog_guide/rte_flow.rst
index 76e69190fc..25b57bf86d 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -1954,8 +1954,8 @@ Also, regarding packet encapsulation ``level``:
   level.
 
 - ``2`` and subsequent values request RSS to be performed on the specified
-   inner packet encapsulation level, from outermost to innermost (lower to
-   higher values).
+  inner packet encapsulation level, from outermost to innermost (lower to
+  higher values).
 
 Values other than ``0`` are not necessarily supported.
 
-- 
2.25.1



[PATCH v4 4/5] ethdev: add GENEVE TLV option modification support

2023-05-23 Thread Michael Baum
Add modify field support for GENEVE option fields:
 - "RTE_FLOW_FIELD_GENEVE_OPT_TYPE"
 - "RTE_FLOW_FIELD_GENEVE_OPT_CLASS"
 - "RTE_FLOW_FIELD_GENEVE_OPT_DATA"

Each GENEVE TLV option is identified by both its "class" and "type", so
2 new fields were added to the "rte_flow_action_modify_data" structure
to help specify which option to modify.

To make room for those 2 new fields, the "level" field moves to
"uint8_t", which is more than enough for an encapsulation level.
This patch also reduces all modify field encapsulation level "fully
masked" initializations to use UINT8_MAX instead of UINT32_MAX.
This change avoids compilation warnings caused by this API change.
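
A hedged sketch of addressing one option with the new fields (the class/type
identifiers, width, and value are illustrative; field names follow this
patch):

struct rte_flow_action_modify_field conf = {
	.operation = RTE_FLOW_MODIFY_SET,
	.dst = {
		.field = RTE_FLOW_FIELD_GENEVE_OPT_TYPE,
		.level = 1,                   /* outermost encapsulation */
		.class_id = RTE_BE16(0x0107), /* option class, example */
		.type = 0x01,                 /* option type, example */
	},
	.src = {
		.field = RTE_FLOW_FIELD_VALUE,
		.value = { 0x2a },            /* new type value, example */
	},
	.width = 8,
};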

Signed-off-by: Michael Baum 
Acked-by: Ori Kam 
---
 app/test-pmd/cmdline_flow.c| 48 +-
 doc/guides/prog_guide/rte_flow.rst | 23 
 doc/guides/rel_notes/release_23_07.rst |  3 ++
 drivers/net/mlx5/mlx5_flow_hw.c| 22 ++--
 lib/ethdev/rte_flow.h  | 48 +-
 5 files changed, 131 insertions(+), 13 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 58939ec321..8c1dea53c0 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -636,11 +636,15 @@ enum index {
ACTION_MODIFY_FIELD_DST_TYPE_VALUE,
ACTION_MODIFY_FIELD_DST_LEVEL,
ACTION_MODIFY_FIELD_DST_LEVEL_VALUE,
+   ACTION_MODIFY_FIELD_DST_TYPE_ID,
+   ACTION_MODIFY_FIELD_DST_CLASS_ID,
ACTION_MODIFY_FIELD_DST_OFFSET,
ACTION_MODIFY_FIELD_SRC_TYPE,
ACTION_MODIFY_FIELD_SRC_TYPE_VALUE,
ACTION_MODIFY_FIELD_SRC_LEVEL,
ACTION_MODIFY_FIELD_SRC_LEVEL_VALUE,
+   ACTION_MODIFY_FIELD_SRC_TYPE_ID,
+   ACTION_MODIFY_FIELD_SRC_CLASS_ID,
ACTION_MODIFY_FIELD_SRC_OFFSET,
ACTION_MODIFY_FIELD_SRC_VALUE,
ACTION_MODIFY_FIELD_SRC_POINTER,
@@ -854,7 +858,9 @@ static const char *const modify_field_ids[] = {
"ipv4_ecn", "ipv6_ecn", "gtp_psc_qfi", "meter_color",
"ipv6_proto",
"flex_item",
-   "hash_result", NULL
+   "hash_result",
+   "geneve_opt_type", "geneve_opt_class", "geneve_opt_data",
+   NULL
 };
 
 static const char *const meter_colors[] = {
@@ -2295,6 +2301,8 @@ static const enum index next_action_sample[] = {
 
 static const enum index action_modify_field_dst[] = {
ACTION_MODIFY_FIELD_DST_LEVEL,
+   ACTION_MODIFY_FIELD_DST_TYPE_ID,
+   ACTION_MODIFY_FIELD_DST_CLASS_ID,
ACTION_MODIFY_FIELD_DST_OFFSET,
ACTION_MODIFY_FIELD_SRC_TYPE,
ZERO,
@@ -2302,6 +2310,8 @@ static const enum index action_modify_field_dst[] = {
 
 static const enum index action_modify_field_src[] = {
ACTION_MODIFY_FIELD_SRC_LEVEL,
+   ACTION_MODIFY_FIELD_SRC_TYPE_ID,
+   ACTION_MODIFY_FIELD_SRC_CLASS_ID,
ACTION_MODIFY_FIELD_SRC_OFFSET,
ACTION_MODIFY_FIELD_SRC_VALUE,
ACTION_MODIFY_FIELD_SRC_POINTER,
@@ -6388,6 +6398,24 @@ static const struct token token_list[] = {
.call = parse_vc_modify_field_level,
.comp = comp_none,
},
+   [ACTION_MODIFY_FIELD_DST_TYPE_ID] = {
+   .name = "dst_type_id",
+   .help = "destination field type ID",
+   .next = NEXT(action_modify_field_dst,
+NEXT_ENTRY(COMMON_UNSIGNED)),
+   .args = ARGS(ARGS_ENTRY(struct rte_flow_action_modify_field,
+   dst.type)),
+   .call = parse_vc_conf,
+   },
+   [ACTION_MODIFY_FIELD_DST_CLASS_ID] = {
+   .name = "dst_class",
+   .help = "destination field class ID",
+   .next = NEXT(action_modify_field_dst,
+NEXT_ENTRY(COMMON_UNSIGNED)),
+   .args = ARGS(ARGS_ENTRY_HTON(struct 
rte_flow_action_modify_field,
+dst.class_id)),
+   .call = parse_vc_conf,
+   },
[ACTION_MODIFY_FIELD_DST_OFFSET] = {
.name = "dst_offset",
.help = "destination field bit offset",
@@ -6423,6 +6451,24 @@ static const struct token token_list[] = {
.call = parse_vc_modify_field_level,
.comp = comp_none,
},
+   [ACTION_MODIFY_FIELD_SRC_TYPE_ID] = {
+   .name = "src_type_id",
+   .help = "source field type ID",
+   .next = NEXT(action_modify_field_src,
+NEXT_ENTRY(COMMON_UNSIGNED)),
+   .args = ARGS(ARGS_ENTRY(struct rte_flow_action_modify_field,
+   src.type)),
+   .call = parse_vc_conf,
+   },
+   [ACTION_MODIFY_FIELD_SRC_CLASS_ID] = {
+   .name = "src_class",
+   .help = "source field class ID",
+   .next = NEXT(action_modify_field_src,
+NEXT_ENTRY(COMMON_UNSIGNED)),


[PATCH v4 5/5] ethdev: add MPLS header modification support

2023-05-23 Thread Michael Baum
Add support for MPLS header modification using the "RTE_FLOW_FIELD_MPLS" id.

Since an MPLS header might appear more than once in inner/outer/tunnel,
a new field was added to the "rte_flow_action_modify_data" structure in
addition to the "level" field.
The "tag_index" field is the index of the header inside the
encapsulation level. It is used to modify multiple MPLS headers in the
same encapsulation level.

This addition enables modifying multiple VLAN headers too, so the
description of "RTE_FLOW_FIELD_VLAN_" was updated.

Since the "tag_index" field is added, the "RTE_FLOW_FIELD_TAG" type
moves to use it for the tag array instead of using the "level" field.
Using "level" is still supported for backwards compatibility when the
"tag_index" field is zero.

Signed-off-by: Michael Baum 
Acked-by: Ori Kam 
---
 app/test-pmd/cmdline_flow.c| 24 +++-
 doc/guides/prog_guide/rte_flow.rst | 18 ++---
 doc/guides/rel_notes/release_23_07.rst |  8 +++-
 drivers/net/mlx5/mlx5_flow.c   | 34 +
 drivers/net/mlx5/mlx5_flow.h   | 23 
 drivers/net/mlx5/mlx5_flow_dv.c| 29 +++
 drivers/net/mlx5/mlx5_flow_hw.c| 21 ---
 lib/ethdev/rte_flow.h  | 51 ++
 8 files changed, 162 insertions(+), 46 deletions(-)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 8c1dea53c0..a51e37276b 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -636,6 +636,7 @@ enum index {
ACTION_MODIFY_FIELD_DST_TYPE_VALUE,
ACTION_MODIFY_FIELD_DST_LEVEL,
ACTION_MODIFY_FIELD_DST_LEVEL_VALUE,
+   ACTION_MODIFY_FIELD_DST_TAG_INDEX,
ACTION_MODIFY_FIELD_DST_TYPE_ID,
ACTION_MODIFY_FIELD_DST_CLASS_ID,
ACTION_MODIFY_FIELD_DST_OFFSET,
@@ -643,6 +644,7 @@ enum index {
ACTION_MODIFY_FIELD_SRC_TYPE_VALUE,
ACTION_MODIFY_FIELD_SRC_LEVEL,
ACTION_MODIFY_FIELD_SRC_LEVEL_VALUE,
+   ACTION_MODIFY_FIELD_SRC_TAG_INDEX,
ACTION_MODIFY_FIELD_SRC_TYPE_ID,
ACTION_MODIFY_FIELD_SRC_CLASS_ID,
ACTION_MODIFY_FIELD_SRC_OFFSET,
@@ -859,7 +861,7 @@ static const char *const modify_field_ids[] = {
"ipv6_proto",
"flex_item",
"hash_result",
-   "geneve_opt_type", "geneve_opt_class", "geneve_opt_data",
+   "geneve_opt_type", "geneve_opt_class", "geneve_opt_data", "mpls",
NULL
 };
 
@@ -2301,6 +2303,7 @@ static const enum index next_action_sample[] = {
 
 static const enum index action_modify_field_dst[] = {
ACTION_MODIFY_FIELD_DST_LEVEL,
+   ACTION_MODIFY_FIELD_DST_TAG_INDEX,
ACTION_MODIFY_FIELD_DST_TYPE_ID,
ACTION_MODIFY_FIELD_DST_CLASS_ID,
ACTION_MODIFY_FIELD_DST_OFFSET,
@@ -2310,6 +2313,7 @@ static const enum index action_modify_field_dst[] = {
 
 static const enum index action_modify_field_src[] = {
ACTION_MODIFY_FIELD_SRC_LEVEL,
+   ACTION_MODIFY_FIELD_SRC_TAG_INDEX,
ACTION_MODIFY_FIELD_SRC_TYPE_ID,
ACTION_MODIFY_FIELD_SRC_CLASS_ID,
ACTION_MODIFY_FIELD_SRC_OFFSET,
@@ -6398,6 +6402,15 @@ static const struct token token_list[] = {
.call = parse_vc_modify_field_level,
.comp = comp_none,
},
+   [ACTION_MODIFY_FIELD_DST_TAG_INDEX] = {
+   .name = "dst_tag_index",
+   .help = "destination field tag array",
+   .next = NEXT(action_modify_field_dst,
+NEXT_ENTRY(COMMON_UNSIGNED)),
+   .args = ARGS(ARGS_ENTRY(struct rte_flow_action_modify_field,
+   dst.tag_index)),
+   .call = parse_vc_conf,
+   },
[ACTION_MODIFY_FIELD_DST_TYPE_ID] = {
.name = "dst_type_id",
.help = "destination field type ID",
@@ -6451,6 +6464,15 @@ static const struct token token_list[] = {
.call = parse_vc_modify_field_level,
.comp = comp_none,
},
+   [ACTION_MODIFY_FIELD_SRC_TAG_INDEX] = {
+   .name = "stc_tag_index",
+   .help = "source field tag array",
+   .next = NEXT(action_modify_field_src,
+NEXT_ENTRY(COMMON_UNSIGNED)),
+   .args = ARGS(ARGS_ENTRY(struct rte_flow_action_modify_field,
+   src.tag_index)),
+   .call = parse_vc_conf,
+   },
[ACTION_MODIFY_FIELD_SRC_TYPE_ID] = {
.name = "src_type_id",
.help = "source field type ID",
diff --git a/doc/guides/prog_guide/rte_flow.rst 
b/doc/guides/prog_guide/rte_flow.rst
index ec812de335..e4328e7ed6 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -2925,8 +2925,7 @@ See ``enum rte_flow_field_id`` for the list of supported 
fields.
 
 ``width`` defines a number of bits to use from ``src`` field.
 
-``level`` is used to access any packet field on any enca

[PATCH 1/3] ip_frag: optimize key compare and hash generation

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Use the optimized rte_hash_k32_cmp_eq routine for key comparison
on x86 and ARM64.
Use CRC instructions for hash generation on ARM64.
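
For reference, the CRC path can be sketched as follows (illustrative
only; it assumes rte_hash_crc() from rte_hash_crc.h, which maps to CRC
instructions where the CPU supports them):

	#include <rte_hash_crc.h>

	/* key->key_len is in 8-byte words: 1 for IPv4, 4 for IPv6 */
	sig = rte_hash_crc(&key->src_dst,
			   key->key_len * sizeof(uint64_t), key->id);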

Signed-off-by: Pavan Nikhilesh 
---
 lib/hash/rte_cmp_arm64.h   | 16 
 lib/hash/rte_cmp_x86.h | 16 
 lib/ip_frag/ip_frag_common.h   | 17 ++---
 lib/ip_frag/ip_frag_internal.c |  4 ++--
 4 files changed, 28 insertions(+), 25 deletions(-)

diff --git a/lib/hash/rte_cmp_arm64.h b/lib/hash/rte_cmp_arm64.h
index e9e26f9abd..a3e85635eb 100644
--- a/lib/hash/rte_cmp_arm64.h
+++ b/lib/hash/rte_cmp_arm64.h
@@ -3,7 +3,7 @@
  */
 
 /* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
-static int
+static inline int
 rte_hash_k16_cmp_eq(const void *key1, const void *key2,
size_t key_len __rte_unused)
 {
@@ -24,7 +24,7 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2,
return !(x0 == 0 && x1 == 0);
 }
 
-static int
+static inline int
 rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -32,7 +32,7 @@ rte_hash_k32_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 16, key_len);
 }
 
-static int
+static inline int
 rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -42,7 +42,7 @@ rte_hash_k48_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }
 
-static int
+static inline int
 rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
@@ -50,7 +50,7 @@ rte_hash_k64_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }
 
-static int
+static inline int
 rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -58,7 +58,7 @@ rte_hash_k80_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 64, key_len);
 }
 
-static int
+static inline int
 rte_hash_k96_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -66,7 +66,7 @@ rte_hash_k96_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 64, key_len);
 }
 
-static int
+static inline int
 rte_hash_k112_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -76,7 +76,7 @@ rte_hash_k112_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 96, key_len);
 }
 
-static int
+static inline int
 rte_hash_k128_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
diff --git a/lib/hash/rte_cmp_x86.h b/lib/hash/rte_cmp_x86.h
index 13a5836351..ddfbef462f 100644
--- a/lib/hash/rte_cmp_x86.h
+++ b/lib/hash/rte_cmp_x86.h
@@ -5,7 +5,7 @@
 #include 
 
 /* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
-static int
+static inline int
 rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len 
__rte_unused)
 {
const __m128i k1 = _mm_loadu_si128((const __m128i *) key1);
@@ -15,7 +15,7 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2, 
size_t key_len __rte_unu
return !_mm_test_all_zeros(x, x);
 }
 
-static int
+static inline int
 rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -23,7 +23,7 @@ rte_hash_k32_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 16, key_len);
 }
 
-static int
+static inline int
 rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -33,7 +33,7 @@ rte_hash_k48_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }
 
-static int
+static inline int
 rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
@@ -41,7 +41,7 @@ rte_hash_k64_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }
 
-static int
+static inline int
 rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -49,7 +49,7 @@ rte_hash_k80_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) k

[PATCH 3/3] test: add reassembly perf test

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Add a reassembly perf autotest for both IPv4 and IPv6 reassembly.
Each test is performed with a variable number of fragments per flow,
with either ordered or unordered fragments, and with interleaved flows.
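
The test registers as 'reassembly_perf_autotest'; once built, it can
be run from the unit test binary, e.g. (build path illustrative):

	$ ./build/app/test/dpdk-test
	RTE>> reassembly_perf_autotest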

Signed-off-by: Pavan Nikhilesh 
---
 v4 Changes:
 - Rebase to master.
 v3 Changes:
 - Fix checkpatch issues.
 v2 Changes
 - Rebase to master, reduce memory consumption, set default mempool ops
 to ring_mp_mc.

 app/test/meson.build|2 +
 app/test/test_reassembly_perf.c | 1001 +++
 2 files changed, 1003 insertions(+)
 create mode 100644 app/test/test_reassembly_perf.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..8cc4f03db8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -108,6 +108,7 @@ test_sources = files(
 'test_rawdev.c',
 'test_rcu_qsbr.c',
 'test_rcu_qsbr_perf.c',
+'test_reassembly_perf.c',
 'test_reciprocal_division.c',
 'test_reciprocal_division_perf.c',
 'test_red.c',
@@ -297,6 +298,7 @@ perf_test_names = [
 'trace_perf_autotest',
 'ipsec_perf_autotest',
 'thash_perf_autotest',
+'reassembly_perf_autotest',
 ]

 driver_test_names = [
diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c
new file mode 100644
index 00..850485a9c5
--- /dev/null
+++ b/app/test/test_reassembly_perf.c
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Marvell.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "test.h"
+
+#define MAX_FLOWS  (1024 * 32)
+#define MAX_BKTS   MAX_FLOWS
+#define MAX_ENTRIES_PER_BKT 16
+#define MAX_FRAGMENTS  RTE_LIBRTE_IP_FRAG_MAX_FRAG
+#define MIN_FRAGMENTS  2
+#define MAX_PKTS   (MAX_FLOWS * MAX_FRAGMENTS)
+
+#define MAX_PKT_LEN 2048
+#define MAX_TTL_MS  (5 * MS_PER_S)
+
+/* use RFC863 Discard Protocol */
+#define UDP_SRC_PORT 9
+#define UDP_DST_PORT 9
+
+/* use RFC5735 / RFC2544 reserved network test addresses */
+#define IP_SRC_ADDR(x) ((198U << 24) | (18 << 16) | (0 << 8) | (x))
+#define IP_DST_ADDR(x) ((198U << 24) | (18 << 16) | (1 << 8) | (x))
+
+/* 2001:0200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180) */
+static uint8_t ip6_addr[16] = {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0};
+#define IP6_VERSION 6
+
+#define IP_DEFTTL 64 /* from RFC 1340. */
+
+static struct rte_ip_frag_tbl *frag_tbl;
+static struct rte_mempool *pkt_pool;
+static struct rte_mbuf *mbufs[MAX_FLOWS][MAX_FRAGMENTS];
+static uint8_t frag_per_flow[MAX_FLOWS];
+static uint32_t flow_cnt;
+
+#define FILL_MODE_LINEAR  0
+#define FILL_MODE_RANDOM  1
+#define FILL_MODE_INTERLEAVED 2
+
+static int
+reassembly_test_setup(void)
+{
+   uint64_t max_ttl_cyc = (MAX_TTL_MS * rte_get_timer_hz()) / 1E3;
+
+   frag_tbl = rte_ip_frag_table_create(MAX_FLOWS, MAX_ENTRIES_PER_BKT,
+   MAX_FLOWS * MAX_ENTRIES_PER_BKT,
+   max_ttl_cyc, rte_socket_id());
+   if (frag_tbl == NULL)
+   return TEST_FAILED;
+
+   rte_mbuf_set_user_mempool_ops("ring_mp_mc");
+   pkt_pool = rte_pktmbuf_pool_create(
+   "reassembly_perf_pool", MAX_FLOWS * MAX_FRAGMENTS, 0, 0,
+   RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+   if (pkt_pool == NULL) {
+   printf("[%s] Failed to create pkt pool\n", __func__);
+   rte_ip_frag_table_destroy(frag_tbl);
+   return TEST_FAILED;
+   }
+
+   return TEST_SUCCESS;
+}
+
+static void
+reassembly_test_teardown(void)
+{
+   if (frag_tbl != NULL)
+   rte_ip_frag_table_destroy(frag_tbl);
+
+   if (pkt_pool != NULL)
+   rte_mempool_free(pkt_pool);
+}
+
+static void
+randomize_array_positions(void **array, uint8_t sz)
+{
+   void *tmp;
+   int i, j;
+
+   if (sz == 2) {
+   tmp = array[0];
+   array[0] = array[1];
+   array[1] = tmp;
+   } else {
+   for (i = sz - 1; i > 0; i--) {
+   j = rte_rand_max(i + 1);
+   tmp = array[i];
+   array[i] = array[j];
+   array[j] = tmp;
+   }
+   }
+}
+
+static void
+reassembly_print_banner(const char *proto_str)
+{
+   printf("+=="
+  "+\n");
+   printf("| %-32s| %-3s : %-58d|\n", proto_str, "Flow Count", MAX_FLOWS);
+   printf("+++=+=+"
+  "+===+\n");
+   printf("%-17s%-17s%-14s%-14s%-25s%-20s\n", "| Fragment Order",
+  "| Fragments/Flow", "| Outstanding", "| Cycles/Flow",
+ 

[PATCH 2/3] ip_frag: improve reassembly lookup performance

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Improve reassembly lookup performance by using NEON intrinsics for
key validation.
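
For reference, the core NEON idea below - turning a per-lane compare
result into a scalar bitmask with a narrowing shift - can be sketched
in isolation (illustrative only, ARM64-specific):

	#include <arm_neon.h>
	#include <stdint.h>

	/* 16 bits set per non-zero 32-bit lane of v; the first such
	 * lane index is then __builtin_ctzll(mask) >> 4 */
	static inline uint64_t
	nonzero_lane_mask(uint32x4_t v)
	{
		uint32x4_t z = vceqzq_u32(v); /* all-ones where lane == 0 */
		uint64_t zmask = vget_lane_u64(
			vreinterpret_u64_u16(vshrn_n_u32(z, 16)), 0);
		return ~zmask;                /* flip: non-zero lanes */
	}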

Signed-off-by: Pavan Nikhilesh 
---
 lib/ip_frag/ip_frag_internal.c   | 224 +--
 lib/ip_frag/ip_reassembly.h  |   6 +
 lib/ip_frag/rte_ip_frag_common.c |  10 ++
 3 files changed, 196 insertions(+), 44 deletions(-)

diff --git a/lib/ip_frag/ip_frag_internal.c b/lib/ip_frag/ip_frag_internal.c
index 7cbef647df..de78a0ed8f 100644
--- a/lib/ip_frag/ip_frag_internal.c
+++ b/lib/ip_frag/ip_frag_internal.c
@@ -4,8 +4,9 @@

 #include 

-#include 
 #include 
+#include 
+#include 

 #include "ip_frag_common.h"

@@ -280,10 +281,166 @@ ip_frag_find(struct rte_ip_frag_tbl *tbl, struct 
rte_ip_frag_death_row *dr,
return pkt;
 }

-struct ip_frag_pkt *
-ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
-   const struct ip_frag_key *key, uint64_t tms,
-   struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
+static inline void
+ip_frag_dbg(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *p,
+   uint32_t list_idx, uint32_t list_cnt)
+{
+   RTE_SET_USED(tbl);
+   RTE_SET_USED(list_idx);
+   RTE_SET_USED(list_cnt);
+   if (p->key.key_len == IPV4_KEYLEN)
+   IP_FRAG_LOG(DEBUG,
+   "%s:%d:\n"
+   "tbl: %p, max_entries: %u, use_entries: %u\n"
+   "ipv4_frag_pkt line0: %p, index: %u from %u\n"
+   "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
+   __func__, __LINE__, tbl, tbl->max_entries,
+   tbl->use_entries, p, list_idx, list_cnt,
+   p->key.src_dst[0], p->key.id, p->start);
+   else
+   IP_FRAG_LOG(DEBUG,
+   "%s:%d:\n"
+   "tbl: %p, max_entries: %u, use_entries: %u\n"
+   "ipv6_frag_pkt line0: %p, index: %u from %u\n"
+   "key: <" IPv6_KEY_BYTES_FMT
+   ", %#x>, start: %" PRIu64 "\n",
+   __func__, __LINE__, tbl, tbl->max_entries,
+   tbl->use_entries, p, list_idx, list_cnt,
+   IPv6_KEY_BYTES(p1[i].key.src_dst), p->key.id,
+   p->start);
+}
+
+#if defined(RTE_ARCH_ARM64)
+static inline struct ip_frag_pkt *
+ip_frag_lookup_neon(struct rte_ip_frag_tbl *tbl, const struct ip_frag_key 
*key, uint64_t tms,
+   struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
+{
+   struct ip_frag_pkt *empty, *old;
+   struct ip_frag_pkt *p1, *p2;
+   uint32_t assoc, sig1, sig2;
+   uint64_t max_cycles;
+
+   empty = NULL;
+   old = NULL;
+
+   max_cycles = tbl->max_cycles;
+   assoc = tbl->bucket_entries;
+
+   if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
+   return tbl->last;
+
+   /* different hashing methods for IPv4 and IPv6 */
+   if (key->key_len == IPV4_KEYLEN)
+   ipv4_frag_hash(key, &sig1, &sig2);
+   else
+   ipv6_frag_hash(key, &sig1, &sig2);
+
+   p1 = IP_FRAG_TBL_POS(tbl, sig1);
+   p2 = IP_FRAG_TBL_POS(tbl, sig2);
+
+   uint64x2_t key0, key1, key2, key3;
+   uint64_t vmask, zmask, ts_mask;
+   uint64x2_t ts0, ts1;
+   uint32x4_t nz_key;
+   uint8_t idx;
+   /* Bucket entries are always power of 2. */
+   rte_prefetch0(&p1[0].key);
+   rte_prefetch0(&p1[1].key);
+   rte_prefetch0(&p2[0].key);
+   rte_prefetch0(&p2[1].key);
+
+   while (assoc > 1) {
+   if (assoc > 2) {
+   rte_prefetch0(&p1[2].key);
+   rte_prefetch0(&p1[3].key);
+   rte_prefetch0(&p2[2].key);
+   rte_prefetch0(&p2[3].key);
+   }
+   struct ip_frag_pkt *p[] = {&p1[0], &p2[0], &p1[1], &p2[1]};
+   key0 = vld1q_u64(&p[0]->key.id_key_len);
+   key1 = vld1q_u64(&p[1]->key.id_key_len);
+   key2 = vld1q_u64(&p[2]->key.id_key_len);
+   key3 = vld1q_u64(&p[3]->key.id_key_len);
+
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key0), 1), nz_key, 0);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key1), 1), nz_key, 1);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key2), 1), nz_key, 2);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key3), 1), nz_key, 3);
+
+   nz_key = vceqzq_u32(nz_key);
+   zmask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(nz_key, 
16)), 0);
+   vmask = ~zmask;
+
+   vmask &= 0x8000800080008000;
+   for (; vmask > 0; vmask &= vmask - 1) {
+   idx = __builtin_ctzll(vmask) >> 4;
+   if (ip_frag_key_cmp(key, &p[idx]->k

[PATCH v1 1/1] eal: warn user when lcore cpuset includes multiple sockets

2023-05-23 Thread Anatoly Burakov
Currently, it is allowed to specify a cpuset for lcores such that it
will include physical cores from different NUMA nodes. This has the
effect of setting `rte_socket_id()` for that lcore to SOCKET_ID_ANY,
so add a warning about it.
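
For example, with the EAL option below, lcore 1 is serviced by CPUs 0
and 64 (assumed here to sit on different sockets), so its cpuset spans
two NUMA nodes and the new message is logged:

	--lcores='1@(0,64)'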

Signed-off-by: Anatoly Burakov 
---
 lib/eal/common/eal_common_thread.c | 20 +---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/lib/eal/common/eal_common_thread.c 
b/lib/eal/common/eal_common_thread.c
index 079a385630..46fd2aca1e 100644
--- a/lib/eal/common/eal_common_thread.c
+++ b/lib/eal/common/eal_common_thread.c
@@ -65,10 +65,13 @@ static void
 thread_update_affinity(rte_cpuset_t *cpusetp)
 {
unsigned int lcore_id = rte_lcore_id();
+   int socket_id;
+
+   /* find socket ID from cpuset */
+   socket_id = eal_cpuset_socket_id(cpusetp);
 
/* store socket_id in TLS for quick access */
-   RTE_PER_LCORE(_socket_id) =
-   eal_cpuset_socket_id(cpusetp);
+   RTE_PER_LCORE(_socket_id) = socket_id;
 
/* store cpuset in TLS for quick access */
memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
@@ -76,9 +79,20 @@ thread_update_affinity(rte_cpuset_t *cpusetp)
 
if (lcore_id != (unsigned)LCORE_ID_ANY) {
/* EAL thread will update lcore_config */
-   lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
+   lcore_config[lcore_id].socket_id = socket_id;
memmove(&lcore_config[lcore_id].cpuset, cpusetp,
sizeof(rte_cpuset_t));
+   
+   /*
+* lcore_id is not LCORE_ID_ANY, meaning this is a DPDK lcore,
+* so having a valid NUMA affinity for this lcore is important.
+* However, if cpuset includes cores from multiple NUMA nodes,
+* the socket ID will be set to SOCKET_ID_ANY. Notify the user
+* about it if that happens.
+*/
+   if (socket_id == SOCKET_ID_ANY)
+   RTE_LOG(INFO, EAL, "DPDK lcore %u has NUMA affinity set 
to SOCKET_ID_ANY\n",
+   lcore_id);
}
 }
 
-- 
2.37.2



[PATCH v2 2/5] common/cnxk: add NPA aura create/destroy ROC APIs

2023-05-23 Thread Ashwin Sekhar T K
Add ROC APIs that allow creating NPA auras independently and
attaching them to an existing NPA pool. Also add an API to
destroy NPA auras independently.
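
For example (an illustrative sketch; the destroy API name and the
pool_id value are assumptions here, see the diff below for the create
prototype):

	struct npa_aura_s aura = {0};
	uint64_t aura_handle;
	int rc;

	/* create a standalone aura attached to an existing pool */
	rc = roc_npa_aura_create(&aura_handle, block_count, &aura,
				 pool_id, flags);
	if (rc == 0) {
		/* ... use the aura ... */
		rc = roc_npa_aura_destroy(aura_handle);
	}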

Signed-off-by: Ashwin Sekhar T K 
---
 drivers/common/cnxk/roc_npa.c   | 219 
 drivers/common/cnxk/roc_npa.h   |   4 +
 drivers/common/cnxk/version.map |   2 +
 3 files changed, 225 insertions(+)

diff --git a/drivers/common/cnxk/roc_npa.c b/drivers/common/cnxk/roc_npa.c
index 20637fbf65..e3c925ddd1 100644
--- a/drivers/common/cnxk/roc_npa.c
+++ b/drivers/common/cnxk/roc_npa.c
@@ -85,6 +85,36 @@ npa_aura_pool_init(struct mbox *m_box, uint32_t aura_id, 
struct npa_aura_s *aura
return rc;
 }
 
+static int
+npa_aura_init(struct mbox *m_box, uint32_t aura_id, struct npa_aura_s *aura)
+{
+   struct npa_aq_enq_req *aura_init_req;
+   struct npa_aq_enq_rsp *aura_init_rsp;
+   struct mbox *mbox;
+   int rc = -ENOSPC;
+
+   mbox = mbox_get(m_box);
+   aura_init_req = mbox_alloc_msg_npa_aq_enq(mbox);
+   if (aura_init_req == NULL)
+   goto exit;
+   aura_init_req->aura_id = aura_id;
+   aura_init_req->ctype = NPA_AQ_CTYPE_AURA;
+   aura_init_req->op = NPA_AQ_INSTOP_INIT;
+   mbox_memcpy(&aura_init_req->aura, aura, sizeof(*aura));
+
+   rc = mbox_process_msg(mbox, (void **)&aura_init_rsp);
+   if (rc < 0)
+   goto exit;
+
+   if (aura_init_rsp->hdr.rc == 0)
+   rc = 0;
+   else
+   rc = NPA_ERR_AURA_POOL_INIT;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
 static int
 npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, uint64_t aura_handle)
 {
@@ -156,6 +186,54 @@ npa_aura_pool_fini(struct mbox *m_box, uint32_t aura_id, 
uint64_t aura_handle)
return rc;
 }
 
+static int
+npa_aura_fini(struct mbox *m_box, uint32_t aura_id)
+{
+   struct npa_aq_enq_req *aura_req;
+   struct npa_aq_enq_rsp *aura_rsp;
+   struct ndc_sync_op *ndc_req;
+   struct mbox *mbox;
+   int rc = -ENOSPC;
+
+   /* Procedure for disabling an aura/pool */
+   plt_delay_us(10);
+
+   mbox = mbox_get(m_box);
+   aura_req = mbox_alloc_msg_npa_aq_enq(mbox);
+   if (aura_req == NULL)
+   goto exit;
+   aura_req->aura_id = aura_id;
+   aura_req->ctype = NPA_AQ_CTYPE_AURA;
+   aura_req->op = NPA_AQ_INSTOP_WRITE;
+   aura_req->aura.ena = 0;
+   aura_req->aura_mask.ena = ~aura_req->aura_mask.ena;
+
+   rc = mbox_process_msg(mbox, (void **)&aura_rsp);
+   if (rc < 0)
+   goto exit;
+
+   if (aura_rsp->hdr.rc != 0)
+   return NPA_ERR_AURA_POOL_FINI;
+
+   /* Sync NDC-NPA for LF */
+   ndc_req = mbox_alloc_msg_ndc_sync_op(mbox);
+   if (ndc_req == NULL) {
+   rc = -ENOSPC;
+   goto exit;
+   }
+   ndc_req->npa_lf_sync = 1;
+   rc = mbox_process(mbox);
+   if (rc) {
+   plt_err("Error on NDC-NPA LF sync, rc %d", rc);
+   rc = NPA_ERR_AURA_POOL_FINI;
+   goto exit;
+   }
+   rc = 0;
+exit:
+   mbox_put(mbox);
+   return rc;
+}
+
 int
 roc_npa_pool_op_pc_reset(uint64_t aura_handle)
 {
@@ -493,6 +571,108 @@ roc_npa_pool_create(uint64_t *aura_handle, uint32_t 
block_size,
return rc;
 }
 
+static int
+npa_aura_alloc(struct npa_lf *lf, const uint32_t block_count, int pool_id,
+  struct npa_aura_s *aura, uint64_t *aura_handle, uint32_t flags)
+{
+   int rc, aura_id;
+
+   /* Sanity check */
+   if (!lf || !aura || !aura_handle)
+   return NPA_ERR_PARAM;
+
+   roc_npa_dev_lock();
+   /* Get aura_id from resource bitmap */
+   aura_id = find_free_aura(lf, flags);
+   if (aura_id < 0) {
+   roc_npa_dev_unlock();
+   return NPA_ERR_AURA_ID_ALLOC;
+   }
+
+   /* Mark aura as reserved */
+   plt_bitmap_clear(lf->npa_bmp, aura_id);
+
+   roc_npa_dev_unlock();
+   rc = (aura_id < 0 || pool_id >= (int)lf->nr_pools ||
+ aura_id >= (int)BIT_ULL(6 + lf->aura_sz)) ?
+   NPA_ERR_AURA_ID_ALLOC :
+   0;
+   if (rc)
+   goto exit;
+
+   /* Update aura fields */
+   aura->pool_addr = pool_id; /* AF will translate to associated poolctx */
+   aura->ena = 1;
+   aura->shift = plt_log2_u32(block_count);
+   aura->shift = aura->shift < 8 ? 0 : aura->shift - 8;
+   aura->limit = block_count;
+   aura->pool_caching = 1;
+   aura->err_int_ena = BIT(NPA_AURA_ERR_INT_AURA_ADD_OVER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_ADD_UNDER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_AURA_FREE_UNDER);
+   aura->err_int_ena |= BIT(NPA_AURA_ERR_INT_POOL_DIS);
+   aura->avg_con = 0;
+   /* Many to one reduction */
+   aura->err_qint_idx = aura_id % lf->qints;
+
+   /* Issue AURA_INIT and POOL_INIT op */
+   rc = npa_aura_init(lf->mbox, aura_

[Bug 1237] QAT Crypto PMD requires QAT compress PMD for no good reasons

2023-05-23 Thread bugzilla
https://bugs.dpdk.org/show_bug.cgi?id=1237

Bug ID: 1237
   Summary: QAT Crypto PMD requires QAT compress PMD for no good
reasons
   Product: DPDK
   Version: 23.03
  Hardware: All
OS: All
Status: UNCONFIRMED
  Severity: normal
  Priority: Normal
 Component: cryptodev
  Assignee: dev@dpdk.org
  Reporter: maxime.coque...@redhat.com
  Target Milestone: ---

It is currently impossible to build DPDK with QAT Crypto PMD without selecting
the QAT Compress PMD [0]:
"
# The driver should not build if both compression and crypto are disabled
#FIXME common code depends on compression files so check only compress!
if not qat_compress # and not qat_crypto
build = false
reason = '' # rely on reason for compress/crypto above
subdir_done()
endif
"

It should be possible to only select QAT Crypto PMD, especially since
compressdev APIs are all experimental.

[0]: https://git.dpdk.org/dpdk/tree/drivers/common/qat/meson.build#n58
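
A possible direction, following the FIXME comment in the quoted
snippet (an untested sketch; the common code's dependency on the
compression files would still need to be untangled first):

"
# Skip the build only when *both* compression and crypto are disabled
if not qat_compress and not qat_crypto
build = false
reason = ''  # rely on reason for compress/crypto above
subdir_done()
endif
"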

-- 
You are receiving this mail because:
You are the assignee for the bug.

Re: [PATCH v1 1/1] eal: warn user when lcore cpuset includes multiple sockets

2023-05-23 Thread Bruce Richardson
On Tue, May 23, 2023 at 01:06:16PM +, Anatoly Burakov wrote:
> Currently, it is allowed to specify a cpuset for lcores such that it
> will include physical cores from different NUMA nodes. This has an
> effect of setting `rte_socket_id()` for that lcore to SOCKET_ID_ANY,
> so add a warning about it.
> 
> Signed-off-by: Anatoly Burakov 
> ---
>  lib/eal/common/eal_common_thread.c | 20 +---
>  1 file changed, 17 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/eal/common/eal_common_thread.c 
> b/lib/eal/common/eal_common_thread.c
> index 079a385630..46fd2aca1e 100644
> --- a/lib/eal/common/eal_common_thread.c
> +++ b/lib/eal/common/eal_common_thread.c
> @@ -65,10 +65,13 @@ static void
>  thread_update_affinity(rte_cpuset_t *cpusetp)
>  {
>   unsigned int lcore_id = rte_lcore_id();
> + int socket_id;
> +
> + /* find socket ID from cpuset */
> + socket_id = eal_cpuset_socket_id(cpusetp);
>  
>   /* store socket_id in TLS for quick access */
> - RTE_PER_LCORE(_socket_id) =
> - eal_cpuset_socket_id(cpusetp);
> + RTE_PER_LCORE(_socket_id) = socket_id;
>  
>   /* store cpuset in TLS for quick access */
>   memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
> @@ -76,9 +79,20 @@ thread_update_affinity(rte_cpuset_t *cpusetp)
>  
>   if (lcore_id != (unsigned)LCORE_ID_ANY) {
>   /* EAL thread will update lcore_config */
> - lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
> + lcore_config[lcore_id].socket_id = socket_id;
>   memmove(&lcore_config[lcore_id].cpuset, cpusetp,
>   sizeof(rte_cpuset_t));
> + 
> + /*
> +  * lcore_id is not LCORE_ID_ANY, meaning this is a DPDK lcore,
> +  * so having a valid NUMA affinity for this lcore is important.
> +  * However, if cpuset includes cores from multiple NUMA nodes,
> +  * the socket ID will be set to SOCKET_ID_ANY. Notify the user
> +  * about it if that happens.
> +  */
> + if (socket_id == SOCKET_ID_ANY)
> + RTE_LOG(INFO, EAL, "DPDK lcore %u has NUMA affinity set 
> to SOCKET_ID_ANY\n",
> + lcore_id);
>   }
>  }
While having a warning comment in the code is good, should we not also have
a user-visible warning when they specify a corelist parameter
value which includes the cross-socket scenario?

/Bruce


Re: [PATCH v3] crypto/qat: default to IPsec MB for pre and post computes

2023-05-23 Thread Ji, Kai
Acked-by: Kai Ji 


From: Dooley, Brian 
Sent: Monday, May 22, 2023 15:39
To: Ji, Kai 
Cc: dev@dpdk.org ; gak...@marvell.com ; 
Dooley, Brian 
Subject: [PATCH v3] crypto/qat: default to IPsec MB for pre and post computes

Pre and post computations currently use the OpenSSL library by default.
This patch changes the default option to Intel IPsec MB library version
1.4 for the required computations. If this version of IPsec MB is not met,
it will fall back to using OpenSSL.

Added version checks for libipsecmb and libcrypto into meson build.
Added directives for detecting IPsec MB or OpenSSL.

Signed-off-by: Brian Dooley 
---


Re: [PATCH v1 1/1] eal: warn user when lcore cpuset includes multiple sockets

2023-05-23 Thread Burakov, Anatoly

On 5/23/2023 2:55 PM, Bruce Richardson wrote:
> On Tue, May 23, 2023 at 01:06:16PM +, Anatoly Burakov wrote:
> > Currently, it is allowed to specify a cpuset for lcores such that it
> > will include physical cores from different NUMA nodes. This has the
> > effect of setting `rte_socket_id()` for that lcore to SOCKET_ID_ANY,
> > so add a warning about it.
> > 
> > Signed-off-by: Anatoly Burakov 
> > ---
> >  lib/eal/common/eal_common_thread.c | 20 +---
> >  1 file changed, 17 insertions(+), 3 deletions(-)
> > 
> > diff --git a/lib/eal/common/eal_common_thread.c 
> > b/lib/eal/common/eal_common_thread.c
> > index 079a385630..46fd2aca1e 100644
> > --- a/lib/eal/common/eal_common_thread.c
> > +++ b/lib/eal/common/eal_common_thread.c
> > @@ -65,10 +65,13 @@ static void
> >  thread_update_affinity(rte_cpuset_t *cpusetp)
> >  {
> > 	unsigned int lcore_id = rte_lcore_id();
> > +	int socket_id;
> > +
> > +	/* find socket ID from cpuset */
> > +	socket_id = eal_cpuset_socket_id(cpusetp);
> >  
> > 	/* store socket_id in TLS for quick access */
> > -	RTE_PER_LCORE(_socket_id) =
> > -		eal_cpuset_socket_id(cpusetp);
> > +	RTE_PER_LCORE(_socket_id) = socket_id;
> >  
> > 	/* store cpuset in TLS for quick access */
> > 	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
> > @@ -76,9 +79,20 @@ thread_update_affinity(rte_cpuset_t *cpusetp)
> >  
> > 	if (lcore_id != (unsigned)LCORE_ID_ANY) {
> > 		/* EAL thread will update lcore_config */
> > -		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
> > +		lcore_config[lcore_id].socket_id = socket_id;
> > 		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
> > 			sizeof(rte_cpuset_t));
> > +
> > +		/*
> > +		 * lcore_id is not LCORE_ID_ANY, meaning this is a DPDK lcore,
> > +		 * so having a valid NUMA affinity for this lcore is important.
> > +		 * However, if cpuset includes cores from multiple NUMA nodes,
> > +		 * the socket ID will be set to SOCKET_ID_ANY. Notify the user
> > +		 * about it if that happens.
> > +		 */
> > +		if (socket_id == SOCKET_ID_ANY)
> > +			RTE_LOG(INFO, EAL, "DPDK lcore %u has NUMA affinity set
> > to SOCKET_ID_ANY\n",
> > +				lcore_id);
> > 	}
> >  }
> 
> While having a warning comment in the code is good, should we not also have
> a user-visible warning when they specify a corelist parameter
> value which includes the cross-socket scenario?
> 
> /Bruce

We could, yes. We could walk through the cpuset for a given lcore, and
list their socket IDs, would that work?
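
A minimal sketch of that idea (illustrative only; it assumes EAL's
internal eal_cpu_socket_id() helper):

	static void
	log_cpuset_sockets(unsigned int lcore_id, rte_cpuset_t *cpusetp)
	{
		unsigned int cpu;

		for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
			if (!CPU_ISSET(cpu, cpusetp))
				continue;
			/* report the socket of each CPU in the set */
			RTE_LOG(INFO, EAL, "lcore %u: cpu %u is on socket %u\n",
				lcore_id, cpu, eal_cpu_socket_id(cpu));
		}
	}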


--
Thanks,
Anatoly



Re: [PATCH v2] crypto/qat: support to enable insecure algorithms

2023-05-23 Thread Ji, Kai
Acked-by: Kai Ji 


From: Poddar, Vikash ChandraX 
Sent: Tuesday, May 23, 2023 10:14
To: Akhil Goyal ; Fan Zhang ; Ji, 
Kai 
Cc: dev@dpdk.org ; Poddar, Vikash ChandraX 

Subject: [PATCH v2] crypto/qat: support to enable insecure algorithms

All the insecure algorithms are disabled by default in
cryptodev Gen 1, 2, 3 and 4.
Use qat_legacy_capa to enable all the legacy
algorithms.
This change affects both sym and asym insecure crypto
algorithms.
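
For example, the legacy algorithms could then be enabled per device
through the device argument named above (illustrative command line;
the BDF and the exact value syntax are assumptions):

	dpdk-test-crypto-perf -a 0000:3d:01.0,qat_legacy_capa=1 ...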

Signed-off-by: Vikash Poddar 
---
v2:
Extend the support to enable the insecure algorithms in
QAT Gen 1, 3 and 4 for sym as well as asym.
---



[PATCH v2 2/3] ip_frag: improve reassembly lookup performance

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Improve reassembly lookup performance by using NEON intrinsics for
key validation.

Signed-off-by: Pavan Nikhilesh 
---
 lib/ip_frag/ip_frag_internal.c   | 224 +--
 lib/ip_frag/ip_reassembly.h  |   6 +
 lib/ip_frag/rte_ip_frag_common.c |  10 ++
 3 files changed, 196 insertions(+), 44 deletions(-)

diff --git a/lib/ip_frag/ip_frag_internal.c b/lib/ip_frag/ip_frag_internal.c
index 7cbef647df..de78a0ed8f 100644
--- a/lib/ip_frag/ip_frag_internal.c
+++ b/lib/ip_frag/ip_frag_internal.c
@@ -4,8 +4,9 @@
 
 #include 
 
-#include 
 #include 
+#include 
+#include 
 
 #include "ip_frag_common.h"
 
@@ -280,10 +281,166 @@ ip_frag_find(struct rte_ip_frag_tbl *tbl, struct 
rte_ip_frag_death_row *dr,
return pkt;
 }
 
-struct ip_frag_pkt *
-ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
-   const struct ip_frag_key *key, uint64_t tms,
-   struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
+static inline void
+ip_frag_dbg(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *p,
+   uint32_t list_idx, uint32_t list_cnt)
+{
+   RTE_SET_USED(tbl);
+   RTE_SET_USED(list_idx);
+   RTE_SET_USED(list_cnt);
+   if (p->key.key_len == IPV4_KEYLEN)
+   IP_FRAG_LOG(DEBUG,
+   "%s:%d:\n"
+   "tbl: %p, max_entries: %u, use_entries: %u\n"
+   "ipv4_frag_pkt line0: %p, index: %u from %u\n"
+   "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
+   __func__, __LINE__, tbl, tbl->max_entries,
+   tbl->use_entries, p, list_idx, list_cnt,
+   p->key.src_dst[0], p->key.id, p->start);
+   else
+   IP_FRAG_LOG(DEBUG,
+   "%s:%d:\n"
+   "tbl: %p, max_entries: %u, use_entries: %u\n"
+   "ipv6_frag_pkt line0: %p, index: %u from %u\n"
+   "key: <" IPv6_KEY_BYTES_FMT
+   ", %#x>, start: %" PRIu64 "\n",
+   __func__, __LINE__, tbl, tbl->max_entries,
+   tbl->use_entries, p, list_idx, list_cnt,
+   IPv6_KEY_BYTES(p1[i].key.src_dst), p->key.id,
+   p->start);
+}
+
+#if defined(RTE_ARCH_ARM64)
+static inline struct ip_frag_pkt *
+ip_frag_lookup_neon(struct rte_ip_frag_tbl *tbl, const struct ip_frag_key 
*key, uint64_t tms,
+   struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
+{
+   struct ip_frag_pkt *empty, *old;
+   struct ip_frag_pkt *p1, *p2;
+   uint32_t assoc, sig1, sig2;
+   uint64_t max_cycles;
+
+   empty = NULL;
+   old = NULL;
+
+   max_cycles = tbl->max_cycles;
+   assoc = tbl->bucket_entries;
+
+   if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
+   return tbl->last;
+
+   /* different hashing methods for IPv4 and IPv6 */
+   if (key->key_len == IPV4_KEYLEN)
+   ipv4_frag_hash(key, &sig1, &sig2);
+   else
+   ipv6_frag_hash(key, &sig1, &sig2);
+
+   p1 = IP_FRAG_TBL_POS(tbl, sig1);
+   p2 = IP_FRAG_TBL_POS(tbl, sig2);
+
+   uint64x2_t key0, key1, key2, key3;
+   uint64_t vmask, zmask, ts_mask;
+   uint64x2_t ts0, ts1;
+   uint32x4_t nz_key;
+   uint8_t idx;
+   /* Bucket entries are always power of 2. */
+   rte_prefetch0(&p1[0].key);
+   rte_prefetch0(&p1[1].key);
+   rte_prefetch0(&p2[0].key);
+   rte_prefetch0(&p2[1].key);
+
+   while (assoc > 1) {
+   if (assoc > 2) {
+   rte_prefetch0(&p1[2].key);
+   rte_prefetch0(&p1[3].key);
+   rte_prefetch0(&p2[2].key);
+   rte_prefetch0(&p2[3].key);
+   }
+   struct ip_frag_pkt *p[] = {&p1[0], &p2[0], &p1[1], &p2[1]};
+   key0 = vld1q_u64(&p[0]->key.id_key_len);
+   key1 = vld1q_u64(&p[1]->key.id_key_len);
+   key2 = vld1q_u64(&p[2]->key.id_key_len);
+   key3 = vld1q_u64(&p[3]->key.id_key_len);
+
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key0), 1), nz_key, 0);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key1), 1), nz_key, 1);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key2), 1), nz_key, 2);
+   nz_key = 
vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key3), 1), nz_key, 3);
+
+   nz_key = vceqzq_u32(nz_key);
+   zmask = vget_lane_u64(vreinterpret_u64_u16(vshrn_n_u32(nz_key, 
16)), 0);
+   vmask = ~zmask;
+
+   vmask &= 0x8000800080008000;
+   for (; vmask > 0; vmask &= vmask - 1) {
+   idx = __builtin_ctzll(vmask) >> 4;
+   if (ip_frag_key_cmp(key, &p[id

[PATCH v2 1/3] ip_frag: optimize key compare and hash generation

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Use the optimized rte_hash_k32_cmp_eq routine for key comparison
on x86 and ARM64.
Use CRC instructions for hash generation on ARM64.

Signed-off-by: Pavan Nikhilesh 
---
 v2 Changes:
 - Fix compilation failure with non ARM64/x86 targets

 lib/hash/rte_cmp_arm64.h   | 16 
 lib/hash/rte_cmp_x86.h | 16 
 lib/ip_frag/ip_frag_common.h   | 14 +-
 lib/ip_frag/ip_frag_internal.c |  4 ++--
 4 files changed, 31 insertions(+), 19 deletions(-)

diff --git a/lib/hash/rte_cmp_arm64.h b/lib/hash/rte_cmp_arm64.h
index e9e26f9abd..a3e85635eb 100644
--- a/lib/hash/rte_cmp_arm64.h
+++ b/lib/hash/rte_cmp_arm64.h
@@ -3,7 +3,7 @@
  */

 /* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
-static int
+static inline int
 rte_hash_k16_cmp_eq(const void *key1, const void *key2,
size_t key_len __rte_unused)
 {
@@ -24,7 +24,7 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2,
return !(x0 == 0 && x1 == 0);
 }

-static int
+static inline int
 rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -32,7 +32,7 @@ rte_hash_k32_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 16, key_len);
 }

-static int
+static inline int
 rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -42,7 +42,7 @@ rte_hash_k48_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }

-static int
+static inline int
 rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
@@ -50,7 +50,7 @@ rte_hash_k64_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }

-static int
+static inline int
 rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -58,7 +58,7 @@ rte_hash_k80_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 64, key_len);
 }

-static int
+static inline int
 rte_hash_k96_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -66,7 +66,7 @@ rte_hash_k96_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 64, key_len);
 }

-static int
+static inline int
 rte_hash_k112_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -76,7 +76,7 @@ rte_hash_k112_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 96, key_len);
 }

-static int
+static inline int
 rte_hash_k128_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
diff --git a/lib/hash/rte_cmp_x86.h b/lib/hash/rte_cmp_x86.h
index 13a5836351..ddfbef462f 100644
--- a/lib/hash/rte_cmp_x86.h
+++ b/lib/hash/rte_cmp_x86.h
@@ -5,7 +5,7 @@
 #include 

 /* Functions to compare multiple of 16 byte keys (up to 128 bytes) */
-static int
+static inline int
 rte_hash_k16_cmp_eq(const void *key1, const void *key2, size_t key_len 
__rte_unused)
 {
const __m128i k1 = _mm_loadu_si128((const __m128i *) key1);
@@ -15,7 +15,7 @@ rte_hash_k16_cmp_eq(const void *key1, const void *key2, 
size_t key_len __rte_unu
return !_mm_test_all_zeros(x, x);
 }

-static int
+static inline int
 rte_hash_k32_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -23,7 +23,7 @@ rte_hash_k32_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 16, key_len);
 }

-static int
+static inline int
 rte_hash_k48_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k16_cmp_eq(key1, key2, key_len) ||
@@ -33,7 +33,7 @@ rte_hash_k48_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }

-static int
+static inline int
 rte_hash_k64_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k32_cmp_eq(key1, key2, key_len) ||
@@ -41,7 +41,7 @@ rte_hash_k64_cmp_eq(const void *key1, const void *key2, 
size_t key_len)
(const char *) key2 + 32, key_len);
 }

-static int
+static inline int
 rte_hash_k80_cmp_eq(const void *key1, const void *key2, size_t key_len)
 {
return rte_hash_k64_cmp_eq(key1, key2, key_len) ||
@@ -49,7 +49,7 @@ rte_hash_k80_cmp_eq(const void *key1, const void *key2, 
size_t key_l

[PATCH v2 3/3] test: add reassembly perf test

2023-05-23 Thread pbhagavatula
From: Pavan Nikhilesh 

Add a reassembly perf autotest for both IPv4 and IPv6 reassembly.
Each test is performed with a variable number of fragments per flow,
with either ordered or unordered fragments, and with interleaved flows.

Signed-off-by: Pavan Nikhilesh 
---
 app/test/meson.build|2 +
 app/test/test_reassembly_perf.c | 1001 +++
 2 files changed, 1003 insertions(+)
 create mode 100644 app/test/test_reassembly_perf.c

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..8cc4f03db8 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -108,6 +108,7 @@ test_sources = files(
 'test_rawdev.c',
 'test_rcu_qsbr.c',
 'test_rcu_qsbr_perf.c',
+'test_reassembly_perf.c',
 'test_reciprocal_division.c',
 'test_reciprocal_division_perf.c',
 'test_red.c',
@@ -297,6 +298,7 @@ perf_test_names = [
 'trace_perf_autotest',
 'ipsec_perf_autotest',
 'thash_perf_autotest',
+'reassembly_perf_autotest',
 ]
 
 driver_test_names = [
diff --git a/app/test/test_reassembly_perf.c b/app/test/test_reassembly_perf.c
new file mode 100644
index 00..850485a9c5
--- /dev/null
+++ b/app/test/test_reassembly_perf.c
@@ -0,0 +1,1001 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Marvell.
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "test.h"
+
+#define MAX_FLOWS  (1024 * 32)
+#define MAX_BKTS   MAX_FLOWS
+#define MAX_ENTRIES_PER_BKT 16
+#define MAX_FRAGMENTS  RTE_LIBRTE_IP_FRAG_MAX_FRAG
+#define MIN_FRAGMENTS  2
+#define MAX_PKTS   (MAX_FLOWS * MAX_FRAGMENTS)
+
+#define MAX_PKT_LEN 2048
+#define MAX_TTL_MS  (5 * MS_PER_S)
+
+/* use RFC863 Discard Protocol */
+#define UDP_SRC_PORT 9
+#define UDP_DST_PORT 9
+
+/* use RFC5735 / RFC2544 reserved network test addresses */
+#define IP_SRC_ADDR(x) ((198U << 24) | (18 << 16) | (0 << 8) | (x))
+#define IP_DST_ADDR(x) ((198U << 24) | (18 << 16) | (1 << 8) | (x))
+
+/* 2001:0200::/48 is IANA reserved range for IPv6 benchmarking (RFC5180) */
+static uint8_t ip6_addr[16] = {32, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0};
+#define IP6_VERSION 6
+
+#define IP_DEFTTL 64 /* from RFC 1340. */
+
+static struct rte_ip_frag_tbl *frag_tbl;
+static struct rte_mempool *pkt_pool;
+static struct rte_mbuf *mbufs[MAX_FLOWS][MAX_FRAGMENTS];
+static uint8_t frag_per_flow[MAX_FLOWS];
+static uint32_t flow_cnt;
+
+#define FILL_MODE_LINEAR  0
+#define FILL_MODE_RANDOM  1
+#define FILL_MODE_INTERLEAVED 2
+
+static int
+reassembly_test_setup(void)
+{
+   uint64_t max_ttl_cyc = (MAX_TTL_MS * rte_get_timer_hz()) / 1E3;
+
+   frag_tbl = rte_ip_frag_table_create(MAX_FLOWS, MAX_ENTRIES_PER_BKT,
+   MAX_FLOWS * MAX_ENTRIES_PER_BKT,
+   max_ttl_cyc, rte_socket_id());
+   if (frag_tbl == NULL)
+   return TEST_FAILED;
+
+   rte_mbuf_set_user_mempool_ops("ring_mp_mc");
+   pkt_pool = rte_pktmbuf_pool_create(
+   "reassembly_perf_pool", MAX_FLOWS * MAX_FRAGMENTS, 0, 0,
+   RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+   if (pkt_pool == NULL) {
+   printf("[%s] Failed to create pkt pool\n", __func__);
+   rte_ip_frag_table_destroy(frag_tbl);
+   return TEST_FAILED;
+   }
+
+   return TEST_SUCCESS;
+}
+
+static void
+reassembly_test_teardown(void)
+{
+   if (frag_tbl != NULL)
+   rte_ip_frag_table_destroy(frag_tbl);
+
+   if (pkt_pool != NULL)
+   rte_mempool_free(pkt_pool);
+}
+
+static void
+randomize_array_positions(void **array, uint8_t sz)
+{
+   void *tmp;
+   int i, j;
+
+   if (sz == 2) {
+   tmp = array[0];
+   array[0] = array[1];
+   array[1] = tmp;
+   } else {
+   for (i = sz - 1; i > 0; i--) {
+   j = rte_rand_max(i + 1);
+   tmp = array[i];
+   array[i] = array[j];
+   array[j] = tmp;
+   }
+   }
+}
+
+static void
+reassembly_print_banner(const char *proto_str)
+{
+   printf("+=="
+  "+\n");
+   printf("| %-32s| %-3s : %-58d|\n", proto_str, "Flow Count", MAX_FLOWS);
+   printf("+++=+=+"
+  "+===+\n");
+   printf("%-17s%-17s%-14s%-14s%-25s%-20s\n", "| Fragment Order",
+  "| Fragments/Flow", "| Outstanding", "| Cycles/Flow",
+  "| Cycles/Fragment insert", "| Cycles/Reassembly |");
+   printf("+++=+=+"
+  "=

Re: [PATCH] eventdev: fix alignment padding

2023-05-23 Thread Jerin Jacob
On Wed, May 17, 2023 at 7:05 PM Morten Brørup  
wrote:
>
> > From: Jerin Jacob [mailto:jerinjac...@gmail.com]
> > Sent: Wednesday, 17 May 2023 15.20
> >
> > On Tue, Apr 18, 2023 at 8:46 PM Mattias Rönnblom
> >  wrote:
> > >
> > > On 2023-04-18 16:07, Morten Brørup wrote:
> > > >> From: Mattias Rönnblom [mailto:mattias.ronnb...@ericsson.com]
> > > >> Sent: Tuesday, 18 April 2023 14.31
> > > >>
> > > >> On 2023-04-18 12:45, Sivaprasad Tummala wrote:
> > > >>> fixed the padding required to align to cacheline size.
> > > >>>
> > > >>
> > > >> What's the point in having this structure cache-line aligned? False
> > > >> sharing is a non-issue, since this is more or less a read only struct.
> > > >>
> > > >> This is not so much a comment on your patch, but the 
> > > >> __rte_cache_aligned
> > > >> attribute.
> > > >
> > > > When the structure is cache aligned, an individual entry in the array 
> > > > does
> > not unnecessarily cross a cache line border. With 16 pointers and aligned, 
> > it
> > uses exactly two cache lines. If unaligned, it may span three cache lines.
> > > >
> > > An *element* in the reserved uint64_t array won't span across two cache
> > > lines, regardless if __rte_cache_aligned is specified or not. You would
> > > need a packed struct for that to occur, plus the reserved array field
> > > being preceded by some appropriately-sized fields.
> > >
> > > The only effect __rte_cache_aligned has on this particular struct is
> > > that if you instantiate the struct on the stack, or as a static
> > > variable, it will be cache-line aligned. That effect you can get by
> > > specifying the attribute when you define the variable, and you will save
> > > some space (by having smaller elements). In this case it doesn't matter
> > > if the array is compact or not, since an application is likely to only
> > > use one of the members in the array.
> > >
> > > It also doesn't matter of the struct is two or three cache lines, as
> > > long as only the first two are used.
> >
> >
> > Discussions stalled at this point.
>
> Not stalled at this point. You seem to have missed my follow-up email 
> clarifying why cache aligning is relevant:
> http://inbox.dpdk.org/dev/98cbd80474fa8b44bf855df32c47dc35d87...@smartserver.smartshare.dk/
>
> But the patch still breaks the ABI, and thus should be postponed to 23.11.

Yes.

>
> >
> > Hi Shiva,
> >
> > Marking this patch as rejected. If you think the other way, Please
> > change patchwork status and let's discuss more here.
>
> I am not taking any action regarding the status of this patch. I will leave 
> that decision to Jerin and Shiva.

It is good to merge.

Shiva,

Please send ABI change notice for this for 23.11 NOW.
Once it is Acked and merged, I will merge the patch for the 23.11 release.

I am marking the patch as DEFERRED in patchwork; in the next release
window it will come back as NEW in patchwork.

>
> >
> >
> >
> > >
> > > >>
> > > >>> Fixes: 54f17843a887 ("eventdev: add port maintenance API")
> > > >>> Cc: mattias.ronnb...@ericsson.com
> > > >>>
> > > >>> Signed-off-by: Sivaprasad Tummala 
> > > >>> ---
> > > >>>lib/eventdev/rte_eventdev_core.h | 2 +-
> > > >>>1 file changed, 1 insertion(+), 1 deletion(-)
> > > >>>
> > > >>> diff --git a/lib/eventdev/rte_eventdev_core.h
> > > >> b/lib/eventdev/rte_eventdev_core.h
> > > >>> index c328bdbc82..c27a52ccc0 100644
> > > >>> --- a/lib/eventdev/rte_eventdev_core.h
> > > >>> +++ b/lib/eventdev/rte_eventdev_core.h
> > > >>> @@ -65,7 +65,7 @@ struct rte_event_fp_ops {
> > > >>> /**< PMD Tx adapter enqueue same destination function. */
> > > >>> event_crypto_adapter_enqueue_t ca_enqueue;
> > > >>> /**< PMD Crypto adapter enqueue function. */
> > > >>> -   uintptr_t reserved[6];
> > > >>> +   uintptr_t reserved[5];
> > > >>>} __rte_cache_aligned;
> > > >>>
> > > >>>extern struct rte_event_fp_ops 
> > > >>> rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
> > > >
> > >


RE: [PATCH 1/1] app/test: resolve mbuf_test application failure

2023-05-23 Thread Rakesh Kudurumalla
Ping

Regards,
Rakesh

> -Original Message-
> From: Rakesh Kudurumalla 
> Sent: Wednesday, April 26, 2023 2:58 PM
> To: Olivier Matz 
> Cc: dev@dpdk.org; Jerin Jacob Kollanukkaran ; Nithin
> Kumar Dabilpuram ; Rakesh Kudurumalla
> 
> Subject: [PATCH 1/1] app/test: resolve mbuf_test application failure
> 
> when RTE_ENABLE_ASSERT is defined test_mbuf application is failing
> because we are trying to attach extbuf to a cloned buffer to which external
> mbuf is already attached. This patch fixes the same.
> 
> Signed-off-by: Rakesh Kudurumalla 
> ---
> v2: removed gerrit id
> 
>  app/test/test_mbuf.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c index
> 8d8d3b9386..e2b81db308 100644
> --- a/app/test/test_mbuf.c
> +++ b/app/test/test_mbuf.c
> @@ -2375,6 +2375,7 @@ test_pktmbuf_ext_shinfo_init_helper(struct
> rte_mempool *pktmbuf_pool)
>   GOTO_FAIL("%s: Bad packet length\n", __func__);
> 
>   /* attach the same external buffer to the cloned mbuf */
> + clone->ol_flags = 0;
>   rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
>   ret_shinfo);
>   if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
> --
> 2.25.1



Re: [PATCH 1/1] vfio: Make buildable with MUSL runtime

2023-05-23 Thread Philip Prindeville



> On May 23, 2023, at 5:33 AM, Thomas Monjalon  wrote:
> 
> 22/05/2023 11:27, Burakov, Anatoly:
>> On 5/20/2023 7:07 PM, Philip Prindeville wrote:
>>> From: Philip Prindeville 
>>> 
>>> pread64() and pwrite64() are declared in  in MUSL and
>>> other (i.e. not glibc) C runtimes.
>>> 
>>> Signed-off-by: Philip Prindeville 
>> Acked-by: Anatoly Burakov 
> 
> It was "buildable" already, and I don't understand how.
> In any doubt, I've added Cc: sta...@dpdk.org for backports.
> 
> Applied, thanks.
> 
> 


Yeah, it builds with glibc just fine.  MUSL is another story...



Re: [EXT] Re: [PATCH v2] lib/cryptodev: fix assertion to remove GCC compilation warning

2023-05-23 Thread Stephen Hemminger
On Tue, 23 May 2023 08:12:28 +
Akhil Goyal  wrote:

> > 
> > This could happen if the passed in length to this routine was larger than
> > the amount of data in the mbuf. Should the function check and return an 
> > error?
> > 
> > Panic should only be reserved for seriously corrupted input (like invalid 
> > mbuf).
> > 
> > Also, this is a big enough function that it really should not be inlined.  
> 
> This is a datapath API. RTE_ASSERT is normally not enabled in release build.
> So, this assert is not doing any check for normal scenario.
> We normally avoid these type of error checks in the datapath.
> And while building in debug mode, we need these asserts to give a backtrace 
> also
> To debug the rootcause of the issue.
> 
> I would suggest fixing the assert itself instead of adding a check.
> Current patch will affect performance.
> 
> Agreed, that the function is big for being an inline function,
> but that is what all the datapath APIs are and
> we keep them inline to improve the performance.

Inline is not a magic go-fast switch. It turns out that compilers and CPUs
already do a good job with functions. Using LTO helps too.


RE: [PATCH v2 2/3] ip_frag: improve reassembly lookup performance

2023-05-23 Thread Honnappa Nagarahalli



> -Original Message-
> From: pbhagavat...@marvell.com 
> Sent: Tuesday, May 23, 2023 9:39 AM
> To: jer...@marvell.com; Honnappa Nagarahalli
> ; nd ; Konstantin Ananyev
> 
> Cc: dev@dpdk.org; Pavan Nikhilesh 
> Subject: [PATCH v2 2/3] ip_frag: improve reassembly lookup performance
> 
> From: Pavan Nikhilesh 
> 
> Improve reassembly lookup performance by using NEON intrinsics for key
> validation.
What is the improvement you see with this?

> 
> Signed-off-by: Pavan Nikhilesh 
> ---
>  lib/ip_frag/ip_frag_internal.c   | 224 +--
>  lib/ip_frag/ip_reassembly.h  |   6 +
>  lib/ip_frag/rte_ip_frag_common.c |  10 ++
>  3 files changed, 196 insertions(+), 44 deletions(-)
> 
> diff --git a/lib/ip_frag/ip_frag_internal.c b/lib/ip_frag/ip_frag_internal.c 
> index
> 7cbef647df..de78a0ed8f 100644
> --- a/lib/ip_frag/ip_frag_internal.c
> +++ b/lib/ip_frag/ip_frag_internal.c
> @@ -4,8 +4,9 @@
> 
>  #include 
> 
> -#include 
>  #include 
> +#include 
> +#include 
> 
>  #include "ip_frag_common.h"
> 
> @@ -280,10 +281,166 @@ ip_frag_find(struct rte_ip_frag_tbl *tbl, struct
> rte_ip_frag_death_row *dr,
>   return pkt;
>  }
> 
> -struct ip_frag_pkt *
> -ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
> - const struct ip_frag_key *key, uint64_t tms,
> - struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
> +static inline void
> +ip_frag_dbg(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *p,
> + uint32_t list_idx, uint32_t list_cnt) {
> + RTE_SET_USED(tbl);
> + RTE_SET_USED(list_idx);
> + RTE_SET_USED(list_cnt);
> + if (p->key.key_len == IPV4_KEYLEN)
> + IP_FRAG_LOG(DEBUG,
> + "%s:%d:\n"
> + "tbl: %p, max_entries: %u, use_entries: %u\n"
> + "ipv4_frag_pkt line0: %p, index: %u from %u\n"
> + "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
> + __func__, __LINE__, tbl, tbl->max_entries,
> + tbl->use_entries, p, list_idx, list_cnt,
> + p->key.src_dst[0], p->key.id, p->start);
> + else
> + IP_FRAG_LOG(DEBUG,
> + "%s:%d:\n"
> + "tbl: %p, max_entries: %u, use_entries: %u\n"
> + "ipv6_frag_pkt line0: %p, index: %u from %u\n"
> + "key: <" IPv6_KEY_BYTES_FMT
> + ", %#x>, start: %" PRIu64 "\n",
> + __func__, __LINE__, tbl, tbl->max_entries,
> + tbl->use_entries, p, list_idx, list_cnt,
> + IPv6_KEY_BYTES(p->key.src_dst), p->key.id,
> + p->start);
> +}
> +
> +#if defined(RTE_ARCH_ARM64)
> +static inline struct ip_frag_pkt *
> +ip_frag_lookup_neon(struct rte_ip_frag_tbl *tbl, const struct ip_frag_key *key, uint64_t tms,
> + struct ip_frag_pkt **free, struct ip_frag_pkt **stale) {
> + struct ip_frag_pkt *empty, *old;
> + struct ip_frag_pkt *p1, *p2;
> + uint32_t assoc, sig1, sig2;
> + uint64_t max_cycles;
> +
> + empty = NULL;
> + old = NULL;
> +
> + max_cycles = tbl->max_cycles;
> + assoc = tbl->bucket_entries;
> +
> + if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
> + return tbl->last;
> +
> + /* different hashing methods for IPv4 and IPv6 */
> + if (key->key_len == IPV4_KEYLEN)
> + ipv4_frag_hash(key, &sig1, &sig2);
> + else
> + ipv6_frag_hash(key, &sig1, &sig2);
> +
> + p1 = IP_FRAG_TBL_POS(tbl, sig1);
> + p2 = IP_FRAG_TBL_POS(tbl, sig2);
> +
> + uint64x2_t key0, key1, key2, key3;
> + uint64_t vmask, zmask, ts_mask;
> + uint64x2_t ts0, ts1;
> + uint32x4_t nz_key;
> + uint8_t idx;
> + /* Bucket entries are always power of 2. */
> + rte_prefetch0(&p1[0].key);
> + rte_prefetch0(&p1[1].key);
> + rte_prefetch0(&p2[0].key);
> + rte_prefetch0(&p2[1].key);
> +
> + while (assoc > 1) {
> + if (assoc > 2) {
> + rte_prefetch0(&p1[2].key);
> + rte_prefetch0(&p1[3].key);
> + rte_prefetch0(&p2[2].key);
> + rte_prefetch0(&p2[3].key);
> + }
> + struct ip_frag_pkt *p[] = {&p1[0], &p2[0], &p1[1], &p2[1]};
> + key0 = vld1q_u64(&p[0]->key.id_key_len);
> + key1 = vld1q_u64(&p[1]->key.id_key_len);
> + key2 = vld1q_u64(&p[2]->key.id_key_len);
> + key3 = vld1q_u64(&p[3]->key.id_key_len);
> +
> +		nz_key = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key0), 1), nz_key, 0);
> +		nz_key = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key1), 1), nz_key, 1);
> +		nz_key = vsetq_lane_u32(vgetq_lane_u32(vreinterpretq_u32_u64(key2), 1), nz_key, 2);
> +		nz_key = vsetq_lane_u32(vgetq_l

Re: [PATCH 1/1] vfio: Make buildable with MUSL runtime

2023-05-23 Thread Thomas Monjalon
23/05/2023 17:46, Philip Prindeville:
> 
> > On May 23, 2023, at 5:33 AM, Thomas Monjalon  wrote:
> > 
> > 22/05/2023 11:27, Burakov, Anatoly:
> >> On 5/20/2023 7:07 PM, Philip Prindeville wrote:
> >>> From: Philip Prindeville 
> >>> 
> >>> pread64() and pwrite64() are declared in <unistd.h> in MUSL and
> >>> other (i.e. not glibc) C runtimes.
> >>> 
> >>> Signed-off-by: Philip Prindeville 
> >> Acked-by: Anatoly Burakov 
> > 
> > It was "buildable" already, and I don't understand how.
> > In any doubt, I've added Cc: sta...@dpdk.org for backports.
> > 
> > Applied, thanks.
> 
> Yeah, it builds with glibc just fine.  MUSL is another story...

It is also building on Alpine/musl.
I would like to understand why.
Do you have failure logs and versions?




[PATCH v2] ethdev: fix asynchronous destroy and push tracepoints

2023-05-23 Thread Alexander Kozyrev
The rte_flow_async_destroy() and rte_flow_push() APIs are
intended to be as fast as possible, so the tracepoints for
these functions must be marked as fast-path tracepoints.
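
Background, hedged: RTE_TRACE_POINT_FP trace points are compiled out unless
the build defines RTE_ENABLE_TRACE_FP (typically set via the enable_trace_fp
meson option), so hot-path functions pay nothing for them in default builds.
Conceptually:

	/* Conceptual sketch only; the real macros live in rte_trace_point.h. */
	#ifdef RTE_ENABLE_TRACE_FP
	#define TRACE_FP(emit) do { emit; } while (0)	/* recorded when tracing is enabled */
	#else
	#define TRACE_FP(emit) do { } while (0)		/* compiled to nothing */
	#endif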

Fixes: 6679cf21d608 ("ethdev: add trace points")

Signed-off-by: Alexander Kozyrev 
Acked-by: Ankur Dwivedi 
Acked-by: Ori Kam 
---
 lib/ethdev/ethdev_trace.h | 42 +++
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/lib/ethdev/ethdev_trace.h b/lib/ethdev/ethdev_trace.h
index 3dc7d028b8..18bf1a706f 100644
--- a/lib/ethdev/ethdev_trace.h
+++ b/lib/ethdev/ethdev_trace.h
@@ -1582,27 +1582,6 @@ RTE_TRACE_POINT(
rte_trace_point_emit_int(ret);
 )
 
-RTE_TRACE_POINT(
-   rte_flow_trace_async_destroy,
-   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id,
-   const struct rte_flow_op_attr *op_attr,
-   const struct rte_flow *flow, const void *user_data, int ret),
-   rte_trace_point_emit_u16(port_id);
-   rte_trace_point_emit_u32(queue_id);
-   rte_trace_point_emit_ptr(op_attr);
-   rte_trace_point_emit_ptr(flow);
-   rte_trace_point_emit_ptr(user_data);
-   rte_trace_point_emit_int(ret);
-)
-
-RTE_TRACE_POINT(
-   rte_flow_trace_push,
-   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id, int ret),
-   rte_trace_point_emit_u16(port_id);
-   rte_trace_point_emit_u32(queue_id);
-   rte_trace_point_emit_int(ret);
-)
-
 RTE_TRACE_POINT(
rte_mtr_trace_capabilities_get,
RTE_TRACE_POINT_ARGS(uint16_t port_id,
@@ -2345,6 +2324,27 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_ptr(flow);
 )
 
+RTE_TRACE_POINT_FP(
+   rte_flow_trace_async_destroy,
+   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id,
+   const struct rte_flow_op_attr *op_attr,
+   const struct rte_flow *flow, const void *user_data, int ret),
+   rte_trace_point_emit_u16(port_id);
+   rte_trace_point_emit_u32(queue_id);
+   rte_trace_point_emit_ptr(op_attr);
+   rte_trace_point_emit_ptr(flow);
+   rte_trace_point_emit_ptr(user_data);
+   rte_trace_point_emit_int(ret);
+)
+
+RTE_TRACE_POINT_FP(
+   rte_flow_trace_push,
+   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id, int ret),
+   rte_trace_point_emit_u16(port_id);
+   rte_trace_point_emit_u32(queue_id);
+   rte_trace_point_emit_int(ret);
+)
+
 RTE_TRACE_POINT_FP(
rte_flow_trace_pull,
RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id,
-- 
2.18.2



RE: [PATCH] ethdev: fix asynchronous destroy and push tracepoints

2023-05-23 Thread Alexander Kozyrev
> Patch doesn't apply cleanly because it can't find
> 'rte_flow_trace_async_update', is there a dependency?

Fixed in v2. The patch was based on my "update rule" commit.



RE: [PATCH v2 2/3] ip_frag: improve reassembly lookup performance

2023-05-23 Thread Pavan Nikhilesh Bhagavatula
> > -Original Message-
> > From: pbhagavat...@marvell.com 
> > Sent: Tuesday, May 23, 2023 9:39 AM
> > To: jer...@marvell.com; Honnappa Nagarahalli
> > ; nd ; Konstantin
> Ananyev
> > 
> > Cc: dev@dpdk.org; Pavan Nikhilesh 
> > Subject: [PATCH v2 2/3] ip_frag: improve reassembly lookup performance
> >
> > From: Pavan Nikhilesh 
> >
> > Improve reassembly lookup performance by using NEON intrinsics for key
> > validation.
> What improvement do you see with this?

On Neoverse-N2 I see an improvement of around 300-600 cycles per flow and
~200 cycles per insert.

Here are some test results.

Without patch:
IPV4, Flow Count: 32768

Fragment Order | Fragments/Flow | Outstanding | Cycles/Flow | Cycles/Fragment insert | Cycles/Reassembly
---------------+----------------+-------------+-------------+------------------------+------------------
LINEAR         | 2              | 0           | 1244        | 919                    | 114
RANDOM         | 2              | 0           | 1653        | 968                    | 128
LINEAR         | 3              | 0           | 1379        | 503                    | 110
RANDOM         | 3              | 0           | 1613        | 520                    | 139
LINEAR         | 8              | 0           | 2030        | 199                    | 190
RANDOM         | 8              | 0           | 4393        | 309                    | 402
LINEAR         | RANDOM         | 0           | 1531        | 333                    | 147
RANDOM         | RANDOM         | 0           | 2771        | 357                    | 213
LINEAR         | 2              | 100         | 1228        | 920                    | 102
LINEAR         | 2              | 500         | 1197        | 905                    | 103
LINEAR         | 2              | 1000        | 1183        | 904                    | 104
LINEAR         | 2              | 2000        | 1153        | 921                    | 105
LINEAR         | 2              | 3000        | 1123        | 911                    | 111
LINEAR         | 8              | 100         | 829         | 193                    | 690
LINEAR         | 8              | 500         | 830         | 195                    | 682
LINEAR         | 8              | 1000        | 817         | 211                    | 690
LINEAR         | 8              | 2000        | 819         | 195                    | 690
LINEAR         | 8              | 3000        | 823         | 223                    | 676

[PATCH v4] ethdev: add flow rule actions update API

2023-05-23 Thread Alexander Kozyrev
Introduce the new rte_flow_actions_update() API allowing users
to update the action list of an already existing rule.
Flow rules can now be updated without having to destroy
the rule first and create a new one.
A single API call ensures that no packets are lost by
guaranteeing atomicity and flow state correctness.
The asynchronous rte_flow_async_actions_update() is added as well.
The matcher is not updated, only the action list is.
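
As an illustration, a minimal usage sketch of the synchronous variant (hedged:
the QUEUE action, the port_id variable and the pre-existing 'flow' handle are
placeholders, and error handling is abbreviated):

	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* 'flow' was returned earlier by rte_flow_create() on the same port.
	 * Only the actions are replaced; the match criteria stay intact. */
	if (rte_flow_actions_update(port_id, flow, actions, &error) != 0)
		printf("actions update failed: %s\n",
		       error.message ? error.message : "(no message)");

The asynchronous rte_flow_async_actions_update() additionally takes a queue
ID, operation attributes, an actions template index and a user_data cookie,
mirroring the other rte_flow_async_*() calls.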

Signed-off-by: Alexander Kozyrev 
Acked-by: Ori Kam 
---
 doc/guides/prog_guide/rte_flow.rst | 42 +
 doc/guides/rel_notes/release_23_07.rst |  5 +++
 lib/ethdev/ethdev_trace.h  | 29 
 lib/ethdev/ethdev_trace_points.c   |  6 +++
 lib/ethdev/rte_flow.c  | 54 ++
 lib/ethdev/rte_flow.h  | 62 ++
 lib/ethdev/rte_flow_driver.h   | 16 +++
 lib/ethdev/version.map |  4 ++
 8 files changed, 218 insertions(+)

diff --git a/doc/guides/prog_guide/rte_flow.rst b/doc/guides/prog_guide/rte_flow.rst
index 32fc45516a..dada2568fe 100644
--- a/doc/guides/prog_guide/rte_flow.rst
+++ b/doc/guides/prog_guide/rte_flow.rst
@@ -3446,6 +3446,31 @@ Return values:
 
 - 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
 
+Update
+~~~~~~
+
+Update an existing flow rule with a new set of actions.
+
+.. code-block:: c
+
+   int
+   rte_flow_actions_update(uint16_t port_id,
+   struct rte_flow *flow,
+   const struct rte_flow_action actions[],
+   struct rte_flow_error *error);
+
+Arguments:
+
+- ``port_id``: port identifier of Ethernet device.
+- ``flow``: flow rule handle to update.
+- ``actions``: associated actions (list terminated by the END action).
+- ``error``: perform verbose error reporting if not NULL. PMDs initialize
+  this structure in case of error only.
+
+Return values:
+
+- 0 on success, a negative errno value otherwise and ``rte_errno`` is set.
+
 Flush
 ~~~~~
 
@@ -3795,6 +3820,23 @@ Enqueueing a flow rule destruction operation is similar to simple destruction.
   void *user_data,
   struct rte_flow_error *error);
 
+Enqueue update operation
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Enqueueing a flow rule update operation to replace actions in the existing rule.
+
+.. code-block:: c
+
+   int
+   rte_flow_async_actions_update(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow *flow,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error);
+
 Enqueue indirect action creation operation
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/doc/guides/rel_notes/release_23_07.rst b/doc/guides/rel_notes/release_23_07.rst
index a9b1293689..76377614d2 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -55,6 +55,11 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+   * **Added flow rule update to the Flow API.**
+
+ * Added API for updating the action list of an already existing rule.
+   Introduced both rte_flow_actions_update() and
+   rte_flow_async_actions_update() functions.
 
 Removed Items
 -
diff --git a/lib/ethdev/ethdev_trace.h b/lib/ethdev/ethdev_trace.h
index 3dc7d028b8..f14c67e1c6 100644
--- a/lib/ethdev/ethdev_trace.h
+++ b/lib/ethdev/ethdev_trace.h
@@ -2220,6 +2220,17 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_int(ret);
 )
 
+/* Called in loop in app/test-flow-perf */
+RTE_TRACE_POINT_FP(
+   rte_flow_trace_actions_update,
+   RTE_TRACE_POINT_ARGS(uint16_t port_id, const struct rte_flow *flow,
+   const struct rte_flow_action *actions, int ret),
+   rte_trace_point_emit_u16(port_id);
+   rte_trace_point_emit_ptr(flow);
+   rte_trace_point_emit_ptr(actions);
+   rte_trace_point_emit_int(ret);
+)
+
 RTE_TRACE_POINT_FP(
rte_flow_trace_query,
RTE_TRACE_POINT_ARGS(uint16_t port_id, const struct rte_flow *flow,
@@ -2345,6 +2356,24 @@ RTE_TRACE_POINT_FP(
rte_trace_point_emit_ptr(flow);
 )
 
+RTE_TRACE_POINT_FP(
+   rte_flow_trace_async_actions_update,
+   RTE_TRACE_POINT_ARGS(uint16_t port_id, uint32_t queue_id,
+   const struct rte_flow_op_attr *op_attr,
+   const struct rte_flow *flow,
+   const struct rte_flow_action *actions,
+   uint8_t actions_template_index,
+   const void *user_data, int ret),
+   rte_trace_point_emit_u16(port_id);
+   rte_tra

[PATCH v1 0/6] baseband/fpga_5gnr_fec: changes for 23.07

2023-05-23 Thread Hernan Vargas
Targeting 23.07 if possible. Add support for AGX100 (N6000) and corner case 
fixes.

Hernan Vargas (6):
  baseband/fpga_5gnr_fec: fix possible div by zero
  baseband/fpga_5gnr_fec: fix seg fault unconf queue
  baseband/fpga_5gnr_fec: renaming for consistency
  baseband/fpga_5gnr_fec: add Vista Creek variant
  baseband/fpga_5gnr_fec: add AGX100 support
  baseband/fpga_5gnr_fec: cosmetic comment changes

 doc/guides/bbdevs/fpga_5gnr_fec.rst   |   72 +-
 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h   |  273 ++
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h|  349 +--
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 2261 -
 .../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h |   27 +-
 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h  |  140 +
 6 files changed, 2157 insertions(+), 965 deletions(-)
 create mode 100644 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
 create mode 100644 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h

-- 
2.37.1



[PATCH v1 1/6] baseband/fpga_5gnr_fec: fix possible div by zero

2023-05-23 Thread Hernan Vargas
Add an early exit when z_c is zero to prevent a possible
division by zero.

Fixes: 44dc6faa796f ("baseband/fpga_5gnr_fec: add LDPC processing functions")
Cc: sta...@dpdk.org

Signed-off-by: Hernan Vargas 
---
 drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index f29565af8cca..9388cce52960 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -879,6 +879,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
 {
if (rv_index == 0)
return 0;
+   if (z_c == 0)
+   return 0;
uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
if (n_cb == n) {
if (rv_index == 1)
-- 
2.37.1



[PATCH v1 2/6] baseband/fpga_5gnr_fec: fix seg fault unconf queue

2023-05-23 Thread Hernan Vargas
Add a check to prevent a segmentation fault in case a queue
that was not configured earlier is started.

Fixes: c58109a8871d ("baseband/fpga_5gnr_fec: add queue configuration")
Cc: sta...@dpdk.org

Signed-off-by: Hernan Vargas 
---
 drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 9388cce52960..a6211f73e6e3 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -573,6 +573,10 @@ fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
return -1;
}
 #endif
+   if (dev->data->queues[queue_id].queue_private == NULL) {
+   rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
+   return -1;
+   }
struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
-- 
2.37.1



[PATCH v1 3/6] baseband/fpga_5gnr_fec: renaming for consistency

2023-05-23 Thread Hernan Vargas
Rename generic functions and constants to use the FPGA 5GNR prefix naming
to prepare for code reuse for a new FPGA implementation variant.
No functional impact.

Signed-off-by: Hernan Vargas 
---
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h| 117 +++--
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 455 --
 .../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h |  17 +-
 3 files changed, 269 insertions(+), 320 deletions(-)

diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index e3038112fabb..9300349a731b 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -31,26 +31,26 @@
 #define FPGA_5GNR_FEC_VF_DEVICE_ID (0x0D90)
 
 /* Align DMA descriptors to 256 bytes - cache-aligned */
-#define FPGA_RING_DESC_ENTRY_LENGTH (8)
+#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
 /* Ring size is in 256 bits (32 bytes) units */
 #define FPGA_RING_DESC_LEN_UNIT_BYTES (32)
 /* Maximum size of queue */
-#define FPGA_RING_MAX_SIZE (1024)
+#define FPGA_5GNR_RING_MAX_SIZE (1024)
 
 #define FPGA_NUM_UL_QUEUES (32)
 #define FPGA_NUM_DL_QUEUES (32)
 #define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)
 #define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
 
-#define FPGA_INVALID_HW_QUEUE_ID (0x)
+#define FPGA_5GNR_INVALID_HW_QUEUE_ID (0x)
 
-#define FPGA_QUEUE_FLUSH_TIMEOUT_US (1000)
-#define FPGA_HARQ_RDY_TIMEOUT (10)
-#define FPGA_TIMEOUT_CHECK_INTERVAL (5)
-#define FPGA_DDR_OVERFLOW (0x10)
+#define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
+#define FPGA_5GNR_HARQ_RDY_TIMEOUT (10)
+#define FPGA_5GNR_TIMEOUT_CHECK_INTERVAL (5)
+#define FPGA_5GNR_DDR_OVERFLOW (0x10)
 
-#define FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES 8
-#define FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES 8
+#define FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES 8
+#define FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES 8
 
 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
 #define N_ZC_1 66 /* N = 66 Zc for BG 1 */
@@ -152,7 +152,7 @@ struct __rte_packed fpga_dma_enc_desc {
};
 
uint8_t sw_ctxt[FPGA_RING_DESC_LEN_UNIT_BYTES *
-   (FPGA_RING_DESC_ENTRY_LENGTH - 1)];
+   (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
};
 };
 
@@ -197,7 +197,7 @@ struct __rte_packed fpga_dma_dec_desc {
uint8_t cbs_in_op;
};
 
-   uint32_t sw_ctxt[8 * (FPGA_RING_DESC_ENTRY_LENGTH - 1)];
+   uint32_t sw_ctxt[8 * (FPGA_5GNR_RING_DESC_ENTRY_LENGTH - 1)];
};
 };
 
@@ -207,8 +207,8 @@ union fpga_dma_desc {
struct fpga_dma_dec_desc dec_req;
 };
 
-/* FPGA 5GNR FEC Ring Control Register */
-struct __rte_packed fpga_ring_ctrl_reg {
+/* FPGA 5GNR Ring Control Register. */
+struct __rte_packed fpga_5gnr_ring_ctrl_reg {
uint64_t ring_base_addr;
uint64_t ring_head_addr;
uint16_t ring_size:11;
@@ -226,38 +226,37 @@ struct __rte_packed fpga_ring_ctrl_reg {
uint16_t rsrvd3;
uint16_t head_point;
uint16_t rsrvd4;
-
 };
 
-/* Private data structure for each FPGA FEC device */
+/* Private data structure for each FPGA 5GNR device. */
 struct fpga_5gnr_fec_device {
-   /** Base address of MMIO registers (BAR0) */
+   /** Base address of MMIO registers (BAR0). */
void *mmio_base;
-   /** Base address of memory for sw rings */
+   /** Base address of memory for sw rings. */
void *sw_rings;
-   /** Physical address of sw_rings */
+   /** Physical address of sw_rings. */
rte_iova_t sw_rings_phys;
/** Number of bytes available for each queue in device. */
uint32_t sw_ring_size;
-   /** Max number of entries available for each queue in device */
+   /** Max number of entries available for each queue in device. */
uint32_t sw_ring_max_depth;
-   /** Base address of response tail pointer buffer */
+   /** Base address of response tail pointer buffer. */
uint32_t *tail_ptrs;
-   /** Physical address of tail pointers */
+   /** Physical address of tail pointers. */
rte_iova_t tail_ptr_phys;
-   /** Queues flush completion flag */
+   /** Queues flush completion flag. */
uint64_t *flush_queue_status;
-   /* Bitmap capturing which Queues are bound to the PF/VF */
+   /** Bitmap capturing which Queues are bound to the PF/VF. */
uint64_t q_bound_bit_map;
-   /* Bitmap capturing which Queues have already been assigned */
+   /** Bitmap capturing which Queues have already been assigned. */
uint64_t q_assigned_bit_map;
-   /** True if this is a PF FPGA FEC device */
+   /** True if this is a PF FPGA 5GNR device. */
bool pf_device;
 };
 
-/* Structure associated with each queue. */
-struct __rte_cache_aligned fpga_queue {
-   struct fpga_ring_ctrl_

[PATCH v1 4/6] baseband/fpga_5gnr_fec: add Vista Creek variant

2023-05-23 Thread Hernan Vargas
Create a new file vc_5gnr_pmd.h to store structures and macros specific
to the Vista Creek 5G FPGA implementation, and rename functions specific
to the Vista Creek variant.

Signed-off-by: Hernan Vargas 
---
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h| 183 +-
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 531 +-
 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h  | 140 +
 3 files changed, 426 insertions(+), 428 deletions(-)
 create mode 100644 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h

diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index 9300349a731b..c88d276cc48f 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -8,6 +8,8 @@
 #include 
 #include 
 
+#include "vc_5gnr_pmd.h"
+
 /* Helper macro for logging */
 #define rte_bbdev_log(level, fmt, ...) \
rte_log(RTE_LOG_ ## level, fpga_5gnr_fec_logtype, fmt "\n", \
@@ -25,32 +27,20 @@
 #define FPGA_5GNR_FEC_PF_DRIVER_NAME intel_fpga_5gnr_fec_pf
 #define FPGA_5GNR_FEC_VF_DRIVER_NAME intel_fpga_5gnr_fec_vf
 
-/* FPGA 5GNR FEC PCI vendor & device IDs */
-#define FPGA_5GNR_FEC_VENDOR_ID (0x8086)
-#define FPGA_5GNR_FEC_PF_DEVICE_ID (0x0D8F)
-#define FPGA_5GNR_FEC_VF_DEVICE_ID (0x0D90)
-
-/* Align DMA descriptors to 256 bytes - cache-aligned */
-#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
-/* Ring size is in 256 bits (32 bytes) units */
-#define FPGA_RING_DESC_LEN_UNIT_BYTES (32)
-/* Maximum size of queue */
-#define FPGA_5GNR_RING_MAX_SIZE (1024)
-
-#define FPGA_NUM_UL_QUEUES (32)
-#define FPGA_NUM_DL_QUEUES (32)
-#define FPGA_TOTAL_NUM_QUEUES (FPGA_NUM_UL_QUEUES + FPGA_NUM_DL_QUEUES)
-#define FPGA_NUM_INTR_VEC (FPGA_TOTAL_NUM_QUEUES - RTE_INTR_VEC_RXTX_OFFSET)
-
 #define FPGA_5GNR_INVALID_HW_QUEUE_ID (0x)
-
 #define FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US (1000)
 #define FPGA_5GNR_HARQ_RDY_TIMEOUT (10)
 #define FPGA_5GNR_TIMEOUT_CHECK_INTERVAL (5)
 #define FPGA_5GNR_DDR_OVERFLOW (0x10)
-
 #define FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES 8
 #define FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES 8
+/* Align DMA descriptors to 256 bytes - cache-aligned. */
+#define FPGA_5GNR_RING_DESC_ENTRY_LENGTH (8)
+/* Maximum size of queue. */
+#define FPGA_5GNR_RING_MAX_SIZE (1024)
+
+#define VC_5GNR_FPGA_VARIANT   0
+#define AGX100_FPGA_VARIANT1
 
 /* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
 #define N_ZC_1 66 /* N = 66 Zc for BG 1 */
@@ -62,32 +52,7 @@
 #define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
 #define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
 
-/* FPGA 5GNR FEC Register mapping on BAR0 */
-enum {
-   FPGA_5GNR_FEC_VERSION_ID = 0x, /* len: 4B */
-   FPGA_5GNR_FEC_CONFIGURATION = 0x0004, /* len: 2B */
-   FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x0008, /* len: 1B */
-   FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x000a, /* len: 2B */
-   FPGA_5GNR_FEC_RING_DESC_LEN = 0x000c, /* len: 2B */
-   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x0018, /* len: 4B */
-   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x001c, /* len: 4B */
-   FPGA_5GNR_FEC_QUEUE_MAP = 0x0040, /* len: 256B */
-   FPGA_5GNR_FEC_RING_CTRL_REGS = 0x0200, /* len: 2048B */
-   FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x0A00, /* len: 4B */
-   FPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x0A08, /* len: 8B */
-   FPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x0A10, /* len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x0A18, /* len: 4B */
-   FPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x0A20, /* len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x0A28, /* len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x0A30, /* len: 8B */
-   FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x0A38, /* len: 1B */
-   FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x0A40, /* len: 1B */
-   FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x0A48, /* len: 4B */
-   FPGA_5GNR_FEC_MUTEX = 0x0A60, /* len: 4B */
-   FPGA_5GNR_FEC_MUTEX_RESET = 0x0A68  /* len: 4B */
-};
-
-/* FPGA 5GNR FEC Ring Control Registers */
+/* FPGA 5GNR Ring Control Registers. */
 enum {
FPGA_5GNR_FEC_RING_HEAD_ADDR = 0x0008,
FPGA_5GNR_FEC_RING_SIZE = 0x0010,
@@ -98,113 +63,27 @@ enum {
FPGA_5GNR_FEC_RING_HEAD_POINT = 0x001C
 };
 
-/* FPGA 5GNR FEC DESCRIPTOR ERROR */
+/* VC 5GNR and AGX100 common register mapping on BAR0. */
 enum {
-   DESC_ERR_NO_ERR = 0x0,
-   DESC_ERR_K_P_OUT_OF_RANGE = 0x1,
-   DESC_ERR_Z_C_NOT_LEGAL = 0x2,
-   DESC_ERR_DESC_OFFSET_ERR = 0x3,
-   DESC_ERR_DESC_READ_FAIL = 0x8,
-   DESC_ERR_DESC_READ_TIMEOUT = 0x9,
-   DESC_ERR_DESC_READ_TLP_POISONED = 0xA,
-   DESC_ERR_HARQ_INPUT_LEN = 0xB,
-   DESC_ERR_CB_READ_FAIL = 0xC,
-   DESC_ERR_CB_READ_TIMEOUT = 0xD,
-   DESC_ERR_CB_READ_TLP_POISONED = 0xE,
-   DESC_ERR_HBSTORE_ERR = 0xF
-};
-
-
-/* FPGA 5GNR FEC DMA Encoding Request Descrip

[PATCH v1 5/6] baseband/fpga_5gnr_fec: add AGX100 support

2023-05-23 Thread Hernan Vargas
Add support for new FPGA variant AGX100 (on Arrow Creek N6000).

Signed-off-by: Hernan Vargas 
---
 doc/guides/bbdevs/fpga_5gnr_fec.rst   |   72 +-
 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h   |  273 
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h|6 +
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 1197 +++--
 4 files changed, 1395 insertions(+), 153 deletions(-)
 create mode 100644 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h

diff --git a/doc/guides/bbdevs/fpga_5gnr_fec.rst b/doc/guides/bbdevs/fpga_5gnr_fec.rst
index 9d71585e9e18..c27db695a834 100644
--- a/doc/guides/bbdevs/fpga_5gnr_fec.rst
+++ b/doc/guides/bbdevs/fpga_5gnr_fec.rst
@@ -6,12 +6,13 @@ Intel(R) FPGA 5GNR FEC Poll Mode Driver
 
 The BBDEV FPGA 5GNR FEC poll mode driver (PMD) supports an FPGA implementation of a VRAN
 LDPC Encode / Decode 5GNR wireless acceleration function, using Intel's PCI-e and FPGA
-based Vista Creek device.
+based Vista Creek (N3000, referred to as VC_5GNR in the code) as well as Arrow Creek (N6000,
+referred to as AGX100 in the code).
 
 Features
 
 
-FPGA 5GNR FEC PMD supports the following features:
+FPGA 5GNR FEC PMD supports the following BBDEV capabilities:
 
 - LDPC Encode in the DL
 - LDPC Decode in the UL
@@ -67,10 +68,18 @@ Initialization
 
 When the device first powers up, its PCI Physical Functions (PF) can be listed through this command:
 
+Vista Creek (N3000)
+
 .. code-block:: console
 
   sudo lspci -vd8086:0d8f
 
+Arrow Creek (N6000)
+
+.. code-block:: console
+
+  sudo lspci -vd8086:5799
+
 The physical and virtual functions are compatible with Linux UIO drivers:
 ``vfio`` and ``igb_uio``. However, in order to work the FPGA 5GNR FEC device firstly needs
 to be bound to one of these linux drivers through DPDK.
@@ -85,24 +94,34 @@ Install the DPDK igb_uio driver, bind it with the PF PCI device ID and use
 The igb_uio driver may be bound to the PF PCI device using one of two methods:
 
 
-1. PCI functions (physical or virtual, depending on the use case) can be bound to
-the UIO driver by repeating this command for every function.
+1. PCI functions (physical or virtual, depending on the use case) can be bound to the UIO driver by repeating this command for every function.
 
-.. code-block:: console
+  .. code-block:: console
+
+insmod igb_uio.ko
+
+  Bind N3000 to igb_uio
+
+  .. code-block:: console
 
-  insmod igb_uio.ko
-  echo "8086 0d8f" > /sys/bus/pci/drivers/igb_uio/new_id
-  lspci -vd8086:0d8f
+echo "8086 0d8f" > /sys/bus/pci/drivers/igb_uio/new_id
+lspci -vd8086:0d8f
 
+  Bind N6000 to igb_uio
+
+  .. code-block:: console
+
+echo "8086 5799" > /sys/bus/pci/drivers/igb_uio/new_id
+lspci -vd8086:5799
 
 2. Another way to bind PF with DPDK UIO driver is by using the ``dpdk-devbind.py`` tool
 
-.. code-block:: console
+  .. code-block:: console
 
-  cd 
-  ./usertools/dpdk-devbind.py -b igb_uio :06:00.0
+cd 
+./usertools/dpdk-devbind.py -b igb_uio :06:00.0
 
-where the PCI device ID (example: :06:00.0) is obtained using lspci -vd8086:0d8f
+where the PCI device ID (example: :06:00.0) is obtained using lspci -vd8086:0d8f for N3000 or lspci -vd8086:5799 for N6000
 
 
 In the same way the FPGA 5GNR FEC PF can be bound with vfio, but vfio driver does not
@@ -165,7 +184,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
   uint8_t dl_bandwidth;
   uint8_t ul_load_balance;
   uint8_t dl_load_balance;
-  uint16_t flr_time_out;
   };
 
 - ``pf_mode_en``: identifies whether only PF is to be used, or the VFs. PF and
@@ -176,12 +194,12 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
 
 - ``vf_*l_queues_number``: defines the hardware queue mapping for every VF.
 
-- ``*l_bandwidth``: in case of congestion on PCIe interface. The device
-  allocates different bandwidth to UL and DL. The weight is configured by this
-  setting. The unit of weight is 3 code blocks. For example, if the code block
-  cbps (code block per second) ratio between UL and DL is 12:1, then the
-  configuration value should be set to 36:3. The schedule algorithm is based
-  on code block regardless the length of each block.
+- ``*l_bandwidth``: Only used for the Vista Creek schedule algorithm in case of
+  congestion on PCIe interface. The device allocates different bandwidth to UL
+  and DL. The weight is configured by this setting. The unit of weight is 3 code
+  blocks. For example, if the code block cbps (code block per second) ratio between
+  UL and DL is 12:1, then the configuration value should be set to 36:3.
+  The schedule algorithm is based on code block regardless the length of each block.
 
 - ``*l_load_balance``: hardware queues are load-balanced in a round-robin
   fashion. Queues get filled first-in first-out until they reach a pre-defined
@@ -191,10 +209,6 @@ parameters defined in ``rte_fpga_5gnr_fec_conf`` structure:
   If all hardware queues exceeds the watermark, no code blocks wi

[PATCH v1 6/6] baseband/fpga_5gnr_fec: cosmetic comment changes

2023-05-23 Thread Hernan Vargas
Cosmetic changes for comments.
No functional impact.

Signed-off-by: Hernan Vargas 
---
 .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h|  93 ++--
 .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 398 +-
 .../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h |  16 +-
 3 files changed, 252 insertions(+), 255 deletions(-)

diff --git a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h 
b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
index d0d9ee64dbde..c2aa5af2af40 100644
--- a/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
+++ b/drivers/baseband/fpga_5gnr_fec/fpga_5gnr_fec.h
@@ -11,7 +11,7 @@
 #include "agx100_pmd.h"
 #include "vc_5gnr_pmd.h"
 
-/* Helper macro for logging */
+/* Helper macro for logging. */
 #define rte_bbdev_log(level, fmt, ...) \
rte_log(RTE_LOG_ ## level, fpga_5gnr_fec_logtype, fmt "\n", \
##__VA_ARGS__)
@@ -24,7 +24,7 @@
 #define rte_bbdev_log_debug(fmt, ...)
 #endif
 
-/* FPGA 5GNR FEC driver names */
+/* FPGA 5GNR FEC driver names. */
 #define FPGA_5GNR_FEC_PF_DRIVER_NAME intel_fpga_5gnr_fec_pf
 #define FPGA_5GNR_FEC_VF_DRIVER_NAME intel_fpga_5gnr_fec_vf
 
@@ -43,15 +43,15 @@
 #define VC_5GNR_FPGA_VARIANT   0
 #define AGX100_FPGA_VARIANT1
 
-/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2 */
-#define N_ZC_1 66 /* N = 66 Zc for BG 1 */
-#define N_ZC_2 50 /* N = 50 Zc for BG 2 */
-#define K0_1_1 17 /* K0 fraction numerator for rv 1 and BG 1 */
-#define K0_1_2 13 /* K0 fraction numerator for rv 1 and BG 2 */
-#define K0_2_1 33 /* K0 fraction numerator for rv 2 and BG 1 */
-#define K0_2_2 25 /* K0 fraction numerator for rv 2 and BG 2 */
-#define K0_3_1 56 /* K0 fraction numerator for rv 3 and BG 1 */
-#define K0_3_2 43 /* K0 fraction numerator for rv 3 and BG 2 */
+/* Constants from K0 computation from 3GPP 38.212 Table 5.4.2.1-2. */
+#define N_ZC_1 66 /**< N = 66 Zc for BG 1. */
+#define N_ZC_2 50 /**< N = 50 Zc for BG 2. */
+#define K0_1_1 17 /**< K0 fraction numerator for rv 1 and BG 1. */
+#define K0_1_2 13 /**< K0 fraction numerator for rv 1 and BG 2. */
+#define K0_2_1 33 /**< K0 fraction numerator for rv 2 and BG 1. */
+#define K0_2_2 25 /**< K0 fraction numerator for rv 2 and BG 2. */
+#define K0_3_1 56 /**< K0 fraction numerator for rv 3 and BG 1. */
+#define K0_3_2 43 /**< K0 fraction numerator for rv 3 and BG 2. */
 
 /* FPGA 5GNR Ring Control Registers. */
 enum {
@@ -66,25 +66,25 @@ enum {
 
 /* VC 5GNR and AGX100 common register mapping on BAR0. */
 enum {
-   FPGA_5GNR_FEC_VERSION_ID = 0x, /**< len: 4B */
-   FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x0008, /**< len: 1B */
-   FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x000A, /**< len: 2B */
-   FPGA_5GNR_FEC_RING_DESC_LEN = 0x000C, /**< len: 2B */
-   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x0018, /**< len: 4B */
-   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x001C, /**< len: 4B */
-   FPGA_5GNR_FEC_RING_CTRL_REGS = 0x0200, /**< len: 2048B */
-   FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x0A00, /**< len: 4B */
-   FPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x0A08, /**< len: 8B */
-   FPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x0A10, /**< len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x0A18, /**< len: 4B */
-   FPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x0A20, /**< len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x0A28, /**< len: 1B */
-   FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x0A30, /**< len: 8B */
-   FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x0A38, /**< len: 1B */
-   FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x0A40, /**< len: 1B */
-   FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x0A48, /**< len: 4B */
-   FPGA_5GNR_FEC_MUTEX = 0x0A60, /**< len: 4B */
-   FPGA_5GNR_FEC_MUTEX_RESET = 0x0A68  /**< len: 4B */
+   FPGA_5GNR_FEC_VERSION_ID = 0x, /**< len: 4B. */
+   FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE = 0x0008, /**< len: 1B. */
+   FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR = 0x000A, /**< len: 2B. */
+   FPGA_5GNR_FEC_RING_DESC_LEN = 0x000C, /**< len: 2B. */
+   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW = 0x0018, /**< len: 4B. */
+   FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI = 0x001C, /**< len: 4B. */
+   FPGA_5GNR_FEC_RING_CTRL_REGS = 0x0200, /**< len: 2048B. */
+   FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS = 0x0A00, /**< len: 4B. */
+   FPGA_5GNR_FEC_DDR4_WR_DATA_REGS = 0x0A08, /**< len: 8B. */
+   FPGA_5GNR_FEC_DDR4_WR_DONE_REGS = 0x0A10, /**< len: 1B. */
+   FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS = 0x0A18, /**< len: 4B. */
+   FPGA_5GNR_FEC_DDR4_RD_DONE_REGS = 0x0A20, /**< len: 1B. */
+   FPGA_5GNR_FEC_DDR4_RD_RDY_REGS = 0x0A28, /**< len: 1B. */
+   FPGA_5GNR_FEC_DDR4_RD_DATA_REGS = 0x0A30, /**< len: 8B. */
+   FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS = 0x0A38, /**< len: 1B. */
+   FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS = 0x0A40, /**< len: 1B. */
+   FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS = 0x0A48, /**< le

Re: [PATCH 1/1] vfio: Make buildable with MUSL runtime

2023-05-23 Thread Philip Prindeville



> On May 23, 2023, at 11:36 AM, Thomas Monjalon  wrote:
> 
> 23/05/2023 17:46, Philip Prindeville:
>> 
>>> On May 23, 2023, at 5:33 AM, Thomas Monjalon  wrote:
>>> 
>>> 22/05/2023 11:27, Burakov, Anatoly:
 On 5/20/2023 7:07 PM, Philip Prindeville wrote:
> From: Philip Prindeville 
> 
> pread64() and pwrite64() are declared in <unistd.h> in MUSL and
> other (i.e. not glibc) C runtimes.
> 
> Signed-off-by: Philip Prindeville 
 Acked-by: Anatoly Burakov 
>>> 
>>> It was "buildable" already, and I don't understand how.
>>> In any doubt, I've added Cc: sta...@dpdk.org for backports.
>>> 
>>> Applied, thanks.
>> 
>> Yeah, it builds with glibc just fine.  MUSL is another story...
> 
> It is also building on Alpine/musl.
> I would like to understand why.
> Do you have failure logs and versions?
> 


Have a look at:

https://github.com/k13132/openwrt-dpdk/pull/9#issuecomment-1556124647



Re: [PATCH v1 0/6] baseband/fpga_5gnr_fec: changes for 23.07

2023-05-23 Thread Maxime Coquelin

Hi Hernan,

On 5/23/23 20:48, Hernan Vargas wrote:

Targeting 23.07 if possible. Add support for AGX100 (N6000) and corner case fixes.


We can take the fixes in 23.07, but the new device support will
have to wait for v23.11. Indeed, the submission deadline was one month
ago (April 22nd).

Regards,
Maxime


Hernan Vargas (6):
   baseband/fpga_5gnr_fec: fix possible div by zero
   baseband/fpga_5gnr_fec: fix seg fault unconf queue
   baseband/fpga_5gnr_fec: renaming for consistency
   baseband/fpga_5gnr_fec: add Vista Creek variant
   baseband/fpga_5gnr_fec: add AGX100 support
   baseband/fpga_5gnr_fec: cosmetic comment changes

  doc/guides/bbdevs/fpga_5gnr_fec.rst   |   72 +-
  drivers/baseband/fpga_5gnr_fec/agx100_pmd.h   |  273 ++
  .../baseband/fpga_5gnr_fec/fpga_5gnr_fec.h|  349 +--
  .../fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 2261 -
  .../fpga_5gnr_fec/rte_pmd_fpga_5gnr_fec.h |   27 +-
  drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h  |  140 +
  6 files changed, 2157 insertions(+), 965 deletions(-)
  create mode 100644 drivers/baseband/fpga_5gnr_fec/agx100_pmd.h
  create mode 100644 drivers/baseband/fpga_5gnr_fec/vc_5gnr_pmd.h





Re: [PATCH v1 1/6] baseband/fpga_5gnr_fec: fix possible div by zero

2023-05-23 Thread Maxime Coquelin




On 5/23/23 20:48, Hernan Vargas wrote:

Add an early exit when z_c is zero to prevent a possible
division by zero.

Fixes: 44dc6faa796f ("baseband/fpga_5gnr_fec: add LDPC processing functions")
Cc: sta...@dpdk.org

Signed-off-by: Hernan Vargas 
---
  drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 2 ++
  1 file changed, 2 insertions(+)

diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index f29565af8cca..9388cce52960 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -879,6 +879,8 @@ get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
  {
if (rv_index == 0)
return 0;
+   if (z_c == 0)
+   return 0;
uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;


You could take the opportunity to move the n declaration to the top of the
function.

With this done, feel free to add:
Reviewed-by: Maxime Coquelin 

Thanks,
Maxime



if (n_cb == n) {
if (rv_index == 1)

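For reference, a sketch of what the requested change would look like (hedged:
the return type and the elided remainder of get_k0() are assumptions based on
the context shown above):

	static inline uint16_t
	get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
	{
		uint16_t n;

		if (rv_index == 0)
			return 0;
		if (z_c == 0)
			return 0;
		n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;

		/* ... the existing k0 computation continues here, using n ... */
	}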



Re: [PATCH v1 2/6] baseband/fpga_5gnr_fec: fix seg fault unconf queue

2023-05-23 Thread Maxime Coquelin




On 5/23/23 20:48, Hernan Vargas wrote:

Add a check to prevent a segmentation fault in case a queue
that was not configured earlier is started.

Fixes: c58109a8871d ("baseband/fpga_5gnr_fec: add queue configuration")
Cc: sta...@dpdk.org

Signed-off-by: Hernan Vargas 
---
  drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c | 4 
  1 file changed, 4 insertions(+)

diff --git a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
index 9388cce52960..a6211f73e6e3 100644
--- a/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
+++ b/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c
@@ -573,6 +573,10 @@ fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
return -1;
}
  #endif
+   if (dev->data->queues[queue_id].queue_private == NULL) {
+   rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
+   return -1;
+   }
struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);


Same comment here for the offset and q declarations; they should be at the top
of the function.

Maxime

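Likewise, a sketch of the layout being requested here (only the affected head
of fpga_queue_start() is shown; the rest of the function is unchanged):

	struct fpga_queue *q;
	uint32_t offset;

	if (dev->data->queues[queue_id].queue_private == NULL) {
		rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
		return -1;
	}
	q = dev->data->queues[queue_id].queue_private;
	offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);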


[PATCH 00/13] Add MACsec unit test cases

2023-05-23 Thread Akhil Goyal
Inline MACsec offload has been supported since DPDK 22.11
using rte_security APIs.
This patchset adds a few minor changes to the rte_security APIs
to specify the direction of an SA/SC and updates the SC configuration
to set a packet number threshold.

The patchset also adds functional test cases to the dpdk-test app
to verify MACsec functionality.

This patchset was pending from the last release [1] due to lack of
hardware to test on. The test cases are now verified on the Marvell cnxk PMD,
and the PMD support is added as a separate patchset.


Akhil Goyal (10):
  security: add direction in SA/SC configuration
  security: add MACsec packet number threshold
  test/security: add inline MACsec cases
  test/security: add MACsec integrity cases
  test/security: verify multi flow MACsec
  test/security: add MACsec VLAN cases
  test/security: add MACsec negative cases
  test/security: verify MACsec stats
  test/security: verify MACsec Tx HW rekey
  test/security: remove no MACsec support case

Ankur Dwivedi (3):
  test/security: verify MACsec interrupts
  test/security: verify MACsec Rx rekey
  test/security: verify MACsec anti replay

 app/test/meson.build  |1 +
 app/test/test_security.c  |   37 -
 app/test/test_security_inline_macsec.c| 2332 ++
 .../test_security_inline_macsec_vectors.h | 3895 +
 lib/security/rte_security.c   |   16 +-
 lib/security/rte_security.h   |   24 +-
 lib/security/rte_security_driver.h|   12 +-
 7 files changed, 6266 insertions(+), 51 deletions(-)
 create mode 100644 app/test/test_security_inline_macsec.c
 create mode 100644 app/test/test_security_inline_macsec_vectors.h

-- 
2.25.1



[PATCH 01/13] security: add direction in SA/SC configuration

2023-05-23 Thread Akhil Goyal
MACsec SC/SA IDs are created based on the direction of the flow.
Hence, add the missing direction field to the configuration and cleanup
of the SCs and SAs.
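
A hedged sketch of the resulting call sites (the 'sec_ctx', 'sc_id' and
'sa_id' variables are placeholders assumed to have been set up earlier for a
Tx flow; the direction values come from enum rte_security_macsec_direction):

	int ret;

	ret = rte_security_macsec_sa_destroy(sec_ctx, sa_id,
			RTE_SECURITY_MACSEC_DIR_TX);
	if (ret == 0)
		ret = rte_security_macsec_sc_destroy(sec_ctx, sc_id,
				RTE_SECURITY_MACSEC_DIR_TX);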

Signed-off-by: Akhil Goyal 
---
 lib/security/rte_security.c| 16 ++--
 lib/security/rte_security.h| 14 ++
 lib/security/rte_security_driver.h | 12 ++--
 3 files changed, 30 insertions(+), 12 deletions(-)

diff --git a/lib/security/rte_security.c b/lib/security/rte_security.c
index e102c55e55..c4d64bb8e9 100644
--- a/lib/security/rte_security.c
+++ b/lib/security/rte_security.c
@@ -164,13 +164,14 @@ rte_security_macsec_sa_create(struct rte_security_ctx *instance,
 }
 
 int
-rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id)
+rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id,
+			       enum rte_security_macsec_direction dir)
 {
int ret;
 
	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_destroy, -EINVAL, -ENOTSUP);
 
-   ret = instance->ops->macsec_sc_destroy(instance->device, sc_id);
+   ret = instance->ops->macsec_sc_destroy(instance->device, sc_id, dir);
if (ret != 0)
return ret;
 
@@ -181,13 +182,14 @@ rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id
 }
 
 int
-rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id)
+rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id,
+			       enum rte_security_macsec_direction dir)
 {
int ret;
 
	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_destroy, -EINVAL, -ENOTSUP);
 
-   ret = instance->ops->macsec_sa_destroy(instance->device, sa_id);
+   ret = instance->ops->macsec_sa_destroy(instance->device, sa_id, dir);
if (ret != 0)
return ret;
 
@@ -199,22 +201,24 @@ rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id
 
 int
 rte_security_macsec_sc_stats_get(struct rte_security_ctx *instance, uint16_t sc_id,
+enum rte_security_macsec_direction dir,
 struct rte_security_macsec_sc_stats *stats)
 {
	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sc_stats_get, -EINVAL, -ENOTSUP);
RTE_PTR_OR_ERR_RET(stats, -EINVAL);
 
-	return instance->ops->macsec_sc_stats_get(instance->device, sc_id, stats);
+	return instance->ops->macsec_sc_stats_get(instance->device, sc_id, dir, stats);
 }
 
 int
 rte_security_macsec_sa_stats_get(struct rte_security_ctx *instance, uint16_t sa_id,
+enum rte_security_macsec_direction dir,
 struct rte_security_macsec_sa_stats *stats)
 {
	RTE_PTR_CHAIN3_OR_ERR_RET(instance, ops, macsec_sa_stats_get, -EINVAL, -ENOTSUP);
RTE_PTR_OR_ERR_RET(stats, -EINVAL);
 
-	return instance->ops->macsec_sa_stats_get(instance->device, sa_id, stats);
+	return instance->ops->macsec_sa_stats_get(instance->device, sa_id, dir, stats);
 }
 
 int
diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index 4bacf9fcd9..c7a523b6d6 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -761,6 +761,7 @@ rte_security_macsec_sc_create(struct rte_security_ctx *instance,
  *
  * @param   instance   security instance
  * @param   sc_id  SC ID to be destroyed
+ * @param   dirdirection of the SC
  * @return
  *  - 0 if successful.
  *  - -EINVAL if sc_id is invalid or instance is NULL.
@@ -768,7 +769,8 @@ rte_security_macsec_sc_create(struct rte_security_ctx *instance,
  */
 __rte_experimental
 int
-rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id);
+rte_security_macsec_sc_destroy(struct rte_security_ctx *instance, uint16_t sc_id,
+			       enum rte_security_macsec_direction dir);
 
 /**
  * @warning
@@ -798,6 +800,7 @@ rte_security_macsec_sa_create(struct rte_security_ctx *instance,
  *
  * @param   instance   security instance
  * @param   sa_id  SA ID to be destroyed
+ * @param   dirdirection of the SA
  * @return
  *  - 0 if successful.
  *  - -EINVAL if sa_id is invalid or instance is NULL.
@@ -805,7 +808,8 @@ rte_security_macsec_sa_create(struct rte_security_ctx *instance,
  */
 __rte_experimental
 int
-rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id);
+rte_security_macsec_sa_destroy(struct rte_security_ctx *instance, uint16_t sa_id,
+			       enum rte_security_macsec_direction dir);
 
 /** Device-specific metadata field type */
 typedef uint64_t rte_security_dynfield_t;
@@ -1077,6 +1081,7 @@ rte_security_session_stats_get(struct rte_security_ctx *instance,
  *
  * @param  instancesecurity instance
  * @param  sa_id   SA ID for which stats are needed
+ * @param  dir

[PATCH 02/13] security: add MACsec packet number threshold

2023-05-23 Thread Akhil Goyal
Added a packet number threshold parameter to the MACsec SC
configuration to specify the maximum allowed value of the
packet number field in the packet.
A field is_xpn is also added to indicate whether the SAs are
configured for extended packet number, so that the packet
number threshold can be configured accordingly.
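
A minimal sketch of how an Rx SC might populate the new fields (values are
illustrative only; the SA creation and the rte_security_macsec_sc_create()
call itself are elided):

	struct rte_security_macsec_sc sc_conf = {
		.dir = RTE_SECURITY_MACSEC_DIR_RX,
		/* Illustrative: rekey well before a 32-bit PN would wrap. */
		.pn_threshold = 0xFFFFF000,
	};

	sc_conf.sc_rx.active = 1;
	sc_conf.sc_rx.is_xpn = 0;	/* the SAs use 32-bit packet numbers here */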

Signed-off-by: Akhil Goyal 
---
 lib/security/rte_security.h | 10 --
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index c7a523b6d6..30bac4e25a 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -399,6 +399,8 @@ struct rte_security_macsec_sa {
 struct rte_security_macsec_sc {
/** Direction of SC */
enum rte_security_macsec_direction dir;
+   /** Packet number threshold */
+   uint64_t pn_threshold;
union {
struct {
/** SAs for each association number */
@@ -407,8 +409,10 @@ struct rte_security_macsec_sc {
uint8_t sa_in_use[RTE_SECURITY_MACSEC_NUM_AN];
/** Channel is active */
uint8_t active : 1;
+   /** Extended packet number is enabled for SAs */
+   uint8_t is_xpn : 1;
/** Reserved bitfields for future */
-   uint8_t reserved : 7;
+   uint8_t reserved : 6;
} sc_rx;
struct {
uint16_t sa_id; /**< SA ID to be used for encryption */
@@ -416,8 +420,10 @@ struct rte_security_macsec_sc {
		uint64_t sci; /**< SCI value to be used if send_sci is set */
uint8_t active : 1; /**< Channel is active */
uint8_t re_key_en : 1; /**< Enable Rekeying */
+   /** Extended packet number is enabled for SAs */
+   uint8_t is_xpn : 1;
/** Reserved bitfields for future */
-   uint8_t reserved : 6;
+   uint8_t reserved : 5;
} sc_tx;
};
 };
-- 
2.25.1



[PATCH 03/13] test/security: add inline MACsec cases

2023-05-23 Thread Akhil Goyal
Updated test app to verify Inline MACsec offload using
rte_security APIs.
A couple of test cases are added to verify encap only
and decap only of some known test vectors from MACsec
specification.

Signed-off-by: Akhil Goyal 
---
 app/test/meson.build  |1 +
 app/test/test_security_inline_macsec.c| 1108 +
 .../test_security_inline_macsec_vectors.h | 1086 
 3 files changed, 2195 insertions(+)
 create mode 100644 app/test/test_security_inline_macsec.c
 create mode 100644 app/test/test_security_inline_macsec_vectors.h

diff --git a/app/test/meson.build b/app/test/meson.build
index b9b5432496..69c1d19f7b 100644
--- a/app/test/meson.build
+++ b/app/test/meson.build
@@ -128,6 +128,7 @@ test_sources = files(
 'test_rwlock.c',
 'test_sched.c',
 'test_security.c',
+'test_security_inline_macsec.c',
 'test_security_inline_proto.c',
 'test_seqlock.c',
 'test_service_cores.c',
diff --git a/app/test/test_security_inline_macsec.c b/app/test/test_security_inline_macsec.c
new file mode 100644
index 00..22a54dd65b
--- /dev/null
+++ b/app/test/test_security_inline_macsec.c
@@ -0,0 +1,1108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+#include "test.h"
+#include "test_security_inline_macsec_vectors.h"
+
+#ifdef RTE_EXEC_ENV_WINDOWS
+static int
+test_inline_macsec(void)
+{
+   printf("Inline MACsec not supported on Windows, skipping test\n");
+   return TEST_SKIPPED;
+}
+
+#else
+
+#define NB_ETHPORTS_USED   1
+#define MEMPOOL_CACHE_SIZE 32
+#define RTE_TEST_RX_DESC_DEFAULT   1024
+#define RTE_TEST_TX_DESC_DEFAULT   1024
+#define RTE_PORT_ALL   (~(uint16_t)0x0)
+
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 0 /**< Default values of RX write-back threshold reg. */
+
+#define TX_PTHRESH 32 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_TRAFFIC_BURST  2048
+#define NB_MBUF10240
+
+#define MCS_INVALID_SA 0x
+#define MCS_DEFAULT_PN_THRESHOLD   0xF
+
+static struct rte_mempool *mbufpool;
+static struct rte_mempool *sess_pool;
+/* ethernet addresses of ports */
+static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+
+struct mcs_test_opts {
+   int val_frames;
+   int nb_td;
+   uint16_t mtu;
+   uint8_t sa_in_use;
+   bool encrypt;
+   bool protect_frames;
+   uint8_t sectag_insert_mode;
+   uint8_t nb_vlan;
+   uint32_t replay_win_sz;
+   uint8_t replay_protect;
+   uint8_t rekey_en;
+   const struct mcs_test_vector *rekey_td;
+   bool dump_all_stats;
+   uint8_t check_untagged_rx;
+   uint8_t check_bad_tag_cnt;
+   uint8_t check_sa_not_in_use;
+   uint8_t check_decap_stats;
+   uint8_t check_verify_only_stats;
+   uint8_t check_pkts_invalid_stats;
+   uint8_t check_pkts_unchecked_stats;
+   uint8_t check_out_pkts_untagged;
+   uint8_t check_out_pkts_toolong;
+   uint8_t check_encap_stats;
+   uint8_t check_auth_only_stats;
+   uint8_t check_sectag_interrupts;
+};
+
+static struct rte_eth_conf port_conf = {
+   .rxmode = {
+   .mq_mode = RTE_ETH_MQ_RX_NONE,
+   .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM |
+   RTE_ETH_RX_OFFLOAD_MACSEC_STRIP,
+   },
+   .txmode = {
+   .mq_mode = RTE_ETH_MQ_TX_NONE,
+   .offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE |
+   RTE_ETH_TX_OFFLOAD_MACSEC_INSERT,
+   },
+   .lpbk_mode = 1,  /* enable loopback */
+};
+
+static struct rte_eth_rxconf rx_conf = {
+   .rx_thresh = {
+   .pthresh = RX_PTHRESH,
+   .hthresh = RX_HTHRESH,
+   .wthresh = RX_WTHRESH,
+   },
+   .rx_free_thresh = 32,
+};
+
+static struct rte_eth_txconf tx_conf = {
+   .tx_thresh = {
+   .pthresh = TX_PTHRESH,
+   .hthresh = TX_HTHRESH,
+   .wthresh = TX_WTHRESH,
+   },
+   .tx_free_thresh = 32, /* Use PMD default values */
+   .tx_rs_thresh = 32, /* Use PMD default values */
+};
+
+static uint16_t port_id;
+
+static uint64_t link_mbps;
+
+static struct rte_flow *default_tx_flow[RTE_MAX_ETHPORTS];
+static struct rte_flow *default_rx_flow[RTE_MAX_ETHPORTS];
+
+static struct rte_mbuf **tx_pkts_burst;
+static struct rte_mbuf **rx_pkts_burst;
+
+static inline struct rte_mbuf *
+init_packet(struct rte_mempool *mp, const uint8_t *data, unsigned int len)
+{
+   struct rte_mbuf *pkt;
+
+   

[PATCH 04/13] test/security: add MACsec integrity cases

2023-05-23 Thread Akhil Goyal
Added test vectors and test cases for
auth_only/verify_only and encap-decap/auth-verify,
exercising the complete TX-RX path using the loopback
mode of ethdev.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c| 153 +++
 .../test_security_inline_macsec_vectors.h | 995 ++
 2 files changed, 1148 insertions(+)

diff --git a/app/test/test_security_inline_macsec.c b/app/test/test_security_inline_macsec.c
index 22a54dd65b..9047b7adff 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -937,6 +937,143 @@ test_inline_macsec_decap_all(const void *data __rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_auth_only_all(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+	size = (sizeof(list_mcs_integrity_vectors) / sizeof((list_mcs_integrity_vectors)[0]));
+
+	for (i = 0; i < size; i++) {
+		cur_td = &list_mcs_integrity_vectors[i];
+		err = test_macsec(&cur_td, MCS_AUTH_ONLY, &opts);
+		if (err) {
+			printf("\nAuth Generate case %d failed", cur_td->test_idx);
+			err = -1;
+		} else {
+			printf("\nAuth Generate case %d Passed", cur_td->test_idx);
+			err = 0;
+		}
+		all_err += err;
+	}
+	printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err);
+
+   return all_err;
+}
+
+static int
+test_inline_macsec_verify_only_all(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+	size = (sizeof(list_mcs_integrity_vectors) / sizeof((list_mcs_integrity_vectors)[0]));
+
+	for (i = 0; i < size; i++) {
+		cur_td = &list_mcs_integrity_vectors[i];
+		err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts);
+		if (err) {
+			printf("\nAuth Verify case %d failed", cur_td->test_idx);
+			err = -1;
+		} else {
+			printf("\nAuth Verify case %d Passed", cur_td->test_idx);
+			err = 0;
+		}
+		all_err += err;
+	}
+	printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err);
+
+   return all_err;
+}
+
+static int
+test_inline_macsec_encap_decap_all(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.encrypt = true;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+	size = (sizeof(list_mcs_cipher_vectors) / sizeof((list_mcs_cipher_vectors)[0]));
+
+	for (i = 0; i < size; i++) {
+		cur_td = &list_mcs_cipher_vectors[i];
+		err = test_macsec(&cur_td, MCS_ENCAP_DECAP, &opts);
+		if (err) {
+			printf("\nCipher Auth Encap-decap case %d failed", cur_td->test_idx);
+			err = -1;
+		} else {
+			printf("\nCipher Auth Encap-decap case %d Passed", cur_td->test_idx);
+			err = 0;
+		}
+		all_err += err;
+	}
+	printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, -all_err);
+
+   return all_err;
+}
+
+
+static int
+test_inline_macsec_auth_verify_all(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+   size = (sizeof(list_mcs_integrity_vectors) / 
sizeof((list_mcs_integrity_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_integrity_vectors[i];
+   err = test_macsec(&cur_td, MCS_AUTH_VERIFY, &opts);
+   if (err) {
+   printf("\nAuth Generate 

[PATCH 05/13] test/security: verify multi flow MACsec

2023-05-23 Thread Akhil Goyal
Added a test case and test vectors to verify multiple
MACsec flows.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c|  49 
 .../test_security_inline_macsec_vectors.h | 110 +-
 2 files changed, 158 insertions(+), 1 deletion(-)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index 9047b7adff..c32f747961 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -1074,6 +1074,51 @@ test_inline_macsec_auth_verify_all(const void *data 
__rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_multi_flow(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *tv[MCS_MAX_FLOWS];
+   struct mcs_test_vector iter[MCS_MAX_FLOWS];
+   struct mcs_test_opts opts = {0};
+   int i, err;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.encrypt = true;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = MCS_MAX_FLOWS;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+   for (i = 0; i < MCS_MAX_FLOWS; i++) {
+   memcpy(&iter[i].sa_key.data, sa_key, MCS_MULTI_FLOW_TD_KEY_SZ);
+   memcpy(&iter[i].plain_pkt.data, eth_addrs[i], 2 * 
RTE_ETHER_ADDR_LEN);
+   memcpy(&iter[i].plain_pkt.data[2 * RTE_ETHER_ADDR_LEN], 
plain_user_data,
+  MCS_MULTI_FLOW_TD_PLAIN_DATA_SZ);
+   memcpy(&iter[i].secure_pkt.data, eth_addrs[i], 2 * 
RTE_ETHER_ADDR_LEN);
+   memcpy(&iter[i].secure_pkt.data[2 * RTE_ETHER_ADDR_LEN], 
secure_user_data,
+  MCS_MULTI_FLOW_TD_SECURE_DATA_SZ);
+   iter[i].sa_key.len = MCS_MULTI_FLOW_TD_KEY_SZ;
+   iter[i].plain_pkt.len = MCS_MULTI_FLOW_TD_PLAIN_DATA_SZ +
+   (2 * RTE_ETHER_ADDR_LEN);
+   iter[i].secure_pkt.len = MCS_MULTI_FLOW_TD_SECURE_DATA_SZ +
+   (2 * RTE_ETHER_ADDR_LEN);
+   iter[i].alg = RTE_SECURITY_MACSEC_ALG_GCM_128;
+   iter[i].ssci = 0x0;
+   iter[i].xpn = 0x0;
+   tv[i] = (const struct mcs_test_vector *)&iter[i];
+   }
+   err = test_macsec(tv, MCS_ENCAP_DECAP, &opts);
+   if (err) {
+   printf("\nCipher Auth Encryption multi flow failed");
+   err = -1;
+   } else {
+   printf("\nCipher Auth Encryption multi flow Passed");
+   err = 0;
+   }
+   return err;
+}
+
 static int
 ut_setup_inline_macsec(void)
 {
@@ -1219,6 +1264,10 @@ inline_macsec_testsuite_teardown(void)
 static struct unit_test_suite inline_macsec_testsuite  = {
.suite_name = "Inline MACsec Ethernet Device Unit Test Suite",
.unit_test_cases = {
+   TEST_CASE_NAMED_ST(
+   "MACsec Encap + decap Multi flow",
+   ut_setup_inline_macsec, ut_teardown_inline_macsec,
+   test_inline_macsec_multi_flow),
TEST_CASE_NAMED_ST(
"MACsec encap(Cipher+Auth) known vector",
ut_setup_inline_macsec, ut_teardown_inline_macsec,
diff --git a/app/test/test_security_inline_macsec_vectors.h 
b/app/test/test_security_inline_macsec_vectors.h
index f6c668c281..8d9c2cae77 100644
--- a/app/test/test_security_inline_macsec_vectors.h
+++ b/app/test/test_security_inline_macsec_vectors.h
@@ -8,7 +8,6 @@
 #define MCS_MAX_KEY_LEN32
 #define MCS_IV_LEN 12
 #define MCS_SALT_LEN   12
-#define MCS_MAX_FLOWS  63
 
 enum mcs_op {
MCS_NO_OP,
@@ -2078,4 +2077,113 @@ static const struct mcs_test_vector 
list_mcs_integrity_vectors[] = {
 },
 };
 
+#define MCS_MULTI_FLOW_TD_KEY_SZ		16
+#define MCS_MULTI_FLOW_TD_PLAIN_DATA_SZ	42
+#define MCS_MULTI_FLOW_TD_SECURE_DATA_SZ	66
+#define MCS_MAX_FLOWS				63
+
+uint8_t sa_key[MCS_MULTI_FLOW_TD_KEY_SZ] = {
+   0x07, 0x1B, 0x11, 0x3B, 0x0C, 0xA7, 0x43, 0xFE,
+   0xCC, 0xCF, 0x3D, 0x05, 0x1F, 0x73, 0x73, 0x82,
+};
+
+uint8_t eth_addrs[MCS_MAX_FLOWS][2 * RTE_ETHER_ADDR_LEN] = {
+   {0xE2, 0x00, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x01, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x02, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x03, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x04, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x05, 0x06, 0xD7, 0xCD, 0x0D, 0xF0, 0x76, 0x1E, 0x8D, 
0xCD, 0x3D,},
+   {0xE2, 0x06, 0x06, 0x
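
From the entries visible above, the 63 per-flow address pairs share one
source MAC and differ only in the second byte of the destination MAC. A
standalone sketch of a generator that would produce the same table (not
part of the patch; the function name is hypothetical):

#include <stdint.h>
#include <string.h>

#define MCS_FLOWS	63
#define MCS_ADDR_LEN	6

static void
mcs_fill_eth_addrs(uint8_t addrs[MCS_FLOWS][2 * MCS_ADDR_LEN])
{
	static const uint8_t base[2 * MCS_ADDR_LEN] = {
		0xE2, 0x00, 0x06, 0xD7, 0xCD, 0x0D,	/* DA */
		0xF0, 0x76, 0x1E, 0x8D, 0xCD, 0x3D,	/* SA */
	};
	int i;

	for (i = 0; i < MCS_FLOWS; i++) {
		memcpy(addrs[i], base, sizeof(base));
		addrs[i][1] = (uint8_t)i;	/* per-flow DA byte */
	}
}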

[PATCH 06/13] test/security: add MACsec VLAN cases

2023-05-23 Thread Akhil Goyal
Added cases to verify MACsec processing with VLAN
tags inserted. Vectors are added to cover 1/2/3 VLAN
tags, either left in the clear or encrypted.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c|  67 ++
 .../test_security_inline_macsec_vectors.h | 217 ++
 2 files changed, 284 insertions(+)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index c32f747961..353b07477e 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -1119,6 +1119,69 @@ test_inline_macsec_multi_flow(const void *data 
__rte_unused)
return err;
 }
 
+static int
+test_inline_macsec_with_vlan(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+   size = (sizeof(list_mcs_vlan_vectors) / 
sizeof((list_mcs_vlan_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_vlan_vectors[i];
+   if (i == 0) {
+   opts.sectag_insert_mode = 1;
+   } else if (i == 1) {
+   opts.sectag_insert_mode = 0; /* offset from special 
E-type */
+   opts.nb_vlan = 1;
+   } else if (i == 2) {
+   opts.sectag_insert_mode = 0; /* offset from special 
E-type */
+   opts.nb_vlan = 2;
+   }
+   err = test_macsec(&cur_td, MCS_ENCAP, &opts);
+   if (err) {
+   printf("\n VLAN Encap case %d failed", 
cur_td->test_idx);
+   err = -1;
+   } else {
+   printf("\n VLAN Encap case %d passed", 
cur_td->test_idx);
+   err = 0;
+   }
+   all_err += err;
+   }
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_vlan_vectors[i];
+   if (i == 0) {
+   opts.sectag_insert_mode = 1;
+   } else if (i == 1) {
+   opts.sectag_insert_mode = 0; /* offset from special 
E-type */
+   opts.nb_vlan = 1;
+   } else if (i == 2) {
+   opts.sectag_insert_mode = 0; /* offset from special 
E-type */
+   opts.nb_vlan = 2;
+   }
+   err = test_macsec(&cur_td, MCS_DECAP, &opts);
+   if (err) {
+   printf("\n VLAN Decap case %d failed", 
cur_td->test_idx);
+   err = -1;
+   } else {
+   printf("\n VLAN Decap case %d passed", 
cur_td->test_idx);
+   err = 0;
+   }
+   all_err += err;
+   }
+
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, (2 * size) + 
all_err, -all_err);
+   return all_err;
+}
+
 static int
 ut_setup_inline_macsec(void)
 {
@@ -1292,6 +1355,10 @@ static struct unit_test_suite inline_macsec_testsuite  = 
{
"MACsec auth + verify known vector",
ut_setup_inline_macsec, ut_teardown_inline_macsec,
test_inline_macsec_auth_verify_all),
+   TEST_CASE_NAMED_ST(
+   "MACsec Encap and decap with VLAN",
+   ut_setup_inline_macsec, ut_teardown_inline_macsec,
+   test_inline_macsec_with_vlan),
 
TEST_CASES_END() /**< NULL terminate unit test array */
},
diff --git a/app/test/test_security_inline_macsec_vectors.h 
b/app/test/test_security_inline_macsec_vectors.h
index 8d9c2cae77..4bcb82783c 100644
--- a/app/test/test_security_inline_macsec_vectors.h
+++ b/app/test/test_security_inline_macsec_vectors.h
@@ -2185,5 +2185,222 @@ uint8_t 
secure_user_data[MCS_MULTI_FLOW_TD_SECURE_DATA_SZ] = {
0x2A, 0x5D, 0x6C, 0x2B, 0x96, 0x04, 0x94, 0xC3,
 };
 
+static const struct mcs_test_vector list_mcs_vlan_vectors[] = {
+/* No clear tag, VLAN after macsec header */
+{
+   .test_idx = 1,
+   .alg = RTE_SECURITY_MACSEC_ALG_GCM_128,
+   .ssci = 0,
+   .xpn = 0, /* Most significant 32 bits */
+   .salt = {0},
+   .sa_key = {
+   .data = {
+   0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+   0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11,
+   },
+   .len = 16,
+   },
+   .plain_pkt = {
+   .data = {/* MAC DA */
+   0xCA, 0xCB, 0xCD, 0x41, 0x42, 0x43,
+   /* MAC SA */
+   0xCA, 0xCB, 0xCD, 0x21, 0x22, 0x23,
+   /* User Data with VLAN Tag */
+   
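
The three cases above exercise both SecTAG placement modes: with
sectag_insert_mode == 1 the tag directly follows the MAC addresses,
while mode 0 places it after the VLAN tag(s) left in the clear. A
sketch of the implied offset rule (an assumption about the placement,
not code from the patch):

#include <stdint.h>

#define MCS_ETH_ADDRS_LEN	12	/* DA + SA */
#define MCS_VLAN_TAG_LEN	4

/* Hypothetical helper: byte offset at which the SecTAG is inserted. */
static inline uint16_t
mcs_sectag_offset(uint8_t sectag_insert_mode, uint8_t nb_vlan)
{
	if (sectag_insert_mode == 1)
		return MCS_ETH_ADDRS_LEN;	/* right after DA + SA */
	/* mode 0: skip the clear VLAN tag(s) first */
	return MCS_ETH_ADDRS_LEN + (uint16_t)nb_vlan * MCS_VLAN_TAG_LEN;
}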

[PATCH 07/13] test/security: add MACsec negative cases

2023-05-23 Thread Akhil Goyal
Added MACsec negative test cases to verify packet drop,
untagged Rx, bad-tag Rx, SA not in use, untagged Tx
packets, and too-long packets.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c| 346 +
 .../test_security_inline_macsec_vectors.h | 475 ++
 2 files changed, 821 insertions(+)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index 353b07477e..9c4546fa38 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -660,6 +660,103 @@ mcs_stats_dump(struct rte_security_ctx *ctx, enum mcs_op 
op,
}
 }
 
+static int
+mcs_stats_check(struct rte_security_ctx *ctx, enum mcs_op op,
+   const struct mcs_test_opts *opts,
+   const struct mcs_test_vector *td,
+   void *rx_sess, void *tx_sess,
+   uint8_t rx_sc_id, uint8_t tx_sc_id,
+   uint16_t rx_sa_id[], uint16_t tx_sa_id[])
+{
+   struct rte_security_stats sess_stats = {0};
+   struct rte_security_macsec_secy_stats *secy_stat;
+   struct rte_security_macsec_sc_stats sc_stat = {0};
+   struct rte_security_macsec_sa_stats sa_stat = {0};
+   int i;
+
+   if (op == MCS_DECAP || op == MCS_ENCAP_DECAP ||
+   op == MCS_VERIFY_ONLY || op == MCS_AUTH_VERIFY) {
+   rte_security_session_stats_get(ctx, rx_sess, &sess_stats);
+   secy_stat = &sess_stats.macsec;
+
+   if ((opts->check_untagged_rx && secy_stat->pkt_notag_cnt != 1) 
||
+   (opts->check_untagged_rx && 
secy_stat->pkt_untaged_cnt != 1))
+   return TEST_FAILED;
+
+   if (opts->check_bad_tag_cnt && secy_stat->pkt_badtag_cnt != 1)
+   return TEST_FAILED;
+
+   if (opts->check_sa_not_in_use && secy_stat->pkt_nosaerror_cnt 
!= 1)
+   return TEST_FAILED;
+
+   if (opts->check_decap_stats && secy_stat->octet_decrypted_cnt !=
+   (uint16_t)(td->plain_pkt.len - 2 * 
RTE_ETHER_ADDR_LEN))
+   return TEST_FAILED;
+
+   if (opts->check_verify_only_stats && 
secy_stat->octet_validated_cnt !=
+   (uint16_t)(td->plain_pkt.len - 2 * 
RTE_ETHER_ADDR_LEN))
+   return TEST_FAILED;
+
+   rte_security_macsec_sc_stats_get(ctx, rx_sc_id,
+   RTE_SECURITY_MACSEC_DIR_RX, &sc_stat);
+
+   if ((opts->check_decap_stats || opts->check_verify_only_stats) 
&&
+   sc_stat.pkt_ok_cnt != 1)
+   return TEST_FAILED;
+
+   if (opts->check_pkts_invalid_stats && sc_stat.pkt_notvalid_cnt 
!= 1)
+   return TEST_FAILED;
+
+   if (opts->check_pkts_unchecked_stats && 
sc_stat.pkt_unchecked_cnt != 1)
+   return TEST_FAILED;
+
+   for (i = 0; i < RTE_SECURITY_MACSEC_NUM_AN; i++) {
+   memset(&sa_stat, 0, sizeof(struct 
rte_security_macsec_sa_stats));
+   rte_security_macsec_sa_stats_get(ctx, rx_sa_id[i],
+   RTE_SECURITY_MACSEC_DIR_RX, &sa_stat);
+
+   }
+   }
+
+   if (op == MCS_ENCAP || op == MCS_ENCAP_DECAP ||
+   op == MCS_AUTH_ONLY || op == MCS_AUTH_VERIFY) {
+   memset(&sess_stats, 0, sizeof(struct rte_security_stats));
+   rte_security_session_stats_get(ctx, tx_sess, &sess_stats);
+   secy_stat = &sess_stats.macsec;
+
+   if (opts->check_out_pkts_untagged && 
secy_stat->pkt_untagged_cnt != 1)
+   return TEST_FAILED;
+
+   if (opts->check_out_pkts_toolong && secy_stat->pkt_toolong_cnt 
!= 1)
+   return TEST_FAILED;
+
+   if (opts->check_encap_stats && secy_stat->octet_encrypted_cnt !=
+   (uint16_t)(td->plain_pkt.len - 2 * 
RTE_ETHER_ADDR_LEN))
+   return TEST_FAILED;
+
+   if (opts->check_auth_only_stats && 
secy_stat->octet_protected_cnt !=
+   (uint16_t)(td->plain_pkt.len - 2 * 
RTE_ETHER_ADDR_LEN))
+   return TEST_FAILED;
+
+
+   memset(&sc_stat, 0, sizeof(struct 
rte_security_macsec_sc_stats));
+   rte_security_macsec_sc_stats_get(ctx, tx_sc_id, 
RTE_SECURITY_MACSEC_DIR_TX,
+&sc_stat);
+
+   if (opts->check_encap_stats && sc_stat.pkt_encrypt_cnt != 1)
+   return TEST_FAILED;
+
+   if (opts->check_auth_only_stats && sc_stat.pkt_protected_cnt != 
1)
+   return TEST_FAILED;
+
+   memset(&sa_stat, 0, sizeof(struct 
rte_security_macsec_sa_stats));
+   rte_security_macsec_sa_stats_ge
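
All of the checks above follow the same query pattern against the
rte_security session. A condensed standalone sketch of one such check
(the helper name and the expected-octets argument are the caller's own):

#include <rte_security.h>

/* Sketch: query session-level MACsec stats and compare one counter,
 * mirroring the octet_decrypted_cnt check above. Returns 0 on match. */
static int
mcs_check_decrypted_octets(struct rte_security_ctx *ctx, void *rx_sess,
			   uint64_t expected)
{
	struct rte_security_stats stats = {0};

	if (rte_security_session_stats_get(ctx, rx_sess, &stats) < 0)
		return -1;

	return (stats.macsec.octet_decrypted_cnt == expected) ? 0 : -1;
}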

[PATCH 08/13] test/security: verify MACsec stats

2023-05-23 Thread Akhil Goyal
Added cases to verify various MACsec statistics.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c | 222 +
 1 file changed, 222 insertions(+)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index 9c4546fa38..a6d23f2769 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -1438,6 +1438,140 @@ test_inline_macsec_sa_not_in_use(const void *data 
__rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_decap_stats(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+   opts.check_decap_stats = 1;
+
+   size = (sizeof(list_mcs_cipher_vectors) / 
sizeof((list_mcs_cipher_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_cipher_vectors[i];
+   err = test_macsec(&cur_td, MCS_DECAP, &opts);
+   if (err) {
+   printf("\nDecap stats case %d failed", 
cur_td->test_idx);
+   err = -1;
+   } else {
+   printf("\nDecap stats case %d passed", 
cur_td->test_idx);
+   err = 0;
+   }
+   all_err += err;
+   }
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, 
-all_err);
+
+   return all_err;
+}
+
+static int
+test_inline_macsec_verify_only_stats(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+   opts.check_verify_only_stats = 1;
+
+   size = (sizeof(list_mcs_integrity_vectors) / 
sizeof((list_mcs_integrity_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_integrity_vectors[i];
+   err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts);
+   if (err) {
+   printf("\nVerify only stats case %d failed", 
cur_td->test_idx);
+   err = -1;
+   } else {
+   printf("\nVerify only stats case %d Passed", 
cur_td->test_idx);
+   err = 0;
+   }
+   all_err += err;
+   }
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, 
-all_err);
+
+   return all_err;
+}
+
+static int
+test_inline_macsec_pkts_invalid_stats(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+
+   size = (sizeof(list_mcs_err_cipher_vectors) / 
sizeof((list_mcs_err_cipher_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_err_cipher_vectors[i];
+   err = test_macsec(&cur_td, MCS_DECAP, &opts);
+   if (err)
+   err = 0;
+   else
+   err = -1;
+
+   all_err += err;
+   }
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, 
-all_err);
+   return all_err;
+}
+
+static int
+test_inline_macsec_pkts_unchecked_stats(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_DISABLE;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+   opts.check_pkts_unchecked_stats = 1;
+
+   size = (sizeof(list_mcs_integrity_vectors) / 
sizeof((list_mcs_integrity_vectors)[0]));
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_integrity_vectors[i];
+   err = test_macsec(&cur_td, MCS_VERIFY_ONLY, &opts);
+   if (err)
+   err = -1;
+   else
+   err = 0;
+
+   all_err += err;
+   }
+
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, 
-all_err);
+   return all_err;
+}
+
 static int
 test_inline_macsec_out_pkts_un
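
Two expectation modes coexist in these stats cases: positive cases
propagate the test_macsec() result, while the invalid-vector cases
invert it, since a rejected packet is the desired outcome there. A tiny
sketch of the shared idiom (hypothetical helper, not in the patch):

#include <stdbool.h>

/* Returns 0 when the case passed, -1 when it failed. */
static inline int
mcs_case_result(int test_ret, bool failure_expected)
{
	if (failure_expected)
		return (test_ret != 0) ? 0 : -1;

	return (test_ret != 0) ? -1 : 0;
}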

[PATCH 09/13] test/security: verify MACsec interrupts

2023-05-23 Thread Akhil Goyal
From: Ankur Dwivedi 

This patch enables the test_inline_macsec_interrupts_all
test case for MACsec.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c| 124 +++
 .../test_security_inline_macsec_vectors.h | 306 +-
 2 files changed, 429 insertions(+), 1 deletion(-)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index a6d23f2769..4cb184b62c 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -757,6 +757,71 @@ mcs_stats_check(struct rte_security_ctx *ctx, enum mcs_op 
op,
return 0;
 }
 
+static int
+test_macsec_event_callback(uint16_t port_id, enum rte_eth_event_type type,
+  void *param, void *ret_param)
+{
+   struct mcs_err_vector *vector = (struct mcs_err_vector *)param;
+   struct rte_eth_event_macsec_desc *event_desc = NULL;
+
+   RTE_SET_USED(port_id);
+
+   if (type != RTE_ETH_EVENT_MACSEC)
+   return -1;
+
+   event_desc = ret_param;
+   if (event_desc == NULL) {
+   printf("Event descriptor not set\n");
+   return -1;
+   }
+   vector->notify_event = true;
+
+   switch (event_desc->type) {
+   case RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR:
+   vector->event = RTE_ETH_EVENT_MACSEC_SECTAG_VAL_ERR;
+   switch (event_desc->subtype) {
+   case RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1:
+   vector->event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1;
+   break;
+   case RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1:
+   vector->event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1;
+   break;
+   case RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48:
+   vector->event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48;
+   break;
+   case RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1:
+   vector->event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1;
+   break;
+   case RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1:
+   vector->event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1;
+   break;
+   default:
+   printf("\nUnknown Macsec event subtype: %d", 
event_desc->subtype);
+   }
+   break;
+   case RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP:
+   vector->event = RTE_ETH_EVENT_MACSEC_RX_SA_PN_HARD_EXP;
+   break;
+   case RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP:
+   vector->event = RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP;
+   break;
+   case RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP:
+   vector->event = RTE_ETH_EVENT_MACSEC_TX_SA_PN_HARD_EXP;
+   break;
+   case RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP:
+   vector->event = RTE_ETH_EVENT_MACSEC_TX_SA_PN_SOFT_EXP;
+   break;
+   case RTE_ETH_EVENT_MACSEC_SA_NOT_VALID:
+   vector->event = RTE_ETH_EVENT_MACSEC_SA_NOT_VALID;
+   break;
+   default:
+   printf("Invalid MACsec event reported\n");
+   return -1;
+   }
+
+   return 0;
+}
+
 static int
 test_macsec(const struct mcs_test_vector *td[], enum mcs_op op, const struct 
mcs_test_opts *opts)
 {
@@ -914,6 +979,8 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op 
op, const struct mcs
while (--nb_rx >= 0)
rte_pktmbuf_free(rx_pkts_burst[nb_rx]);
ret = TEST_FAILED;
+   if (opts->check_sectag_interrupts == 1)
+   ret = TEST_SUCCESS;
goto out;
}
 
@@ -1702,6 +1769,59 @@ test_inline_macsec_auth_only_stats(const void *data 
__rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_interrupts_all(const void *data __rte_unused)
+{
+   struct mcs_err_vector err_vector = {0};
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int i, size;
+   int err, all_err = 0;
+   enum rte_eth_event_macsec_subtype subtype[] =  {
+   RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_V_EQ1,
+   RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_E_EQ0_C_EQ1,
+   RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SL_GTE48,
+   RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_ES_EQ1_SC_EQ1,
+   RTE_ETH_SUBEVENT_MACSEC_RX_SECTAG_SC_EQ1_SCB_EQ1,
+   };
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+   opts.check_sectag_interrupts = 1;
+
+   err_vector.ev
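
The callback above only records what the PMD reported; the interesting
part is the arm/wait/disarm cycle around each malformed frame. A sketch
of that flow, assuming the PMD raises RTE_ETH_EVENT_MACSEC quickly
enough for a bounded poll (the helper and the one-second bound are
assumptions):

#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Sketch: arm the MACsec event callback, let the caller inject the
 * trigger frame, then poll for the notification with a crude bound. */
static int
mcs_wait_for_event(uint16_t port_id, struct mcs_err_vector *vector)
{
	int i, rc;

	rc = rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_MACSEC,
					   test_macsec_event_callback, vector);
	if (rc != 0)
		return rc;

	/* ... transmit the malformed frame here ... */

	for (i = 0; i < 1000 && !vector->notify_event; i++)
		rte_delay_ms(1);

	rte_eth_dev_callback_unregister(port_id, RTE_ETH_EVENT_MACSEC,
					test_macsec_event_callback, vector);

	return vector->notify_event ? 0 : -1;
}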

[PATCH 10/13] test/security: verify MACsec Tx HW rekey

2023-05-23 Thread Akhil Goyal
This patch enables the Tx HW rekey test case for MACsec.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c| 137 +-
 .../test_security_inline_macsec_vectors.h | 243 ++
 2 files changed, 378 insertions(+), 2 deletions(-)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index 4cb184b62c..a4c64429b3 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -207,6 +207,8 @@ fill_macsec_sc_conf(const struct mcs_test_vector *td,
uint8_t i;
 
sc_conf->dir = dir;
+   sc_conf->pn_threshold = ((uint64_t)td->xpn << 32) |
+   rte_be_to_cpu_32(*(const uint32_t 
*)(&td->secure_pkt.data[tci_off + 2]));
if (dir == RTE_SECURITY_MACSEC_DIR_TX) {
sc_conf->sc_tx.sa_id = sa_id[0];
if (sa_id[1] != MCS_INVALID_SA) {
@@ -232,12 +234,16 @@ fill_macsec_sc_conf(const struct mcs_test_vector *td,
/* use some default SCI */
sc_conf->sc_tx.sci = 0xf1341e023a2b1c5d;
}
+   if (td->xpn > 0)
+   sc_conf->sc_tx.is_xpn = 1;
} else {
for (i = 0; i < RTE_SECURITY_MACSEC_NUM_AN; i++) {
sc_conf->sc_rx.sa_id[i] = sa_id[i];
sc_conf->sc_rx.sa_in_use[i] = opts->sa_in_use;
}
sc_conf->sc_rx.active = 1;
+   if (td->xpn > 0)
+   sc_conf->sc_rx.is_xpn = 1;
}
 }
 
@@ -834,6 +840,7 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op 
op, const struct mcs
struct rte_security_session_conf sess_conf = {0};
struct rte_security_macsec_sa sa_conf = {0};
struct rte_security_macsec_sc sc_conf = {0};
+   struct mcs_err_vector err_vector = {0};
struct rte_security_ctx *ctx;
int nb_rx = 0, nb_sent;
int i, j = 0, ret, id, an = 0;
@@ -868,6 +875,34 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
}
j++;
 
+   if (opts->rekey_en) {
+
+   err_vector.td = td[i];
+   err_vector.rekey_td = opts->rekey_td;
+   err_vector.event = RTE_ETH_EVENT_MACSEC_UNKNOWN;
+   err_vector.event_subtype = 
RTE_ETH_SUBEVENT_MACSEC_UNKNOWN;
+   rte_eth_dev_callback_register(port_id, 
RTE_ETH_EVENT_MACSEC,
+   test_macsec_event_callback, 
&err_vector);
+   if (op == MCS_DECAP || op == MCS_VERIFY_ONLY)
+   tx_pkts_burst[j] = init_packet(mbufpool,
+   opts->rekey_td->secure_pkt.data,
+   opts->rekey_td->secure_pkt.len);
+   else {
+   tx_pkts_burst[j] = init_packet(mbufpool,
+   opts->rekey_td->plain_pkt.data,
+   opts->rekey_td->plain_pkt.len);
+
+   tx_pkts_burst[j]->ol_flags |= 
RTE_MBUF_F_TX_MACSEC;
+   }
+   if (tx_pkts_burst[j] == NULL) {
+   while (j--)
+   rte_pktmbuf_free(tx_pkts_burst[j]);
+   ret = TEST_FAILED;
+   goto out;
+   }
+   j++;
+   }
+
if (op == MCS_DECAP || op == MCS_ENCAP_DECAP ||
op == MCS_VERIFY_ONLY || op == MCS_AUTH_VERIFY) 
{
for (an = 0; an < RTE_SECURITY_MACSEC_NUM_AN; an++) {
@@ -922,6 +957,20 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
}
tx_sa_id[i][0] = (uint16_t)id;
tx_sa_id[i][1] = MCS_INVALID_SA;
+   if (opts->rekey_en) {
+   memset(&sa_conf, 0, sizeof(struct 
rte_security_macsec_sa));
+   fill_macsec_sa_conf(opts->rekey_td, &sa_conf,
+   RTE_SECURITY_MACSEC_DIR_TX,
+   
opts->rekey_td->secure_pkt.data[tci_off] &
+   RTE_MACSEC_AN_MASK,
+   tci_off);
+   id = rte_security_macsec_sa_create(ctx, 
&sa_conf);
+   if (id < 0) {
+   printf("MACsec rekey SA create failed : 
%d.\n", id);
+   goto out;
+   }
+   tx_sa_id[i][1] = 
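
The pn_threshold assignment added to fill_macsec_sc_conf() above packs
a 64-bit XPN value: the vector's upper 32 XPN bits joined with the
32-bit PN read big-endian from the SecTAG (the tag starts at tci_off,
so the PN sits at tci_off + 2). A standalone restatement of that
composition (the helper name is an assumption):

#include <stdint.h>
#include <rte_byteorder.h>

static inline uint64_t
mcs_pn_threshold(uint32_t xpn_msb, const uint8_t *sectag_pn)
{
	/* The PN is carried big-endian on the wire. */
	uint32_t pn = rte_be_to_cpu_32(*(const uint32_t *)(const void *)sectag_pn);

	return ((uint64_t)xpn_msb << 32) | pn;
}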

[PATCH 11/13] test/security: verify MACsec Rx rekey

2023-05-23 Thread Akhil Goyal
From: Ankur Dwivedi 

This patch enables the Rx rekey test case for MACsec.

Signed-off-by: Ankur Dwivedi 
---
 app/test/test_security_inline_macsec.c | 50 +-
 1 file changed, 49 insertions(+), 1 deletion(-)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index a4c64429b3..6f9cec333d 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -906,8 +906,14 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
if (op == MCS_DECAP || op == MCS_ENCAP_DECAP ||
op == MCS_VERIFY_ONLY || op == MCS_AUTH_VERIFY) 
{
for (an = 0; an < RTE_SECURITY_MACSEC_NUM_AN; an++) {
+   if (opts->rekey_en && an ==
+   
(opts->rekey_td->secure_pkt.data[tci_off] &
+   RTE_MACSEC_AN_MASK))
+   fill_macsec_sa_conf(opts->rekey_td, 
&sa_conf,
+   RTE_SECURITY_MACSEC_DIR_RX, an, 
tci_off);
+   else
/* For simplicity, using same SA conf for all 
AN */
-   fill_macsec_sa_conf(td[i], &sa_conf,
+   fill_macsec_sa_conf(td[i], &sa_conf,
RTE_SECURITY_MACSEC_DIR_RX, an, 
tci_off);
id = rte_security_macsec_sa_create(ctx, 
&sa_conf);
if (id < 0) {
@@ -1054,6 +1060,9 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
}
tx_sa_id[0][0] = (uint16_t)id;
break;
+   case RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP:
+   printf("Received RTE_ETH_EVENT_MACSEC_RX_SA_PN_SOFT_EXP 
event\n");
+   break;
default:
printf("Received unsupported event\n");
}
@@ -1951,6 +1960,41 @@ test_inline_macsec_rekey_tx(const void *data 
__rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_rekey_rx(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   int err, all_err = 0;
+   int i, size;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.protect_frames = true;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.mtu = RTE_ETHER_MTU;
+   opts.rekey_en = 1;
+
+   size = (sizeof(list_mcs_rekey_vectors) / 
sizeof((list_mcs_rekey_vectors)[0]));
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_rekey_vectors[i];
+   opts.rekey_td = &list_mcs_rekey_vectors[++i];
+   err = test_macsec(&cur_td, MCS_DECAP, &opts);
+   if (err) {
+   printf("Rx rekey test case %d failed\n", i);
+   err = -1;
+   } else {
+   printf("Rx rekey test case %d passed\n", i);
+   err = 0;
+   }
+   all_err += err;
+   }
+
+   printf("\n%s: Success: %d, Failure: %d\n", __func__, size + all_err, 
-all_err);
+   return all_err;
+}
+
 static int
 ut_setup_inline_macsec(void)
 {
@@ -2184,6 +2228,10 @@ static struct unit_test_suite inline_macsec_testsuite  = 
{
"MACsec re-key Tx",
ut_setup_inline_macsec, ut_teardown_inline_macsec,
test_inline_macsec_rekey_tx),
+   TEST_CASE_NAMED_ST(
+   "MACsec re-key Rx",
+   ut_setup_inline_macsec, ut_teardown_inline_macsec,
+   test_inline_macsec_rekey_rx),
 
TEST_CASES_END() /**< NULL terminate unit test array */
},
-- 
2.25.1
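
One consumption detail worth noting in test_inline_macsec_rekey_rx()
above: list_mcs_rekey_vectors is walked in pairs, entry i being the
pre-rekey frame and entry i+1 its post-rekey counterpart (the ++i in
the loop body advances past both). A sketch of an equivalent explicit
pairwise loop, reusing the test file's declarations plus RTE_DIM from
rte_common.h; unlike the ++i form, it cannot read one past the end if
the list ever had an odd count:

/* Sketch: explicit pairwise walk over the rekey vectors. */
static int
mcs_rekey_rx_pairs(struct mcs_test_opts *opts)
{
	const struct mcs_test_vector *cur_td;
	int i, err, all_err = 0;
	int size = RTE_DIM(list_mcs_rekey_vectors);

	for (i = 0; i + 1 < size; i += 2) {
		cur_td = &list_mcs_rekey_vectors[i];
		opts->rekey_td = &list_mcs_rekey_vectors[i + 1];
		err = test_macsec(&cur_td, MCS_DECAP, opts);
		all_err += err ? -1 : 0;
	}

	return all_err;
}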



[PATCH 12/13] test/security: verify MACsec anti replay

2023-05-23 Thread Akhil Goyal
From: Ankur Dwivedi 

This patch enables the anti-replay test case for MACsec.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Akhil Goyal 
---
 app/test/test_security_inline_macsec.c|  82 +++
 .../test_security_inline_macsec_vectors.h | 467 ++
 2 files changed, 549 insertions(+)

diff --git a/app/test/test_security_inline_macsec.c 
b/app/test/test_security_inline_macsec.c
index 6f9cec333d..de67744f78 100644
--- a/app/test/test_security_inline_macsec.c
+++ b/app/test/test_security_inline_macsec.c
@@ -61,6 +61,7 @@ struct mcs_test_opts {
uint8_t replay_protect;
uint8_t rekey_en;
const struct mcs_test_vector *rekey_td;
+   const struct mcs_test_vector *ar_td[3];
bool dump_all_stats;
uint8_t check_untagged_rx;
uint8_t check_bad_tag_cnt;
@@ -716,6 +717,15 @@ mcs_stats_check(struct rte_security_ctx *ctx, enum mcs_op 
op,
if (opts->check_pkts_unchecked_stats && 
sc_stat.pkt_unchecked_cnt != 1)
return TEST_FAILED;
 
+   if (opts->replay_protect) {
+   if (opts->replay_win_sz == 0 &&
+   sc_stat.pkt_late_cnt != 2)
+   return TEST_FAILED;
+   else if (opts->replay_win_sz == 32 &&
+   sc_stat.pkt_late_cnt != 1)
+   return TEST_FAILED;
+   }
+
for (i = 0; i < RTE_SECURITY_MACSEC_NUM_AN; i++) {
memset(&sa_stat, 0, sizeof(struct 
rte_security_macsec_sa_stats));
rte_security_macsec_sa_stats_get(ctx, rx_sa_id[i],
@@ -845,6 +855,7 @@ test_macsec(const struct mcs_test_vector *td[], enum mcs_op 
op, const struct mcs
int nb_rx = 0, nb_sent;
int i, j = 0, ret, id, an = 0;
uint8_t tci_off;
+   int k;
 
memset(rx_pkts_burst, 0, sizeof(rx_pkts_burst[0]) * opts->nb_td);
 
@@ -875,6 +886,20 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
}
j++;
 
+   if (opts->replay_protect) {
+   for (k = 0; k < 3; k++, j++) {
+   tx_pkts_burst[j] = init_packet(mbufpool,
+   opts->ar_td[k]->secure_pkt.data,
+   opts->ar_td[k]->secure_pkt.len);
+   if (tx_pkts_burst[j] == NULL) {
+   while (j--)
+   
rte_pktmbuf_free(tx_pkts_burst[j]);
+   ret = TEST_FAILED;
+   goto out;
+   }
+   }
+   }
+
if (opts->rekey_en) {
 
err_vector.td = td[i];
@@ -1068,6 +1093,15 @@ test_macsec(const struct mcs_test_vector *td[], enum 
mcs_op op, const struct mcs
}
}
 
+   if (opts->replay_protect) {
+   for (i = 0; i < nb_rx; i++) {
+   rte_pktmbuf_free(rx_pkts_burst[i]);
+   rx_pkts_burst[i] = NULL;
+   }
+   ret = TEST_SUCCESS;
+   goto out;
+   }
+
for (i = 0; i < nb_rx; i++) {
if (opts->rekey_en && i == 1) {
/* The second received packet is matched with
@@ -1995,6 +2029,50 @@ test_inline_macsec_rekey_rx(const void *data 
__rte_unused)
return all_err;
 }
 
+static int
+test_inline_macsec_anti_replay(const void *data __rte_unused)
+{
+   const struct mcs_test_vector *cur_td;
+   struct mcs_test_opts opts = {0};
+   uint16_t replay_win_sz[2] = {32, 0};
+   int err, all_err = 0;
+   int i, size;
+   int j;
+
+   opts.val_frames = RTE_SECURITY_MACSEC_VALIDATE_STRICT;
+   opts.sa_in_use = 1;
+   opts.nb_td = 1;
+   opts.sectag_insert_mode = 1;
+   opts.replay_protect = 1;
+
+   size = (sizeof(list_mcs_anti_replay_vectors) / 
sizeof((list_mcs_anti_replay_vectors)[0]));
+
+   for (j = 0; j < 2; j++) {
+   opts.replay_win_sz = replay_win_sz[j];
+
+   for (i = 0; i < size; i++) {
+   cur_td = &list_mcs_anti_replay_vectors[i];
+   opts.ar_td[0] = &list_mcs_anti_replay_vectors[++i];
+   opts.ar_td[1] = &list_mcs_anti_replay_vectors[++i];
+   opts.ar_td[2] = &list_mcs_anti_replay_vectors[++i];
+   err = test_macsec(&cur_td, MCS_DECAP, &opts);
+   if (err) {
+   printf("Replay window: %u, Anti replay test 
case %d failed\n",
+  opts.replay_win_sz, i);
+   err = -1;
+   } else {
+   printf("Replay window: %

[PATCH 13/13] test/security: remove no MACsec support case

2023-05-23 Thread Akhil Goyal
Removed the test_capability_get_no_support_for_macsec case,
as MACsec is now supported and the capability table can
contain valid MACsec entries.

Signed-off-by: Akhil Goyal 
---
 app/test/test_security.c | 37 -
 1 file changed, 37 deletions(-)

diff --git a/app/test/test_security.c b/app/test/test_security.c
index e4ddcefe40..4783cd0663 100644
--- a/app/test/test_security.c
+++ b/app/test/test_security.c
@@ -1828,41 +1828,6 @@ test_capability_get_no_matching_protocol(void)
return TEST_SUCCESS;
 }
 
-/**
- * Test execution of rte_security_capability_get when macsec protocol
- * is searched and capabilities table contain proper entry.
- * However macsec records search is not supported in rte_security.
- */
-static int
-test_capability_get_no_support_for_macsec(void)
-{
-   struct security_unittest_params *ut_params = &unittest_params;
-   struct rte_security_capability_idx idx = {
-   .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
-   .protocol = RTE_SECURITY_PROTOCOL_MACSEC,
-   };
-   struct rte_security_capability capabilities[] = {
-   {
-   .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
-   .protocol = RTE_SECURITY_PROTOCOL_MACSEC,
-   },
-   {
-   .action = RTE_SECURITY_ACTION_TYPE_NONE,
-   },
-   };
-
-   mock_capabilities_get_exp.device = NULL;
-   mock_capabilities_get_exp.ret = capabilities;
-
-   const struct rte_security_capability *ret;
-   ret = rte_security_capability_get(&ut_params->ctx, &idx);
-   TEST_ASSERT_MOCK_FUNCTION_CALL_RET(rte_security_capability_get,
-   ret, NULL, "%p");
-   TEST_ASSERT_MOCK_CALLS(mock_capabilities_get_exp, 1);
-
-   return TEST_SUCCESS;
-}
-
 /**
  * Test execution of rte_security_capability_get when capabilities table
  * does not contain entry with matching ipsec proto field
@@ -2319,8 +2284,6 @@ static struct unit_test_suite security_testsuite  = {
test_capability_get_no_matching_action),
TEST_CASE_ST(ut_setup_with_session, ut_teardown,
test_capability_get_no_matching_protocol),
-   TEST_CASE_ST(ut_setup_with_session, ut_teardown,
-   test_capability_get_no_support_for_macsec),
TEST_CASE_ST(ut_setup_with_session, ut_teardown,
test_capability_get_ipsec_mismatch_proto),
TEST_CASE_ST(ut_setup_with_session, ut_teardown,
-- 
2.25.1



[PATCH 00/15] net/cnxk: add MACsec support

2023-05-23 Thread Akhil Goyal
Added MACsec support to the Marvell cnxk PMD.
The patchset has been pending since the last release [1].
It is sent as a new series now that the functionality is
complete and tested on hardware.

Depends-on: https://patches.dpdk.org/project/dpdk/list/?series=28140

[1] 
https://patches.dpdk.org/project/dpdk/cover/20220928124516.93050-1-gak...@marvell.com/

Akhil Goyal (15):
  common/cnxk: add ROC MACsec initialization
  common/cnxk: add MACsec SA configuration
  common/cnxk: add MACsec SC configuration APIs
  common/cnxk: add MACsec secy and flow configuration
  common/cnxk: add MACsec PN and LMAC mode configuration
  common/cnxk: add MACsec stats
  common/cnxk: add MACsec interrupt APIs
  common/cnxk: add MACsec port configuration
  common/cnxk: add MACsec control port configuration
  common/cnxk: add MACsec FIPS mbox
  common/cnxk: derive hash key for MACsec
  net/cnxk: add MACsec initialization
  net/cnxk: create/destroy MACsec SC/SA
  net/cnxk: add MACsec session and flow configuration
  net/cnxk: add MACsec stats

 drivers/common/cnxk/meson.build   |   3 +
 drivers/common/cnxk/roc_aes.c |  86 ++-
 drivers/common/cnxk/roc_aes.h |   4 +-
 drivers/common/cnxk/roc_api.h |   3 +
 drivers/common/cnxk/roc_dev.c |  86 +++
 drivers/common/cnxk/roc_features.h|   6 +
 drivers/common/cnxk/roc_idev.c|  21 +
 drivers/common/cnxk/roc_idev.h|   2 +
 drivers/common/cnxk/roc_idev_priv.h   |   1 +
 drivers/common/cnxk/roc_mbox.h| 524 ++-
 drivers/common/cnxk/roc_mcs.c | 895 ++
 drivers/common/cnxk/roc_mcs.h | 619 ++
 drivers/common/cnxk/roc_mcs_priv.h|  73 +++
 drivers/common/cnxk/roc_mcs_sec_cfg.c | 528 +++
 drivers/common/cnxk/roc_mcs_stats.c   | 193 ++
 drivers/common/cnxk/roc_priv.h|   3 +
 drivers/common/cnxk/roc_utils.c   |   5 +
 drivers/common/cnxk/version.map   |  44 ++
 drivers/net/cnxk/cn10k_ethdev_sec.c   |  25 +-
 drivers/net/cnxk/cn10k_flow.c |  22 +-
 drivers/net/cnxk/cnxk_ethdev.c|  15 +
 drivers/net/cnxk/cnxk_ethdev.h|  30 +
 drivers/net/cnxk/cnxk_ethdev_mcs.c| 726 +
 drivers/net/cnxk/cnxk_ethdev_mcs.h| 111 
 drivers/net/cnxk/cnxk_ethdev_sec.c|   2 +-
 drivers/net/cnxk/cnxk_flow.c  |   5 +
 drivers/net/cnxk/meson.build  |   1 +
 27 files changed, 3995 insertions(+), 38 deletions(-)
 create mode 100644 drivers/common/cnxk/roc_mcs.c
 create mode 100644 drivers/common/cnxk/roc_mcs.h
 create mode 100644 drivers/common/cnxk/roc_mcs_priv.h
 create mode 100644 drivers/common/cnxk/roc_mcs_sec_cfg.c
 create mode 100644 drivers/common/cnxk/roc_mcs_stats.c
 create mode 100644 drivers/net/cnxk/cnxk_ethdev_mcs.c
 create mode 100644 drivers/net/cnxk/cnxk_ethdev_mcs.h

-- 
2.25.1



[PATCH 01/15] common/cnxk: add ROC MACsec initialization

2023-05-23 Thread Akhil Goyal
Added ROC init and fini APIs to support MACsec.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/meson.build |   1 +
 drivers/common/cnxk/roc_api.h   |   3 +
 drivers/common/cnxk/roc_features.h  |   6 +
 drivers/common/cnxk/roc_idev.c  |  21 +++
 drivers/common/cnxk/roc_idev.h  |   2 +
 drivers/common/cnxk/roc_idev_priv.h |   1 +
 drivers/common/cnxk/roc_mbox.h  |  65 +++-
 drivers/common/cnxk/roc_mcs.c   | 245 
 drivers/common/cnxk/roc_mcs.h   |  39 +
 drivers/common/cnxk/roc_mcs_priv.h  |  65 
 drivers/common/cnxk/roc_priv.h  |   3 +
 drivers/common/cnxk/roc_utils.c |   5 +
 drivers/common/cnxk/version.map |   6 +
 13 files changed, 461 insertions(+), 1 deletion(-)
 create mode 100644 drivers/common/cnxk/roc_mcs.c
 create mode 100644 drivers/common/cnxk/roc_mcs.h
 create mode 100644 drivers/common/cnxk/roc_mcs_priv.h

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 631b594f32..e33c002676 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -26,6 +26,7 @@ sources = files(
 'roc_irq.c',
 'roc_ie_ot.c',
 'roc_mbox.c',
+'roc_mcs.c',
 'roc_ml.c',
 'roc_model.c',
 'roc_nix.c',
diff --git a/drivers/common/cnxk/roc_api.h b/drivers/common/cnxk/roc_api.h
index bbc94ab48e..f630853088 100644
--- a/drivers/common/cnxk/roc_api.h
+++ b/drivers/common/cnxk/roc_api.h
@@ -114,4 +114,7 @@
 /* ML */
 #include "roc_ml.h"
 
+/* MACsec */
+#include "roc_mcs.h"
+
 #endif /* _ROC_API_H_ */
diff --git a/drivers/common/cnxk/roc_features.h 
b/drivers/common/cnxk/roc_features.h
index 252f306a86..dd39259c81 100644
--- a/drivers/common/cnxk/roc_features.h
+++ b/drivers/common/cnxk/roc_features.h
@@ -40,4 +40,10 @@ roc_feature_nix_has_reass(void)
return roc_model_is_cn10ka();
 }
 
+static inline bool
+roc_feature_nix_has_macsec(void)
+{
+   return roc_model_is_cn10kb();
+}
+
 #endif
diff --git a/drivers/common/cnxk/roc_idev.c b/drivers/common/cnxk/roc_idev.c
index 62a4fd8880..f9b94f3ca0 100644
--- a/drivers/common/cnxk/roc_idev.c
+++ b/drivers/common/cnxk/roc_idev.c
@@ -38,6 +38,7 @@ idev_set_defaults(struct idev_cfg *idev)
idev->num_lmtlines = 0;
idev->bphy = NULL;
idev->cpt = NULL;
+   idev->mcs = NULL;
idev->nix_inl_dev = NULL;
plt_spinlock_init(&idev->nix_inl_dev_lock);
plt_spinlock_init(&idev->npa_dev_lock);
@@ -186,6 +187,26 @@ roc_idev_cpt_get(void)
return NULL;
 }
 
+struct roc_mcs *
+roc_idev_mcs_get(void)
+{
+   struct idev_cfg *idev = idev_get_cfg();
+
+   if (idev != NULL)
+   return idev->mcs;
+
+   return NULL;
+}
+
+void
+roc_idev_mcs_set(struct roc_mcs *mcs)
+{
+   struct idev_cfg *idev = idev_get_cfg();
+
+   if (idev != NULL)
+   __atomic_store_n(&idev->mcs, mcs, __ATOMIC_RELEASE);
+}
+
 uint64_t *
 roc_nix_inl_outb_ring_base_get(struct roc_nix *roc_nix)
 {
diff --git a/drivers/common/cnxk/roc_idev.h b/drivers/common/cnxk/roc_idev.h
index 926aac0634..dbf1f46335 100644
--- a/drivers/common/cnxk/roc_idev.h
+++ b/drivers/common/cnxk/roc_idev.h
@@ -18,4 +18,6 @@ void __roc_api roc_idev_cpt_set(struct roc_cpt *cpt);
 struct roc_nix *__roc_api roc_idev_npa_nix_get(void);
 uint64_t __roc_api roc_idev_nix_inl_meta_aura_get(void);
 
+struct roc_mcs *__roc_api roc_idev_mcs_get(void);
+void __roc_api roc_idev_mcs_set(struct roc_mcs *mcs);
 #endif /* _ROC_IDEV_H_ */
diff --git a/drivers/common/cnxk/roc_idev_priv.h 
b/drivers/common/cnxk/roc_idev_priv.h
index b97d2936a2..ce26caa062 100644
--- a/drivers/common/cnxk/roc_idev_priv.h
+++ b/drivers/common/cnxk/roc_idev_priv.h
@@ -30,6 +30,7 @@ struct idev_cfg {
struct roc_bphy *bphy;
struct roc_cpt *cpt;
struct roc_sso *sso;
+   struct roc_mcs *mcs;
struct nix_inl_dev *nix_inl_dev;
struct idev_nix_inl_cfg inl_cfg;
plt_spinlock_t nix_inl_dev_lock;
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index af3c10b0b0..2ba35377da 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -275,7 +275,12 @@ struct mbox_msghdr {
M(NIX_SPI_TO_SA_ADD, 0x8026, nix_spi_to_sa_add, nix_spi_to_sa_add_req, \
  nix_spi_to_sa_add_rsp)   \
M(NIX_SPI_TO_SA_DELETE, 0x8027, nix_spi_to_sa_delete,  \
- nix_spi_to_sa_delete_req, msg_rsp)
+ nix_spi_to_sa_delete_req, msg_rsp)   \
+	/* MCS mbox IDs (range 0xa000 - 0xbFFF) */			\
+	M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req,\
+	  mcs_alloc_rsrc_rsp)						\
+   M(MCS
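
A small asymmetry in the idev accessors above: roc_idev_mcs_set()
publishes the pointer with a release store, but roc_idev_mcs_get()
reads it plainly. A release store would normally be paired with an
acquire load on the reader side; a sketch of such a variant
(hypothetical, not in the patch):

/* Hypothetical acquire-side counterpart to the __ATOMIC_RELEASE
 * store in roc_idev_mcs_set(). */
struct roc_mcs *
roc_idev_mcs_get_acquire(void)
{
	struct idev_cfg *idev = idev_get_cfg();

	if (idev != NULL)
		return __atomic_load_n(&idev->mcs, __ATOMIC_ACQUIRE);

	return NULL;
}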

[PATCH 03/15] common/cnxk: add MACsec SC configuration APIs

2023-05-23 Thread Akhil Goyal
Added ROC APIs to configure a MACsec secure channel (SC)
and its mapping to SAs for both Rx and Tx.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h|  37 ++
 drivers/common/cnxk/roc_mcs.h |  41 ++
 drivers/common/cnxk/roc_mcs_sec_cfg.c | 171 ++
 drivers/common/cnxk/version.map   |   7 ++
 4 files changed, 256 insertions(+)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 66a6de2cd2..0673c31389 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -280,7 +280,10 @@ struct mbox_msghdr {
	M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req,\
	  mcs_alloc_rsrc_rsp)						\
	M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp)	\
+	M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, msg_rsp)	\
	M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, msg_rsp)\
+	M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, msg_rsp)	\
+	M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, msg_rsp)	\
	M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)	\
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -706,6 +709,16 @@ struct mcs_free_rsrc_req {
uint64_t __io rsvd;
 };
 
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io sci; /* SCI */
+   uint64_t __io secy_id; /* secy index mapped to SC */
+   uint8_t __io sc_id; /* SC CAM entry index */
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
 struct mcs_sa_plcy_write_req {
struct mbox_msghdr hdr;
uint64_t __io plcy[2][9]; /* Support 2 SA policy */
@@ -716,6 +729,30 @@ struct mcs_sa_plcy_write_req {
uint64_t __io rsvd;
 };
 
+struct mcs_tx_sc_sa_map {
+   struct mbox_msghdr hdr;
+   uint8_t __io sa_index0;
+   uint8_t __io sa_index1;
+   uint8_t __io rekey_ena;
+   uint8_t __io sa_index0_vld;
+   uint8_t __io sa_index1_vld;
+   uint8_t __io tx_sa_active;
+   uint64_t __io sectag_sci;
+   uint8_t __io sc_id; /* used as index for SA_MEM_MAP */
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+   struct mbox_msghdr hdr;
+   uint8_t __io sa_index;
+   uint8_t __io sa_in_use;
+   uint8_t __io sc_id;
+   /* an range is 0-3, sc_id + an used as index SA_MEM_MAP */
+   uint8_t __io an;
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
 
 struct mcs_hw_info {
struct mbox_msghdr hdr;
diff --git a/drivers/common/cnxk/roc_mcs.h b/drivers/common/cnxk/roc_mcs.h
index a345d2a880..2787d6a940 100644
--- a/drivers/common/cnxk/roc_mcs.h
+++ b/drivers/common/cnxk/roc_mcs.h
@@ -32,6 +32,12 @@ struct roc_mcs_free_rsrc_req {
uint8_t all; /* Free all the cam resources */
 };
 
+/* RX SC_CAM mapping */
+struct roc_mcs_rx_sc_cam_write_req {
+   uint64_t sci; /* SCI */
+   uint64_t secy_id; /* secy index mapped to SC */
+   uint8_t sc_id; /* SC CAM entry index */
+};
 
 struct roc_mcs_sa_plcy_write_req {
uint64_t plcy[2][9];
@@ -40,6 +46,24 @@ struct roc_mcs_sa_plcy_write_req {
uint8_t dir;
 };
 
+struct roc_mcs_tx_sc_sa_map {
+   uint8_t sa_index0;
+   uint8_t sa_index1;
+   uint8_t rekey_ena;
+   uint8_t sa_index0_vld;
+   uint8_t sa_index1_vld;
+   uint8_t tx_sa_active;
+   uint64_t sectag_sci;
+   uint8_t sc_id; /* used as index for SA_MEM_MAP */
+};
+
+struct roc_mcs_rx_sc_sa_map {
+   uint8_t sa_index;
+   uint8_t sa_in_use;
+   uint8_t sc_id;
+   uint8_t an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
+};
+
 struct roc_mcs_hw_info {
uint8_t num_mcs_blks; /* Number of MCS blocks */
uint8_t tcam_entries; /* RX/TX Tcam entries per mcs block */
@@ -79,4 +103,21 @@ __roc_api int roc_mcs_sa_policy_write(struct roc_mcs *mcs,
  struct roc_mcs_sa_plcy_write_req 
*sa_plcy);
 __roc_api int roc_mcs_sa_policy_read(struct roc_mcs *mcs,
 struct roc_mcs_sa_plcy_write_req *sa_plcy);
+/* RX SC read, write and enable */
+__roc_api int roc_mcs_rx_sc_cam_write(struct roc_mcs *mcs,
+ struct roc_mcs_rx_sc_cam_write_req 
*rx_sc_cam);
+__roc_api int roc_mcs_rx_sc_cam_read(struct roc_mcs *mcs,
+struct roc_mcs_rx_sc_cam_write_req 
*rx_sc_cam);
+__roc_api int roc_mcs_rx_sc_cam_enable(struct roc_mcs *mcs,
+  struct roc_mcs_rx_sc_cam_write_req 
*
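
Putting the new APIs together, the expected Rx-side bring-up is: write
the SC CAM entry for the SCI, then map an SA (per association number)
onto that SC. A sketch of the sequence, assuming a
roc_mcs_rx_sc_sa_map_write() wrapper exists for the
MCS_RX_SC_SA_MAP_WRITE mailbox above:

/* Sketch: program one Rx secure channel and attach sa_idx for AN 0. */
static int
mcs_rx_sc_setup(struct roc_mcs *mcs, uint64_t sci, uint8_t secy_idx,
		uint8_t sc_idx, uint8_t sa_idx)
{
	struct roc_mcs_rx_sc_cam_write_req sc = {
		.sci = sci,
		.secy_id = secy_idx,
		.sc_id = sc_idx,
	};
	struct roc_mcs_rx_sc_sa_map map = {
		.sa_index = sa_idx,
		.sa_in_use = 1,
		.sc_id = sc_idx,
		.an = 0,
	};
	int rc;

	rc = roc_mcs_rx_sc_cam_write(mcs, &sc);
	if (rc != 0)
		return rc;

	return roc_mcs_rx_sc_sa_map_write(mcs, &map);	/* assumed API */
}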

[PATCH 02/15] common/cnxk: add MACsec SA configuration

2023-05-23 Thread Akhil Goyal
Added ROC APIs to allocate/free MACsec resources
and APIs to read and write the SA policy.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/meson.build   |   1 +
 drivers/common/cnxk/roc_mbox.h|  12 ++
 drivers/common/cnxk/roc_mcs.h |  43 ++
 drivers/common/cnxk/roc_mcs_sec_cfg.c | 211 ++
 drivers/common/cnxk/version.map   |   4 +
 5 files changed, 271 insertions(+)
 create mode 100644 drivers/common/cnxk/roc_mcs_sec_cfg.c

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index e33c002676..589baf74fe 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -27,6 +27,7 @@ sources = files(
 'roc_ie_ot.c',
 'roc_mbox.c',
 'roc_mcs.c',
+'roc_mcs_sec_cfg.c',
 'roc_ml.c',
 'roc_model.c',
 'roc_nix.c',
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 2ba35377da..66a6de2cd2 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -280,6 +280,7 @@ struct mbox_msghdr {
	M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req,\
	  mcs_alloc_rsrc_rsp)						\
	M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp)	\
+	M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, msg_rsp)\
	M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)	\
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -705,6 +706,17 @@ struct mcs_free_rsrc_req {
uint64_t __io rsvd;
 };
 
+struct mcs_sa_plcy_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io plcy[2][9]; /* Support 2 SA policy */
+   uint8_t __io sa_index[2];
+   uint8_t __io sa_cnt;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 struct mcs_hw_info {
struct mbox_msghdr hdr;
uint8_t __io num_mcs_blks; /* Number of MCS blocks */
diff --git a/drivers/common/cnxk/roc_mcs.h b/drivers/common/cnxk/roc_mcs.h
index 504671a833..a345d2a880 100644
--- a/drivers/common/cnxk/roc_mcs.h
+++ b/drivers/common/cnxk/roc_mcs.h
@@ -7,6 +7,39 @@
 
 #define MCS_AES_GCM_256_KEYLEN 32
 
+struct roc_mcs_alloc_rsrc_req {
+   uint8_t rsrc_type;
+   uint8_t rsrc_cnt; /* Resources count */
+   uint8_t dir;  /* Macsec ingress or egress side */
+   uint8_t all;  /* Allocate all resource type one each */
+};
+
+struct roc_mcs_alloc_rsrc_rsp {
+   uint8_t flow_ids[128]; /* Index of reserved entries */
+   uint8_t secy_ids[128];
+   uint8_t sc_ids[128];
+   uint8_t sa_ids[256];
+   uint8_t rsrc_type;
+   uint8_t rsrc_cnt; /* No of entries reserved */
+   uint8_t dir;
+   uint8_t all;
+};
+
+struct roc_mcs_free_rsrc_req {
+   uint8_t rsrc_id; /* Index of the entry to be freed */
+   uint8_t rsrc_type;
+   uint8_t dir;
+   uint8_t all; /* Free all the cam resources */
+};
+
+struct roc_mcs_sa_plcy_write_req {
+   uint64_t plcy[2][9];
+   uint8_t sa_index[2];
+   uint8_t sa_cnt;
+   uint8_t dir;
+};
+
 struct roc_mcs_hw_info {
uint8_t num_mcs_blks; /* Number of MCS blocks */
uint8_t tcam_entries; /* RX/TX Tcam entries per mcs block */
@@ -36,4 +69,14 @@ __roc_api void roc_mcs_dev_fini(struct roc_mcs *mcs);
 __roc_api struct roc_mcs *roc_mcs_dev_get(uint8_t mcs_idx);
 /* HW info get */
 __roc_api int roc_mcs_hw_info_get(struct roc_mcs_hw_info *hw_info);
+
+/* Resource allocation and free */
+__roc_api int roc_mcs_alloc_rsrc(struct roc_mcs *mcs, struct 
roc_mcs_alloc_rsrc_req *req,
+struct roc_mcs_alloc_rsrc_rsp *rsp);
+__roc_api int roc_mcs_free_rsrc(struct roc_mcs *mcs, struct 
roc_mcs_free_rsrc_req *req);
+/* SA policy read and write */
+__roc_api int roc_mcs_sa_policy_write(struct roc_mcs *mcs,
+ struct roc_mcs_sa_plcy_write_req 
*sa_plcy);
+__roc_api int roc_mcs_sa_policy_read(struct roc_mcs *mcs,
+struct roc_mcs_sa_plcy_write_req *sa_plcy);
 #endif /* _ROC_MCS_H_ */
diff --git a/drivers/common/cnxk/roc_mcs_sec_cfg.c 
b/drivers/common/cnxk/roc_mcs_sec_cfg.c
new file mode 100644
index 00..50f2352c20
--- /dev/null
+++ b/drivers/common/cnxk/roc_mcs_sec_cfg.c
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2023 Marvell.
+ */
+
+#include "roc_api.h"
+#include "roc_priv.h"
+
+int
+roc_mcs_alloc_rsrc(struct roc_mcs *mcs, struct roc_mcs_alloc_rsrc_req *req,
+  struct roc_mcs_alloc_rsrc_rsp *rsp)
+{
+   struct mcs_priv *priv = roc_mcs_to_mcs_priv(mcs);
+   struct mcs_alloc_rsrc_req *rsrc_req;
+   struct mcs_alloc_rsrc_rsp *rsrc_rsp;
+   int rc, i;
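
Typical usage of this pair: request one resource of a given type, keep
the index returned in the response, and hand the same index back on
teardown. A sketch (the rsrc_type and dir encodings are placeholders;
the real values come from the MCS mailbox definitions):

#include <string.h>

/* Sketch: allocate one Tx SA and free it again. */
static int
mcs_sa_alloc_free_demo(struct roc_mcs *mcs, uint8_t sa_rsrc_type,
		       uint8_t dir_tx)
{
	struct roc_mcs_alloc_rsrc_req req = {
		.rsrc_type = sa_rsrc_type,	/* placeholder encoding */
		.rsrc_cnt = 1,
		.dir = dir_tx,			/* placeholder encoding */
	};
	struct roc_mcs_alloc_rsrc_rsp rsp;
	struct roc_mcs_free_rsrc_req free_req;
	int rc;

	memset(&rsp, 0, sizeof(rsp));
	rc = roc_mcs_alloc_rsrc(mcs, &req, &rsp);
	if (rc != 0)
		return rc;

	memset(&free_req, 0, sizeof(free_req));
	free_req.rsrc_id = rsp.sa_ids[0];
	free_req.rsrc_type = sa_rsrc_type;
	free_req.dir = dir_tx;

	return roc_mcs_free_rsrc(mcs, &free_req);
}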

[PATCH 04/15] common/cnxk: add MACsec secy and flow configuration

2023-05-23 Thread Akhil Goyal
Added ROC APIs to configure the MACsec SecY policy and
flow entries.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h|  38 +
 drivers/common/cnxk/roc_mcs.h |  37 +
 drivers/common/cnxk/roc_mcs_sec_cfg.c | 115 ++
 drivers/common/cnxk/version.map   |   5 ++
 4 files changed, 195 insertions(+)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 0673c31389..2f6ce958d8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -280,10 +280,14 @@ struct mbox_msghdr {
	M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req,\
	  mcs_alloc_rsrc_rsp)						\
	M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp)	\
+	M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req,	\
+	  msg_rsp)							\
+	M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, msg_rsp)	\
	M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, msg_rsp)	\
	M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, msg_rsp)\
	M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, msg_rsp)	\
	M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, msg_rsp)	\
+	M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, msg_rsp)	\
	M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)	\
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
@@ -709,6 +713,31 @@ struct mcs_free_rsrc_req {
uint64_t __io rsvd;
 };
 
+struct mcs_flowid_entry_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io data[4];
+   uint64_t __io mask[4];
+   uint64_t __io sci; /* CNF10K-B for tx_secy_mem_map */
+   uint8_t __io flow_id;
+   uint8_t __io secy_id; /* secyid for which flowid is mapped */
+   /* sc_id is Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+   uint8_t __io sc_id;
+   uint8_t __io ena; /* Enable tcam entry */
+   uint8_t __io ctr_pkt;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io plcy;
+   uint8_t __io secy_id;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 /* RX SC_CAM mapping */
 struct mcs_rx_sc_cam_write_req {
struct mbox_msghdr hdr;
@@ -754,6 +783,15 @@ struct mcs_rx_sc_sa_map {
uint64_t __io rsvd;
 };
 
+struct mcs_flowid_ena_dis_entry {
+   struct mbox_msghdr hdr;
+   uint8_t __io flow_id;
+   uint8_t __io ena;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 struct mcs_hw_info {
struct mbox_msghdr hdr;
uint8_t __io num_mcs_blks; /* Number of MCS blocks */
diff --git a/drivers/common/cnxk/roc_mcs.h b/drivers/common/cnxk/roc_mcs.h
index 2787d6a940..7e0a98e91a 100644
--- a/drivers/common/cnxk/roc_mcs.h
+++ b/drivers/common/cnxk/roc_mcs.h
@@ -32,6 +32,24 @@ struct roc_mcs_free_rsrc_req {
uint8_t all; /* Free all the cam resources */
 };
 
+struct roc_mcs_flowid_entry_write_req {
+   uint64_t data[4];
+   uint64_t mask[4];
+   uint64_t sci; /* 105N for tx_secy_mem_map */
+   uint8_t flow_id;
+   uint8_t secy_id; /* secyid for which flowid is mapped */
+   uint8_t sc_id;   /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+   uint8_t ena; /* Enable tcam entry */
+   uint8_t ctr_pkt;
+   uint8_t dir;
+};
+
+struct roc_mcs_secy_plcy_write_req {
+   uint64_t plcy;
+   uint8_t secy_id;
+   uint8_t dir;
+};
+
 /* RX SC_CAM mapping */
 struct roc_mcs_rx_sc_cam_write_req {
uint64_t sci; /* SCI */
@@ -64,6 +82,12 @@ struct roc_mcs_rx_sc_sa_map {
uint8_t an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
 };
 
+struct roc_mcs_flowid_ena_dis_entry {
+   uint8_t flow_id;
+   uint8_t ena;
+   uint8_t dir;
+};
+
 struct roc_mcs_hw_info {
uint8_t num_mcs_blks; /* Number of MCS blocks */
uint8_t tcam_entries; /* RX/TX Tcam entries per mcs block */
@@ -110,6 +134,11 @@ __roc_api int roc_mcs_rx_sc_cam_read(struct roc_mcs *mcs,
 struct roc_mcs_rx_sc_cam_write_req 
*rx_sc_cam);
 __roc_api int roc_mcs_rx_sc_cam_enable(struct roc_mcs *mcs,
   struct roc_mcs_rx_sc_cam_write_req 
*rx_sc_cam);
+/* SECY policy read and write */
+__roc_api int roc_mcs_secy_policy_w

[PATCH 05/15] common/cnxk: add MACsec PN and LMAC mode configuration

2023-05-23 Thread Akhil Goyal
Added ROC APIs for setting the packet number (PN)
and LMAC-related configuration.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h| 56 +
 drivers/common/cnxk/roc_mcs.c | 71 +++
 drivers/common/cnxk/roc_mcs.h | 48 ++
 drivers/common/cnxk/roc_mcs_sec_cfg.c | 31 
 drivers/common/cnxk/version.map   |  5 ++
 5 files changed, 211 insertions(+)

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 2f6ce958d8..9f9783ec92 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -288,7 +288,11 @@ struct mbox_msghdr {
	M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, msg_rsp)	\
	M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, msg_rsp)	\
	M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, msg_rsp)	\
+	M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, msg_rsp)	\
+	M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, msg_rsp)	\
	M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)	\
+	M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp)	\
+	M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, msg_rsp)	\
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES   
\
@@ -792,6 +796,34 @@ struct mcs_flowid_ena_dis_entry {
uint64_t __io rsvd;
 };
 
+struct mcs_pn_table_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io next_pn;
+   uint8_t __io pn_id;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_cam_entry_read_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io rsrc_type; /* TCAM/SECY/SC/SA/PN */
+   uint8_t __io rsrc_id;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_cam_entry_read_rsp {
+   struct mbox_msghdr hdr;
+   uint64_t __io reg_val[10];
+   uint8_t __io rsrc_type;
+   uint8_t __io rsrc_id;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 struct mcs_hw_info {
struct mbox_msghdr hdr;
uint8_t __io num_mcs_blks; /* Number of MCS blocks */
@@ -802,6 +834,30 @@ struct mcs_hw_info {
uint64_t __io rsvd[16];
 };
 
+struct mcs_set_active_lmac {
+   struct mbox_msghdr hdr;
+   uint32_t __io lmac_bmap; /* bitmap of active lmac per mcs block */
+   uint8_t __io mcs_id;
+   uint16_t channel_base; /* MCS channel base */
+   uint64_t __io rsvd;
+};
+
+struct mcs_set_lmac_mode {
+   struct mbox_msghdr hdr;
+   uint8_t __io mode; /* '1' for internal bypass mode (passthrough), '0' for MCS processing */
+   uint8_t __io lmac_id;
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
+struct mcs_set_pn_threshold {
+   struct mbox_msghdr hdr;
+   uint64_t __io threshold;
+   uint8_t __io xpn; /* '1' for setting xpn threshold */
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
 
 /* NPA mbox message formats */
 
diff --git a/drivers/common/cnxk/roc_mcs.c b/drivers/common/cnxk/roc_mcs.c
index ce92a6cd47..b15933d362 100644
--- a/drivers/common/cnxk/roc_mcs.c
+++ b/drivers/common/cnxk/roc_mcs.c
@@ -42,6 +42,77 @@ roc_mcs_hw_info_get(struct roc_mcs_hw_info *hw_info)
return rc;
 }
 
+int
roc_mcs_active_lmac_set(struct roc_mcs *mcs, struct roc_mcs_set_active_lmac *lmac)
+{
+   struct mcs_set_active_lmac *req;
+   struct msg_rsp *rsp;
+
+   /* Only needed for 105N */
+   if (!roc_model_is_cnf10kb())
+   return 0;
+
+   if (lmac == NULL)
+   return -EINVAL;
+
+   MCS_SUPPORT_CHECK;
+
+   req = mbox_alloc_msg_mcs_set_active_lmac(mcs->mbox);
+   if (req == NULL)
+   return -ENOMEM;
+
+   req->lmac_bmap = lmac->lmac_bmap;
+   req->channel_base = lmac->channel_base;
+   req->mcs_id = mcs->idx;
+
+   return mbox_process_msg(mcs->mbox, (void *)&rsp);
+}
+
+int
+roc_mcs_lmac_mode_set(struct roc_mcs *mcs, struct roc_mcs_set_lmac_mode *port)
+{
+   struct mcs_set_lmac_mode *req;
+   struct msg_rsp *rsp;
+
+   if (port == NULL)
+   return -EINVAL;
+
+   MCS_SUPPORT_CHECK;
+
+   req = mbox_alloc_msg_mcs_set_lmac_mode(mcs->mbox);
+   if (req == NULL)
+   return -ENOMEM;
+
+   req->lmac_id = port->lmac_id;
+   req->mcs_id = mcs->idx;
+   req->mode = port->mode;
+
+   return mbox_process_msg(mcs->mbox, (void *)&rsp);
+}
+
+int
+roc_mcs_pn_threshold_set(struct roc_mcs *mcs, st

[PATCH 06/15] common/cnxk: add MACsec stats

2023-05-23 Thread Akhil Goyal
Added ROC APIs to get and clear MACsec statistics for SC/SECY/FLOW/PORT resources.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/meson.build |   1 +
 drivers/common/cnxk/roc_mbox.h  |  93 ++
 drivers/common/cnxk/roc_mcs.h   |  85 
 drivers/common/cnxk/roc_mcs_stats.c | 193 
 drivers/common/cnxk/version.map |   5 +
 5 files changed, 377 insertions(+)
 create mode 100644 drivers/common/cnxk/roc_mcs_stats.c
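
As an illustration (not part of the patch), a caller could read back a few
SECY counters; the getter name below assumes the roc_mcs_stats.c exports
follow a roc_mcs_<resource>_stats_get(mcs, req, rsp) pattern and that the
ROC structures mirror the mbox messages in this patch:

    struct roc_mcs_stats_req req = {0};
    struct roc_mcs_secy_stats stats = {0};
    int rc;

    req.id = secy_id; /* SECY index to query */
    req.dir = MCS_RX;

    rc = roc_mcs_secy_stats_get(mcs, &req, &stats);
    if (!rc)
        plt_info("secy %u: validated octets %" PRIu64 ", bad tag pkts %" PRIu64,
                 secy_id, stats.octet_validated_cnt, stats.pkt_badtag_cnt);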

diff --git a/drivers/common/cnxk/meson.build b/drivers/common/cnxk/meson.build
index 589baf74fe..79e10bac74 100644
--- a/drivers/common/cnxk/meson.build
+++ b/drivers/common/cnxk/meson.build
@@ -28,6 +28,7 @@ sources = files(
 'roc_mbox.c',
 'roc_mcs.c',
 'roc_mcs_sec_cfg.c',
+'roc_mcs_stats.c',
 'roc_ml.c',
 'roc_model.c',
 'roc_nix.c',
diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 9f9783ec92..1cbe66cc0c 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -291,6 +291,11 @@ struct mbox_msghdr {
M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, msg_rsp) \
M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, msg_rsp)  \
M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)   \
+   M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, mcs_flowid_stats) \
+   M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, mcs_secy_stats)   \
+   M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+   M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, mcs_port_stats)   \
+   M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp)   \
M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp)\
M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, msg_rsp)   \
 
@@ -859,6 +864,94 @@ struct mcs_set_pn_threshold {
uint64_t __io rsvd;
 };
 
+struct mcs_stats_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io id;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_flowid_stats {
+   struct mbox_msghdr hdr;
+   uint64_t __io tcam_hit_cnt;
+   uint64_t __io rsvd;
+};
+
+struct mcs_secy_stats {
+   struct mbox_msghdr hdr;
+   uint64_t __io ctl_pkt_bcast_cnt;
+   uint64_t __io ctl_pkt_mcast_cnt;
+   uint64_t __io ctl_pkt_ucast_cnt;
+   uint64_t __io ctl_octet_cnt;
+   uint64_t __io unctl_pkt_bcast_cnt;
+   uint64_t __io unctl_pkt_mcast_cnt;
+   uint64_t __io unctl_pkt_ucast_cnt;
+   uint64_t __io unctl_octet_cnt;
+   /* Valid only for RX */
+   uint64_t __io octet_decrypted_cnt;
+   uint64_t __io octet_validated_cnt;
+   uint64_t __io pkt_port_disabled_cnt;
+   uint64_t __io pkt_badtag_cnt;
+   uint64_t __io pkt_nosa_cnt;
+   uint64_t __io pkt_nosaerror_cnt;
+   uint64_t __io pkt_tagged_ctl_cnt;
+   uint64_t __io pkt_untaged_cnt;
+   uint64_t __io pkt_ctl_cnt;   /* CN10K-B */
+   uint64_t __io pkt_notag_cnt; /* CNF10K-B */
+   /* Valid only for TX */
+   uint64_t __io octet_encrypted_cnt;
+   uint64_t __io octet_protected_cnt;
+   uint64_t __io pkt_noactivesa_cnt;
+   uint64_t __io pkt_toolong_cnt;
+   uint64_t __io pkt_untagged_cnt;
+   uint64_t __io rsvd[4];
+};
+
+struct mcs_port_stats {
+   struct mbox_msghdr hdr;
+   uint64_t __io tcam_miss_cnt;
+   uint64_t __io parser_err_cnt;
+   uint64_t __io preempt_err_cnt; /* CNF10K-B */
+   uint64_t __io sectag_insert_err_cnt;
+   uint64_t __io rsvd[4];
+};
+
+struct mcs_sc_stats {
+   struct mbox_msghdr hdr;
+   /* RX */
+   uint64_t __io hit_cnt;
+   uint64_t __io pkt_invalid_cnt;
+   uint64_t __io pkt_late_cnt;
+   uint64_t __io pkt_notvalid_cnt;
+   uint64_t __io pkt_unchecked_cnt;
+   uint64_t __io pkt_delay_cnt;  /* CNF10K-B */
+   uint64_t __io pkt_ok_cnt; /* CNF10K-B */
+   uint64_t __io octet_decrypt_cnt;  /* CN10K-B */
+   uint64_t __io octet_validate_cnt; /* CN10K-B */
+   /* TX */
+   uint64_t __io pkt_encrypt_cnt;
+   uint64_t __io pkt_protected_cnt;
+   uint64_t __io octet_encrypt_cnt;   /* CN10K-B */
+   uint64_t __io octet_protected_cnt; /* CN10K-B */
+   uint64_t __io rsvd[4];
+};
+
+struct mcs_clear_stats {
+   struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS  1
+#define MCS_SC_STATS2
+#define MCS_SA_STATS3
+#define MCS_PORT_STATS  4
+   uint8_t __io type; /* FLOWID, SECY, SC, SA, PORT */
+   /* type = PORT, If id = FF(invalid) port no is derived from pcifu

[PATCH 07/15] common/cnxk: add MACsec interrupt APIs

2023-05-23 Thread Akhil Goyal
Added ROC APIs to support various MACsec interrupts.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_dev.c  |  86 +
 drivers/common/cnxk/roc_mbox.h |  37 +++-
 drivers/common/cnxk/roc_mcs.c  | 117 +++
 drivers/common/cnxk/roc_mcs.h  | 144 +
 drivers/common/cnxk/roc_mcs_priv.h |   8 ++
 drivers/common/cnxk/version.map|   3 +
 6 files changed, 394 insertions(+), 1 deletion(-)
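
For illustration (not part of the patch): a consumer can subscribe to the
soft PN expiry event and schedule a rekey from its handler. The
roc_mcs_event_cb_register() name and argument order are assumptions based
on the event descriptors and the mcs_event_cb_process() dispatch added
below:

    static int
    rx_pn_soft_exp_cb(void *userdata, struct roc_mcs_event_desc *desc, void *cb_arg)
    {
        PLT_SET_USED(userdata);
        PLT_SET_USED(cb_arg);
        /* desc->metadata.sa_idx identifies the SA whose PN crossed the threshold */
        plt_warn("SA %u reached PN threshold, schedule rekey", desc->metadata.sa_idx);
        return 0;
    }

    rc = roc_mcs_event_cb_register(mcs, ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP,
                                   rx_pn_soft_exp_cb, NULL, NULL);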

diff --git a/drivers/common/cnxk/roc_dev.c b/drivers/common/cnxk/roc_dev.c
index 2388237186..199d37d703 100644
--- a/drivers/common/cnxk/roc_dev.c
+++ b/drivers/common/cnxk/roc_dev.c
@@ -500,6 +500,91 @@ pf_vf_mbox_send_up_msg(struct dev *dev, void *rec_msg)
}
 }
 
+static int
+mbox_up_handler_mcs_intr_notify(struct dev *dev, struct mcs_intr_info *info, struct msg_rsp *rsp)
+{
+   struct roc_mcs_event_desc desc = {0};
+   struct roc_mcs *mcs;
+
+   plt_base_dbg("pf:%d/vf:%d msg id 0x%x (%s) from: pf:%d/vf:%d", dev_get_pf(dev->pf_func),
+                dev_get_vf(dev->pf_func), info->hdr.id, mbox_id2name(info->hdr.id),
+                dev_get_pf(info->hdr.pcifunc), dev_get_vf(info->hdr.pcifunc));
+
+   mcs = roc_mcs_dev_get(info->mcs_id);
+   if (!mcs)
+   goto exit;
+
+   if (info->intr_mask) {
+   switch (info->intr_mask) {
+   case MCS_CPM_RX_SECTAG_V_EQ1_INT:
+   desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+   desc.subtype = ROC_MCS_EVENT_RX_SECTAG_V_EQ1;
+   break;
+   case MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT:
+   desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+   desc.subtype = ROC_MCS_EVENT_RX_SECTAG_E_EQ0_C_EQ1;
+   break;
+   case MCS_CPM_RX_SECTAG_SL_GTE48_INT:
+   desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+   desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SL_GTE48;
+   break;
+   case MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT:
+   desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+   desc.subtype = ROC_MCS_EVENT_RX_SECTAG_ES_EQ1_SC_EQ1;
+   break;
+   case MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT:
+   desc.type = ROC_MCS_EVENT_SECTAG_VAL_ERR;
+   desc.subtype = ROC_MCS_EVENT_RX_SECTAG_SC_EQ1_SCB_EQ1;
+   break;
+   case MCS_CPM_RX_PACKET_XPN_EQ0_INT:
+   desc.type = ROC_MCS_EVENT_RX_SA_PN_HARD_EXP;
+   desc.metadata.sa_idx = info->sa_id;
+   break;
+   case MCS_CPM_RX_PN_THRESH_REACHED_INT:
+   desc.type = ROC_MCS_EVENT_RX_SA_PN_SOFT_EXP;
+   desc.metadata.sa_idx = info->sa_id;
+   break;
+   case MCS_CPM_TX_PACKET_XPN_EQ0_INT:
+   desc.type = ROC_MCS_EVENT_TX_SA_PN_HARD_EXP;
+   desc.metadata.sa_idx = info->sa_id;
+   break;
+   case MCS_CPM_TX_PN_THRESH_REACHED_INT:
+   desc.type = ROC_MCS_EVENT_TX_SA_PN_SOFT_EXP;
+   desc.metadata.sa_idx = info->sa_id;
+   break;
+   case MCS_CPM_TX_SA_NOT_VALID_INT:
+   desc.type = ROC_MCS_EVENT_SA_NOT_VALID;
+   break;
+   case MCS_BBE_RX_DFIFO_OVERFLOW_INT:
+   case MCS_BBE_TX_DFIFO_OVERFLOW_INT:
+   desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+   desc.subtype = ROC_MCS_EVENT_DATA_FIFO_OVERFLOW;
+   desc.metadata.lmac_id = info->lmac_id;
+   break;
+   case MCS_BBE_RX_PLFIFO_OVERFLOW_INT:
+   case MCS_BBE_TX_PLFIFO_OVERFLOW_INT:
+   desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+   desc.subtype = ROC_MCS_EVENT_POLICY_FIFO_OVERFLOW;
+   desc.metadata.lmac_id = info->lmac_id;
+   break;
+   case MCS_PAB_RX_CHAN_OVERFLOW_INT:
+   case MCS_PAB_TX_CHAN_OVERFLOW_INT:
+   desc.type = ROC_MCS_EVENT_FIFO_OVERFLOW;
+   desc.subtype = ROC_MCS_EVENT_PKT_ASSM_FIFO_OVERFLOW;
+   desc.metadata.lmac_id = info->lmac_id;
+   break;
+   default:
+   goto exit;
+   }
+
+   mcs_event_cb_process(mcs, &desc);
+   }
+
+exit:
+   rsp->hdr.rc = 0;
+   return 0;
+}
+
 static int
 mbox_up_handler_cgx_link_event(struct dev *dev, struct cgx_link_info_msg *msg,
   struct msg_rsp *rsp)
@@ -588,6 +673,7 @@ mbox_process_msgs_up(struct dev *dev, struct

[PATCH 08/15] common/cnxk: add MACsec port configuration

2023-05-23 Thread Akhil Goyal
Added ROC APIs for MACsec port configuration.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h  |  40 
 drivers/common/cnxk/roc_mcs.c   | 345 
 drivers/common/cnxk/roc_mcs.h   |  48 +
 drivers/common/cnxk/version.map |   4 +
 4 files changed, 437 insertions(+)
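
A short usage sketch (not part of the patch), assuming an mcs handle and a
port_id; it programs a port and reads the configuration back through the
two new APIs (field names as in roc_mcs_port_cfg_set/get below):

    struct roc_mcs_port_cfg_set_req set = {0};
    struct roc_mcs_port_cfg_get_req get = {0};
    struct roc_mcs_port_cfg_get_rsp rsp = {0};
    int rc;

    set.port_id = port_id;
    set.port_mode = 0; /* assumed: 0 = MCS processing, 1 = bypass, as in mcs_set_lmac_mode */
    set.custom_hdr_enb = 0;
    rc = roc_mcs_port_cfg_set(mcs, &set);
    if (rc)
        return rc;

    get.port_id = port_id;
    rc = roc_mcs_port_cfg_get(mcs, &get, &rsp);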

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 6e2b32a43f..96515deafd 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -299,6 +299,9 @@ struct mbox_msghdr {
M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp)   \
M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp)\
M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, msg_rsp)   \
+   M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp)   \
+   M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)   \
+   M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, mcs_port_cfg_get_rsp)  \
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES   
\
@@ -899,6 +902,43 @@ struct mcs_set_pn_threshold {
uint64_t __io rsvd;
 };
 
+struct mcs_port_cfg_set_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io cstm_tag_rel_mode_sel;
+   uint8_t __io custom_hdr_enb;
+   uint8_t __io fifo_skid;
+   uint8_t __io lmac_mode;
+   uint8_t __io lmac_id;
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io lmac_id;
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+   struct mbox_msghdr hdr;
+   uint8_t __io cstm_tag_rel_mode_sel;
+   uint8_t __io custom_hdr_enb;
+   uint8_t __io fifo_skid;
+   uint8_t __io lmac_mode;
+   uint8_t __io lmac_id;
+   uint8_t __io mcs_id;
+   uint64_t __io rsvd;
+};
+
+struct mcs_port_reset_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io reset;
+   uint8_t __io mcs_id;
+   uint8_t __io lmac_id;
+   uint64_t __io rsvd;
+};
+
 struct mcs_stats_req {
struct mbox_msghdr hdr;
uint8_t __io id;
diff --git a/drivers/common/cnxk/roc_mcs.c b/drivers/common/cnxk/roc_mcs.c
index c2f0a46f23..32cb8d106d 100644
--- a/drivers/common/cnxk/roc_mcs.c
+++ b/drivers/common/cnxk/roc_mcs.c
@@ -80,6 +80,25 @@ roc_mcs_active_lmac_set(struct roc_mcs *mcs, struct roc_mcs_set_active_lmac *lma
return mbox_process_msg(mcs->mbox, (void *)&rsp);
 }
 
+static int
mcs_port_reset_set(struct roc_mcs *mcs, struct roc_mcs_port_reset_req *port, uint8_t reset)
+{
+   struct mcs_port_reset_req *req;
+   struct msg_rsp *rsp;
+
+   MCS_SUPPORT_CHECK;
+
+   req = mbox_alloc_msg_mcs_port_reset(mcs->mbox);
+   if (req == NULL)
+   return -ENOMEM;
+
+   req->reset = reset;
+   req->lmac_id = port->port_id;
+   req->mcs_id = mcs->idx;
+
+   return mbox_process_msg(mcs->mbox, (void *)&rsp);
+}
+
 int
 roc_mcs_lmac_mode_set(struct roc_mcs *mcs, struct roc_mcs_set_lmac_mode *port)
 {
@@ -125,6 +144,64 @@ roc_mcs_pn_threshold_set(struct roc_mcs *mcs, struct roc_mcs_set_pn_threshold *p
return mbox_process_msg(mcs->mbox, (void *)&rsp);
 }
 
+int
+roc_mcs_port_cfg_set(struct roc_mcs *mcs, struct roc_mcs_port_cfg_set_req *req)
+{
+   struct mcs_port_cfg_set_req *set_req;
+   struct msg_rsp *rsp;
+
+   MCS_SUPPORT_CHECK;
+
+   if (req == NULL)
+   return -EINVAL;
+
+   set_req = mbox_alloc_msg_mcs_port_cfg_set(mcs->mbox);
+   if (set_req == NULL)
+   return -ENOMEM;
+
+   set_req->cstm_tag_rel_mode_sel = req->cstm_tag_rel_mode_sel;
+   set_req->custom_hdr_enb = req->custom_hdr_enb;
+   set_req->fifo_skid = req->fifo_skid;
+   set_req->lmac_mode = req->port_mode;
+   set_req->lmac_id = req->port_id;
+   set_req->mcs_id = mcs->idx;
+
+   return mbox_process_msg(mcs->mbox, (void *)&rsp);
+}
+
+int
+roc_mcs_port_cfg_get(struct roc_mcs *mcs, struct roc_mcs_port_cfg_get_req *req,
+struct roc_mcs_port_cfg_get_rsp *rsp)
+{
+   struct mcs_port_cfg_get_req *get_req;
+   struct mcs_port_cfg_get_rsp *get_rsp;
+   int rc;
+
+   MCS_SUPPORT_CHECK;
+
+   if (req == NULL)
+   return -EINVAL;
+
+   get_req = mbox_alloc_msg_mcs_port_cfg_get(mcs->mbox);
+   if (get_req == NULL)
+   return -ENOMEM;
+
+   get_req->lmac_id = req->port_id;
+   get_req->mcs_id = mcs->idx;
+
+   rc = mbox_process_msg(mcs->mbox, (void *)&get_rsp);
+   if (rc)
+   return rc;
+
+   rsp->cstm_tag_rel_mode_sel = get_rsp->cstm_tag_re

[PATCH 09/15] common/cnxk: add MACsec control port configuration

2023-05-23 Thread Akhil Goyal
Added ROC APIs to configure MACsec control port.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h  |  72 
 drivers/common/cnxk/roc_mcs.c   | 117 
 drivers/common/cnxk/roc_mcs.h   |  65 ++
 drivers/common/cnxk/version.map |   4 ++
 4 files changed, 258 insertions(+)
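
For illustration (not part of the patch), allocating a DA-based control
packet rule and programming its match data; the ROC request fields are
assumed to mirror the mbox messages in this patch, and the
roc_mcs_ctrl_pkt_rule_write() name follows the MCS_CTRL_PKT_RULE_WRITE
mbox (also an assumption):

    struct roc_mcs_alloc_ctrl_pkt_rule_req alloc = {0};
    struct roc_mcs_alloc_ctrl_pkt_rule_rsp rsp = {0};
    struct roc_mcs_ctrl_pkt_rule_write_req wr = {0};
    int rc;

    alloc.rule_type = MCS_CTRL_PKT_RULE_TYPE_DA; /* match on destination MAC */
    alloc.dir = MCS_RX;
    rc = roc_mcs_alloc_ctrl_pkt_rule(mcs, &alloc, &rsp);
    if (rc)
        return rc;

    wr.rule_idx = rsp.rule_idx;
    wr.rule_type = rsp.rule_type;
    wr.dir = MCS_RX;
    wr.data0 = dmac; /* hypothetical u64-packed DA to treat as control traffic */
    rc = roc_mcs_ctrl_pkt_rule_write(mcs, &wr);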

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index 96515deafd..ad97ceffb8 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -299,9 +299,17 @@ struct mbox_msghdr {
M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp)   \
M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp)\
M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, msg_rsp)   \
+   M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, mcs_alloc_ctrl_pkt_rule_req,   \
+     mcs_alloc_ctrl_pkt_rule_rsp)   \
+   M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, mcs_free_ctrl_pkt_rule_req,  \
+     msg_rsp)   \
+   M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, mcs_ctrl_pkt_rule_write_req,   \
+     msg_rsp)   \
M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp)   \
M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)   \
M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, mcs_port_cfg_get_rsp)  \
+   M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, mcs_custom_tag_cfg_get_req,  \
+     mcs_custom_tag_cfg_get_rsp)   \
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES   
\
@@ -902,6 +910,53 @@ struct mcs_set_pn_threshold {
uint64_t __io rsvd;
 };
 
+enum mcs_ctrl_pkt_rule_type {
+   MCS_CTRL_PKT_RULE_TYPE_ETH,
+   MCS_CTRL_PKT_RULE_TYPE_DA,
+   MCS_CTRL_PKT_RULE_TYPE_RANGE,
+   MCS_CTRL_PKT_RULE_TYPE_COMBO,
+   MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io rule_type;
+   uint8_t __io mcs_id; /* MCS block ID */
+   uint8_t __io dir;/* Macsec ingress or egress side */
+   uint64_t __io rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+   struct mbox_msghdr hdr;
+   uint8_t __io rule_idx;
+   uint8_t __io rule_type;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io rule_idx;
+   uint8_t __io rule_type;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint8_t __io all; /* Free all the rule resources */
+   uint64_t __io rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io data0;
+   uint64_t __io data1;
+   uint64_t __io data2;
+   uint8_t __io rule_idx;
+   uint8_t __io rule_type;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 struct mcs_port_cfg_set_req {
struct mbox_msghdr hdr;
uint8_t __io cstm_tag_rel_mode_sel;
@@ -931,6 +986,23 @@ struct mcs_port_cfg_get_rsp {
uint64_t __io rsvd;
 };
 
+struct mcs_custom_tag_cfg_get_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+   struct mbox_msghdr hdr;
+   uint16_t __io cstm_etype[8];
+   uint8_t __io cstm_indx[8];
+   uint8_t __io cstm_etype_en;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+   uint64_t __io rsvd;
+};
+
 struct mcs_port_reset_req {
struct mbox_msghdr hdr;
uint8_t __io reset;
diff --git a/drivers/common/cnxk/roc_mcs.c b/drivers/common/cnxk/roc_mcs.c
index 32cb8d106d..6536ca7fb7 100644
--- a/drivers/common/cnxk/roc_mcs.c
+++ b/drivers/common/cnxk/roc_mcs.c
@@ -144,6 +144,88 @@ roc_mcs_pn_threshold_set(struct roc_mcs *mcs, struct roc_mcs_set_pn_threshold *p
return mbox_process_msg(mcs->mbox, (void *)&rsp);
 }
 
+int
+roc_mcs_alloc_ctrl_pkt_rule(struct roc_mcs *mcs, struct roc_mcs_alloc_ctrl_pkt_rule_req *req,
+   struct roc_mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+   struct mcs_alloc_ctrl_pkt_rule_req *rule_req;
+   struct mcs_alloc_ctrl_pkt_rule_rsp *rule_rsp;
+   int rc;
+
+   MCS_SUPPORT_CHECK;
+
+   if (req == NULL || rsp == NULL)

[PATCH 10/15] common/cnxk: add MACsec FIPS mbox

2023-05-23 Thread Akhil Goyal
Added mbox messages and ROC structures for MACsec FIPS configuration.

Signed-off-by: Ankur Dwivedi 
Signed-off-by: Vamsi Attunuru 
Signed-off-by: Akhil Goyal 
---
 drivers/common/cnxk/roc_mbox.h | 74 ++
 drivers/common/cnxk/roc_mcs.h  | 69 +++
 2 files changed, 143 insertions(+)
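
Only the structures land here; as a sketch of intended use (the
roc_mcs_fips_key_set() wrapper name is an assumption, the actual FIPS API
is expected to follow in a later patch), a known-answer test could load a
256-bit SAK like this:

    uint64_t sak[4] = {0}; /* hypothetical KAT vector, little-endian 64-bit words */
    struct roc_mcs_fips_key key = {0};
    int rc;

    key.sak_bits255_192 = sak[3];
    key.sak_bits191_128 = sak[2];
    key.sak_bits127_64 = sak[1];
    key.sak_bits63_0 = sak[0];
    key.sak_len = 32; /* 32-byte SAK -> AES-256 */
    key.dir = MCS_TX;

    rc = roc_mcs_fips_key_set(mcs, &key);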

diff --git a/drivers/common/cnxk/roc_mbox.h b/drivers/common/cnxk/roc_mbox.h
index ad97ceffb8..7057823112 100644
--- a/drivers/common/cnxk/roc_mbox.h
+++ b/drivers/common/cnxk/roc_mbox.h
@@ -310,6 +310,15 @@ struct mbox_msghdr {
M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, mcs_port_cfg_get_rsp)  \
M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, mcs_custom_tag_cfg_get_req,  \
  mcs_custom_tag_cfg_get_rsp)   \
+   M(MCS_FIPS_RESET, 0xa040, mcs_fips_reset, mcs_fips_req, msg_rsp)   \
+   M(MCS_FIPS_MODE_SET, 0xa041, mcs_fips_mode_set, mcs_fips_mode_req, msg_rsp)\
+   M(MCS_FIPS_CTL_SET, 0xa042, mcs_fips_ctl_set, mcs_fips_ctl_req, msg_rsp)   \
+   M(MCS_FIPS_IV_SET, 0xa043, mcs_fips_iv_set, mcs_fips_iv_req, msg_rsp)   \
+   M(MCS_FIPS_CTR_SET, 0xa044, mcs_fips_ctr_set, mcs_fips_ctr_req, msg_rsp)   \
+   M(MCS_FIPS_KEY_SET, 0xa045, mcs_fips_key_set, mcs_fips_key_req, msg_rsp)   \
+   M(MCS_FIPS_BLOCK_SET, 0xa046, mcs_fips_block_set, mcs_fips_block_req, msg_rsp) \
+   M(MCS_FIPS_START, 0xa047, mcs_fips_start, mcs_fips_req, msg_rsp)   \
+   M(MCS_FIPS_RESULT_GET, 0xa048, mcs_fips_result_get, mcs_fips_req, mcs_fips_result_rsp)
 
 /* Messages initiated by AF (range 0xC00 - 0xDFF) */
 #define MBOX_UP_CGX_MESSAGES   
\
@@ -1099,6 +1108,71 @@ struct mcs_clear_stats {
uint8_t __io all; /* All resources stats mapped to PF are cleared */
 };
 
+struct mcs_fips_req {
+   struct mbox_msghdr hdr;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_mode_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io mode;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_ctl_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io ctl;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_iv_req {
+   struct mbox_msghdr hdr;
+   uint32_t __io iv_bits95_64;
+   uint64_t __io iv_bits63_0;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_ctr_req {
+   struct mbox_msghdr hdr;
+   uint32_t __io fips_ctr;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_key_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io sak_bits255_192;
+   uint64_t __io sak_bits191_128;
+   uint64_t __io sak_bits127_64;
+   uint64_t __io sak_bits63_0;
+   uint64_t __io hashkey_bits127_64;
+   uint64_t __io hashkey_bits63_0;
+   uint8_t __io sak_len;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_block_req {
+   struct mbox_msghdr hdr;
+   uint64_t __io blk_bits127_64;
+   uint64_t __io blk_bits63_0;
+   uint8_t __io mcs_id;
+   uint8_t __io dir;
+};
+
+struct mcs_fips_result_rsp {
+   struct mbox_msghdr hdr;
+   uint64_t __io blk_bits127_64;
+   uint64_t __io blk_bits63_0;
+   uint64_t __io icv_bits127_64;
+   uint64_t __io icv_bits63_0;
+   uint8_t __io result_pass;
+};
+
 /* NPA mbox message formats */
 
 /* NPA mailbox error codes
diff --git a/drivers/common/cnxk/roc_mcs.h b/drivers/common/cnxk/roc_mcs.h
index c9b57ed1df..88c8f3da27 100644
--- a/drivers/common/cnxk/roc_mcs.h
+++ b/drivers/common/cnxk/roc_mcs.h
@@ -426,6 +426,56 @@ struct roc_mcs_event_desc {
union roc_mcs_event_data metadata;
 };
 
+struct roc_mcs_fips_req {
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_mode {
+   uint64_t mode;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_ctl {
+   uint64_t ctl;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_iv {
+   uint32_t iv_bits95_64;
+   uint64_t iv_bits63_0;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_ctr {
+   uint32_t fips_ctr;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_key {
+   uint64_t sak_bits255_192;
+   uint64_t sak_bits191_128;
+   uint64_t sak_bits127_64;
+   uint64_t sak_bits63_0;
+   uint64_t hashkey_bits127_64;
+   uint64_t hashkey_bits63_0;
+   uint8_t sak_len;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_block {
+   uint64_t blk_bits127_64;
+   uint64_t blk_bits63_0;
+   uint8_t dir;
+};
+
+struct roc_mcs_fips_result_rsp {
+   uint64_t blk_bits127_64;
+   uint64_t blk_bits63_0;
+   uint64_t icv_bits127_64;
+   uint64_t icv_bits63_0;
+   uint8_t result_pass;
+};
+
 /** User application callback to be regist
