[PATCH 00/19] net/cpfl: support port representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

1. Refine code to prepare for port representor support.
2. Support port representor.

Beilei Xing (19):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: add cp channel
  net/cpfl: enable vport mapping
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support vport list/info get
  net/cpfl: update vport info before creating representor
  net/cpfl: refine handle virtual channel message
  net/cpfl: add exceptional vport
  net/cpfl: support representor Rx/Tx queue setup
  net/cpfl: support link update for representor
  net/cpfl: add stats ops for representor
  common/idpf: refine inline function
  net/cpfl: support representor data path
  net/cpfl: support dispatch process
  net/cpfl: add dispatch service
  doc: update release notes for representor

 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/common/idpf/idpf_common_rxtx.c | 246 ---
 drivers/common/idpf/idpf_common_rxtx.h | 246 +++
 drivers/common/idpf/version.map|   3 +
 drivers/net/cpfl/cpfl_cpchnl.h | 313 +
 drivers/net/cpfl/cpfl_ethdev.c | 884 ---
 drivers/net/cpfl/cpfl_ethdev.h | 120 +++-
 drivers/net/cpfl/cpfl_representor.c| 935 +
 drivers/net/cpfl/cpfl_representor.h|  26 +
 drivers/net/cpfl/cpfl_rxtx.c   | 268 +++
 drivers/net/cpfl/cpfl_rxtx.h   |  19 +
 drivers/net/cpfl/cpfl_vchnl.c  |  72 ++
 drivers/net/cpfl/meson.build   |   4 +-
 13 files changed, 2783 insertions(+), 356 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1



[PATCH 01/19] net/cpfl: refine devargs parse and process

2023-08-09 Thread beilei . xing
From: Beilei Xing 

1. Keep devargs in adapter.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.
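
For clarity, a hedged sketch of the resulting split is shown below. The three
functions are the ones touched or added by this patch; the wrapper itself is
only illustrative and omits the surrounding adapter init/cleanup:

    /* Sketch only: parse first, validate/process later, then create vports. */
    static int
    cpfl_probe_flow_sketch(struct rte_pci_device *pci_dev,
                           struct cpfl_adapter_ext *adapter)
    {
        int ret;

        /* 1. parse: only fill adapter->devargs from the kvargs string */
        ret = cpfl_parse_devargs(pci_dev, adapter);
        if (ret != 0)
            return ret;

        /* 2. process: validate the requested vports against adapter state */
        ret = cpfl_vport_devargs_process(adapter);
        if (ret != 0)
            return ret;

        /* 3. create the requested vports */
        return cpfl_vport_create(pci_dev, adapter);
    }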

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++---
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void 
*args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter,
-  struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
+   struct cpfl_devargs *cpfl_args = &adapter->devargs;
struct rte_kvargs *kvlist;
-   int i, ret;
+   int ret;
 
cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
-   /* check parsed devargs */
-   if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-   adapter->max_vport_nb) {
-   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-adapter->max_vport_nb);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-   if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
-cpfl_args->req_vports[i], 
adapter->max_vport_nb - 1);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-   PMD_INIT_LOG(ERR, "Vport %d has been requested",
-cpfl_args->req_vports[i]);
-   ret = -EINVAL;
-   goto fail;
-   }
-   }
-
 fail:
rte_kvargs_free(kvlist);
return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext 
*adapter)
adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+   struct cpfl_devargs *devargs = &adapter->devargs;
+   int i;
+
+   /* refine vport number, at least 1 vport */
+   if (devargs->req_vport_nb == 0) {
+   devargs->req_vport_nb = 1;
+   devargs->req_vports[0] = 0;
+   }
+
+   /* check parsed devargs */
+   if (adapter->cur_vport_nb + devargs->req_vport_nb >
+   adapter->max_vport_nb) {
+   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+adapter->max_vport_nb);
+   return -EINVAL;
+   }
+
+   for (i = 0; i < devargs->req_vport_nb; i++) {
+   if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
+devargs->req_vports[i], 
adapter->max_vport_nb - 1);
+   return -EINVAL;
+   }
+
+   if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+   PMD_INIT_LOG(ERR, "Vport %d has been requested",
+devargs->req_vports[i]);
+   return -EINVAL;
+   }
+   }
+
+   return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   struct cpfl_vport_param vport_param;
+   char name[RTE_ETH_NAME_MAX_LEN];
+   int ret, i;
+
+   for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+   vport_param.adapter = adapter;
+   vport_param.devarg_id = adapter->devargs.req_vports[i];
+   vport_param.idx = cpfl_vport_idx_alloc(adapter);
+   if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+   PMD_INIT_LOG(ERR, "No space for vport %u", 
vport_param.devarg_id);
+   break;
+   }
+   snprintf(name, sizeof(name), "net_%s_vport_%d",
+pci_dev->device.name,
+adapter->devargs.req_vports[i]);
+   ret = rte_eth_dev_create(&pci_dev->device, name,
+   sizeof(struct cpfl_vport),
+   NULL, NULL, cpfl_dev_vport_init,
+   &vport_param);
+   if (ret != 0)
+   P

[PATCH 02/19] net/cpfl: introduce interface structure

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Introduce the cpfl interface structure to distinguish between a vport and
a port representor.
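
As a hedged usage sketch (not part of this patch), later code can branch on
the interface type through the new common header and macros, for example:

    /* Sketch: tell a representor apart from a vport via struct cpfl_itf.
     * CPFL_DEV_TO_ITF() and cpfl_itf come from this patch; the helper
     * function itself is only an illustration.
     */
    static bool
    cpfl_dev_is_representor(struct rte_eth_dev *dev)
    {
        struct cpfl_itf *itf = CPFL_DEV_TO_ITF(dev);

        return itf->type == CPFL_ITF_TYPE_REPRESENTOR;
    }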

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 16 
 2 files changed, 19 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void 
*init_params)
goto err;
}
 
+   cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+   cpfl_vport->itf.adapter = adapter;
+   cpfl_vport->itf.data = dev->data;
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..53e45035e8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,19 @@ struct p2p_queue_chunks_info {
uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+   CPFL_ITF_TYPE_VPORT,
+   CPFL_ITF_TYPE_REPRESENTOR
+};
+
+struct cpfl_itf {
+   enum cpfl_itf_type type;
+   struct cpfl_adapter_ext *adapter;
+   void *data;
+};
+
 struct cpfl_vport {
+   struct cpfl_itf itf;
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +136,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev) \
+   ((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)   \
+   ((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1



[PATCH 03/19] net/cpfl: add cp channel

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add cpchnl header file.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_cpchnl.h | 313 +
 1 file changed, 313 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 00..5633fba15e
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (C) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief  Command Opcodes
+ *  Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+   /* vport info */
+   CPCHNL2_OP_GET_VPORT_LIST   = 0x8025,
+   CPCHNL2_OP_GET_VPORT_INFO   = 0x8026,
+
+   /* DPHMA Event notifications */
+   CPCHNL2_OP_EVENT= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT  8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS  6
+
+#define CPCHNL2_FUNC_TYPE_PF   0
+#define CPCHNL2_FUNC_TYPE_SRIOV1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED   0
+#define CPCHNL2_VPORT_STATUS_ENABLED   1
+#define CPCHNL2_VPORT_STATUS_DISABLED  2
+#define CPCHNL2_VPORT_STATUS_DESTROYED 3
+
+/* Queue Groups Extension */
+/**/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+struct cpchnl2_queue_chunk {
+   u32 type;  /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum 
nsl_lan_queue_type */
+   u32 start_queue_id;
+   u32 num_queues;
+   u8 pad[4];
+};
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+   u16 num_chunks;
+   u8 reserved[6];
+   struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+
+struct cpchnl2_rx_queue_group_info {
+   /* User can ask to update rss_lut size originally allocated
+* by CreateVport command. New size will be returned if allocation 
succeeded,
+* otherwise original rss_size from CreateVport will be returned.
+*/
+   u16 rss_lut_size;
+   u8 pad[6]; /*Future extension purpose*/
+};
+
+struct cpchnl2_tx_queue_group_info {
+   u8 tx_tc; /*TX TC queue group will be connected to*/
+   /* Each group can have its own priority, value 0-7, while each group 
with unique
+* priority is strict priority. It can be single set of queue groups 
which configured with
+* same priority, then they are assumed part of WFQ arbitration group 
and are expected to be
+* assigned with weight.
+*/
+   u8 priority;
+   u8 is_sp; /*Determines if queue group is expected to be Strict Priority 
according to its priority*/
+   u8 pad;
+   /* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration 
set.
+* The weights of the groups are independent of each other. Possible 
values: 1-200.
+*/
+   u16 pir_weight;
+   /* Future extension purpose for CIR only */
+   u8 cir_pad[2];
+   u8 pad2[8]; /* Future extension purpose*/
+};
+
+struct cpchnl2_queue_group_id {
+   /* Queue group ID - depended on it's type:
+* Data & p2p - is an index which is relative to Vport.
+* Config & Mailbox - is an ID which is relative to func.
+* This ID is used in future calls, i.e. delete.
+* Requested by host and assigned by Control plane.
+*/
+   u16 queue_group_id;
+   /* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+   u16 queue_group_type;
+   u8 pad[4];
+};
+
+struct cpchnl2_queue_group_info {
+   /* IN */
+   struct cpchnl2_queue_group_id qg_id;
+
+   /* IN, Number of queues of different types in the group. */
+   u16 num_tx_q;
+   u16 num_tx_complq;
+   u16 num_rx_q;
+   u16 num_rx_bufq;
+
+   struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+   struct cpchnl2_rx_queue_group_info rx_q_grp_info;
+
+   u8 egress_port;
+   u8 pad[39]; /*Future extension purpose*/
+   struct cpchnl2_queue_grp_chunks chunks;
+};
+
+struct cpchnl2_queue_groups {
+   u16 num_queue_groups; /* Number of queue groups in struct below */
+   u8 pad[6];
+   /* group information , number is determined by param above */
+   struct cpchnl2_queue_group_info groups[MAX_Q_VPORT_GROUPS];
+};
+
+/**
+ * @brief function types
+ */
+enum cpchnl2_func_type {
+   CPCHNL2_FTYPE_LAN_PF = 0,
+   CPCHNL2_FTYPE_LAN_VF = 1,
+   CPCHNL2_FTYPE_LAN_MAX
+};
+
+/**
+ * @brief containing vport id & type
+ */
+struct cpchnl2_vport

[PATCH 04/19] net/cpfl: enable vport mapping

2023-08-09 Thread beilei . xing
From: Beilei Xing 

1. Handle cpchnl events for vport create/destroy.
2. Use a hash table to store the vport_id to vport_info mapping.
3. Use a spinlock for thread safety.
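
A minimal sketch of the mapping setup described above is given below. The
exact cpfl_vport_map_init() body is not shown in this excerpt, so the entry
count and the helper name are assumptions; the hash parameters mirror the
whitelist hash added later in this series:

    /* Sketch only: vport_id -> vport_info map guarded by a spinlock. */
    #define CPFL_VPORT_MAP_HASH_ENTRY_NUM 2048  /* assumed size */

    static int
    cpfl_vport_map_init_sketch(struct cpfl_adapter_ext *adapter, const char *name)
    {
        struct rte_hash_parameters params = {
            .name = name,
            .entries = CPFL_VPORT_MAP_HASH_ENTRY_NUM,
            .key_len = sizeof(struct cpfl_vport_id),
            .hash_func = rte_hash_crc,
            .socket_id = SOCKET_ID_ANY,
        };

        rte_spinlock_init(&adapter->vport_map_lock);
        adapter->vport_map_hash = rte_hash_create(&params);

        return adapter->vport_map_hash == NULL ? -EINVAL : 0;
    }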

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +
 drivers/net/cpfl/cpfl_ethdev.h |  21 -
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..17a69c16fe 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1492,6 +1493,108 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t 
*msg, uint16_t msglen)
}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vport_identity,
+  struct cpchnl2_vport_info *vport_info)
+{
+   struct cpfl_vport_info *info = NULL;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret >= 0) {
+   PMD_DRV_LOG(WARNING, "vport already exist, overwrite info 
anyway");
+   /* overwrite info */
+   if (info)
+   info->vport_info = *vport_info;
+   goto fini;
+   }
+
+   info = rte_zmalloc(NULL, sizeof(*info), 0);
+   if (info == NULL) {
+   PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+   ret = -ENOMEM;
+   goto err;
+   }
+
+   info->vport_info = *vport_info;
+
+   ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, 
info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+   rte_free(info);
+   goto err;
+   }
+
+fini:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return 0;
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id 
*vport_identity)
+{
+   struct cpfl_vport_info *info;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "vport id not exist");
+   goto err;
+   }
+
+   rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   rte_free(info);
+
+   return 0;
+
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
+{
+   struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info 
*)msg;
+   struct cpchnl2_vport_info *info;
+   struct cpfl_vport_id vport_identity = { 0 };
+
+   if (msglen < sizeof(struct cpchnl2_event_info)) {
+   PMD_DRV_LOG(ERR, "Error event");
+   return;
+   }
+
+   switch (cpchnl2_event->header.type) {
+   case CPCHNL2_EVENT_VPORT_CREATED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_created.vport.vport_id;
+   info = &cpchnl2_event->data.vport_created.info;
+   vport_identity.func_type = info->func_type;
+   vport_identity.pf_id = info->pf_id;
+   vport_identity.vf_id = info->vf_id;
+   if (cpfl_vport_info_create(adapter, &vport_identity, info))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_CREATED");
+   break;
+   case CPCHNL2_EVENT_VPORT_DESTROYED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_destroyed.vport.vport_id;
+   vport_identity.func_type = 
cpchnl2_event->data.vport_destroyed.func.func_type;
+   vport_identity.pf_id = 
cpchnl2_event->data.vport_destroyed.func.pf_id;
+   vport_identity.vf_id = 
cpchnl2_event->data.vport_destroyed.func.vf_id;
+   if (cpfl_vport_info_destroy(adapter, &vport_identity))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_DESTROY");
+   break;
+   default:
+   PMD_DRV_LOG(ERR, " unknown event received %u", 
cpchnl2_event->header.type);
+   break;
+   }
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
}
cpfl_handle_event_msg(vport, base->mbx_resp,
 

[PATCH 05/19] net/cpfl: parse representor devargs

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Format:

[[c<control_id>]pf<pf_id>]vf<vf_id>

  control_id:

  0 : xeon (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- xeon > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if the default values are omitted.

representor=c0pf0
  -- xeon > apf
     same as pf0 if the default value is omitted.

representor=c1pf0
  -- acc > apf

Multiple representor devargs are supported, e.g. to create 4 representors
for 4 VFs on the xeon APF and one representor for the acc APF:

  -- representor=vf[0-3],representor=c1pf0
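
For illustration, a hedged sketch of how the list parser added in this patch
expands a bracketed range. The values are purely illustrative, and the call
would live inside cpfl_ethdev.c, where process_list() is defined as static:

    /* Sketch: expanding "[0-3,7]" with the range/list parser below. */
    uint16_t vfs[8];
    uint16_t nb_vfs = 0;
    const char *end;

    end = process_list("[0-3,7]", vfs, &nb_vfs, RTE_DIM(vfs));
    /* on success: end points past ']', nb_vfs == 5, vfs == {0, 1, 2, 3, 7} */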

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 179 +
 drivers/net/cpfl/cpfl_ethdev.h |   8 ++
 2 files changed, 187 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 17a69c16fe..a820528a0d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include 
 
 #include "cpfl_ethdev.h"
+#include 
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR   "representor"
 #define CPFL_TX_SINGLE_Q   "tx_single"
 #define CPFL_RX_SINGLE_Q   "rx_single"
 #define CPFL_VPORT "vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+   CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void 
*args)
return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t 
val)
+{
+   uint16_t i;
+
+   for (i = 0; i < *len_list; i++) {
+   if (list[i] == val)
+   return 0;
+   }
+   if (*len_list >= max_list)
+   return -1;
+   list[(*len_list)++] = val;
+   return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+   const uint16_t max_list)
+{
+   uint16_t lo, hi, val;
+   int result, n = 0;
+   const char *pos = str;
+
+   result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+   if (result == 1) {
+   if (enlist(list, len_list, max_list, lo) != 0)
+   return NULL;
+   } else if (result == 2) {
+   if (lo > hi)
+   return NULL;
+   for (val = lo; val <= hi; val++) {
+   if (enlist(list, len_list, max_list, val) != 0)
+   return NULL;
+   }
+   } else {
+   return NULL;
+   }
+   return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const 
uint16_t max_list)
+{
+   const char *pos = str;
+
+   if (*pos == '[')
+   pos++;
+   while (1) {
+   pos = process_range(pos, list, len_list, max_list);
+   if (pos == NULL)
+   return NULL;
+   if (*pos != ',') /* end of list */
+   break;
+   pos++;
+   }
+   if (*str == '[' && *pos != ']')
+   return NULL;
+   if (*pos == ']')
+   pos++;
+   return pos;
+}
+
+static int
+parse_repr(const char *key __rte_unused, const char *value, void *args)
+{
+   struct cpfl_devargs *devargs = args;
+   struct rte_eth_devargs *eth_da;
+   const char *str = value;
+
+   if (devargs->repr_args_num == CPFL_REPR_ARG_NUM_MAX)
+   return -EINVAL;
+
+   eth_da = &devargs->repr_args[devargs->repr_args_num];
+
+   if (str[0] == 'c') {
+   str += 1;
+   str = process_list(str, eth_da->mh_controllers,
+   ð_da->nb_mh_controllers,
+   RTE_DIM(eth_da->mh_controllers));
+   if (str == NULL)
+   goto done;
+   }
+   if (str[0] == 'p' && str[1] == 'f') {
+   eth_da->type = RTE_ETH_REPRESENTOR_PF;
+   str += 2;
+   str = process_list(str, eth_da->ports,
+   ð_da->nb_ports, RTE_DIM(eth_da->ports));
+   if (str == NULL || str[0] == '\0')
+   goto done;
+   } else if (eth_da->nb_mh_controllers > 0) {
+   /* 'c' must followed by 'pf'. */
+   str = NULL;
+   goto done;
+   }
+   if (str[0] == 'v' && str[1] == 'f') {
+   eth_da->type = RTE_ETH_REPRESENTOR_VF;
+   str += 2;
+   } else if (str[0] == 's' && str[1] == 'f') {
+   eth_da->type = RTE_ETH_REPRESENTOR_SF;
+   str += 2;
+   } else {
+   /* 'pf' must followed by 'vf' or 'sf'. */
+   if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
+   str = NULL;
+   goto done;
+   }
+   eth_da-

[PATCH 06/19] net/cpfl: support probe again

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Only the representor devargs will be parsed when the device is probed again.
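
A hedged example of triggering the probe-again path from an application via
hotplug is shown below; the PCI address and the devargs string are
hypothetical:

    #include <rte_dev.h>

    /* Sketch: with the device already probed, a second probe carrying only a
     * "representor" devarg exercises the RTE_PCI_DRV_PROBE_AGAIN path added
     * in this patch.
     */
    int ret = rte_dev_probe("0000:af:00.0,representor=vf[0-3]");
    if (ret != 0)
        printf("probe again failed: %d\n", ret);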

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++---
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a820528a0d..09015fbb08 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+   CPFL_REPRESENTOR,
+   NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
RTE_ETH_SPEED_NUM_NONE,
RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char 
*value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter, bool first)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (devargs == NULL)
return 0;
 
-   kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+   kvlist = rte_kvargs_parse(devargs->args,
+   first ? cpfl_valid_args_first : cpfl_valid_args_again);
if (kvlist == NULL) {
PMD_INIT_LOG(ERR, "invalid kvargs key");
return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
+   if (!first)
+   return 0;
+
ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 cpfl_args);
if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-  struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
struct cpfl_adapter_ext *adapter;
int retval;
 
-   if (!cpfl_adapter_list_init) {
-   rte_spinlock_init(&cpfl_adapter_lock);
-   TAILQ_INIT(&cpfl_adapter_list);
-   cpfl_adapter_list_init = true;
-   }
-
adapter = rte_zmalloc("cpfl_adapter_ext",
  sizeof(struct cpfl_adapter_ext), 0);
if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return -ENOMEM;
}
 
-   retval = cpfl_parse_devargs(pci_dev, adapter);
+   retval = cpfl_parse_devargs(pci_dev, adapter, true);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to parse private devargs");
return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   int ret;
+
+   ret = cpfl_parse_devargs(pci_dev, adapter, false);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+   return ret;
+   }
+
+   ret = cpfl_repr_devargs_process(adapter);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to process representor devargs");
+   return ret;
+   }
+
+   return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+  struct rte_pci_device *pci_dev)
+{
+   struct cpfl_adapter_ext *adapter;
+
+   if (!cpfl_adapter_list_init) {
+   rte_spinlock_init(&cpfl_adapter_lock);
+   TAILQ_INIT(&cpfl_adapter_list);
+   cpfl_adapter_list_init = true;
+   }
+
+   adapter = cpfl_find_adapter_ext(pci_dev);
+
+   if (adapter == NULL)
+   return cpfl_pci_probe_first(pci_dev);
+   else
+   return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
.id_table   = pci_id_cpfl_map,
-   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING,
+   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_PROBE_AGAIN,
.probe  = cpfl_pci_probe,
  

[PATCH 07/19] net/cpfl: create port representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Track representor requests in a whitelist.
A representor will only be created for an active vport.
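
A hedged sketch of how a single representor request could be recorded in the
whitelist hash added by this patch; the concrete field values are
hypothetical and the helper is only an illustration:

    /* Sketch only: whitelist one representor request, keyed by cpfl_repr_id. */
    static int
    cpfl_repr_whitelist_add_sketch(struct cpfl_adapter_ext *adapter)
    {
        struct cpfl_repr_id repr_id = {
            .type = RTE_ETH_REPRESENTOR_VF,
            .host_id = CPFL_HOST_ID_HOST,
            .pf_id = CPFL_PF_TYPE_APF,
            .vf_id = 0,
        };
        int ret;

        rte_spinlock_lock(&adapter->repr_lock);
        ret = rte_hash_add_key(adapter->repr_whitelist_hash, &repr_id);
        rte_spinlock_unlock(&adapter->repr_lock);

        return ret < 0 ? ret : 0;
    }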

Signed-off-by: Jingjing Wu 
Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  | 107 ---
 drivers/net/cpfl/cpfl_ethdev.h  |  34 +++
 drivers/net/cpfl/cpfl_representor.c | 448 
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build|   1 +
 5 files changed, 573 insertions(+), 43 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 09015fbb08..949a2c8069 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_whitelist_init(struct cpfl_adapter_ext *adapter)
+{
+   char hname[32];
+
+   snprintf(hname, 32, "%s-repr_wl", adapter->name);
+
+   rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+   struct rte_hash_parameters params = {
+   .name = hname,
+   .entries = CPFL_REPR_HASH_ENTRY_NUM,
+   .key_len = sizeof(struct cpfl_repr_id),
+   .hash_func = rte_hash_crc,
+   .socket_id = SOCKET_ID_ANY,
+   };
+
+   adapter->repr_whitelist_hash = rte_hash_create(¶ms);
+
+   if (adapter->repr_whitelist_hash == NULL) {
+   PMD_INIT_LOG(ERR, "Failed to create repr whitelist hash");
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+static void
+cpfl_repr_whitelist_uninit(struct cpfl_adapter_ext *adapter)
+{
+   rte_hash_free(adapter->repr_whitelist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
goto err_vport_map_init;
}
 
+   ret = cpfl_repr_whitelist_init(adapter);
+   if (ret) {
+   PMD_INIT_LOG(ERR, "Failed to init representor whitelist");
+   goto err_repr_whitelist_init;
+   }
+
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
adapter->max_vport_nb = adapter->base.caps.max_vports > 
CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
 
 err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+   cpfl_repr_whitelist_uninit(adapter);
+err_repr_whitelist_init:
cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext 
*adapter)
return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-   struct cpfl_devargs *devargs = &adapter->devargs;
-   int i, j;
-
-   /* check and refine repr args */
-   for (i = 0; i < devargs->repr_args_num; i++) {
-   struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-   /* set default host_id to xeon host */
-   if (eth_da->nb_mh_controllers == 0) {
-   eth_da->nb_mh_controllers = 1;
-   eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-   } else {
-   for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-   if (eth_da->mh_controllers[j] > 
CPFL_HOST_ID_ACC) {
-   PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-eth_da->mh_controllers[j]);
-   return -EINVAL;
-   }
-   }
-   }
-
-   /* set default pf to APF */
-   if (eth_da->nb_ports == 0) {
-   eth_da->nb_ports = 1;
-   eth_da->ports[0] = CPFL_PF_TYPE_APF;
-   } else {
-   for (j = 0; j < eth_da->nb_ports; j++) {
-   if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-   PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-eth_da->ports[j]);
-   return -EINVAL;
-   }
-   }
-   }
-   }
-
-   return 0;
-}
-
 static int
 cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
@@ -2304,6 +2306,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
struct cpfl_adapter_ext *adapter;
int retval;
+   uint16_t port_id;
 
adapter = rte_zmalloc("cp

[PATCH 08/19] net/cpfl: support vport list/info get

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.
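
A hedged usage sketch of the new helper is given below; the function/VF IDs
are illustrative, and the response buffer sizing follows the mailbox buffer
size used elsewhere in the driver:

    /* Sketch: query the vport list of host0 APF / VF 0. */
    static int
    cpfl_vport_list_query_sketch(struct cpfl_adapter_ext *adapter)
    {
        struct cpfl_vport_id vi = {
            .func_type = CPCHNL2_FUNC_TYPE_SRIOV,
            .pf_id = HOST0_APF,
            .vf_id = 0,
        };
        struct cpchnl2_get_vport_list_response *resp;
        int ret;

        resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
        if (resp == NULL)
            return -ENOMEM;

        ret = cpfl_cc_vport_list_get(adapter, &vi, resp);
        /* iterate the returned vports here */
        rte_free(resp);

        return ret;
    }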

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 1f5c3a39b8..4b8c0da632 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)   \
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 00..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include 
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response)
+{
+   struct cpchnl2_get_vport_list_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   memset(&request, 0, sizeof(request));
+   request.func_type = vi->func_type;
+   request.pf_id = vi->pf_id;
+   request.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_LIST");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+   return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response)
+{
+   struct cpchnl2_get_vport_info_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   request.vport.vport_id = vport_id->vport_id;
+   request.vport.vport_type = vport_id->vport_type;
+   request.func.func_type = vi->func_type;
+   request.func.pf_id = vi->pf_id;
+   request.func.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_INFO");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+   return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 1d963e5fd1..fb075c6860 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -17,6 +17,7 @@ sources = files(
 'cpfl_ethdev.c',
 'cpfl_rxtx.c',
 'cpfl_representor.c',
+'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1



[PATCH 09/19] net/cpfl: update vport info before creating representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Get port representor's vport list and update vport_map_hash
before creating the port representor.
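
For clarity, a hedged sketch of the update sequence this patch performs
before representor creation, reduced to a single vport; the real code walks
every vport returned in the list response, and the wrapper function is only
an illustration:

    /* Sketch only: list -> info -> map update, for one vport. */
    static int
    cpfl_repr_vport_map_refresh_sketch(struct cpfl_adapter_ext *adapter,
                                       const struct cpfl_repr_id *repr_id,
                                       struct cpchnl2_vport_id *vport_id,
                                       struct cpchnl2_get_vport_list_response *vlist_resp,
                                       struct cpchnl2_get_vport_info_response *vinfo_resp)
    {
        int ret;

        ret = cpfl_repr_vport_list_query(adapter, repr_id, vlist_resp);
        if (ret != 0)
            return ret;

        ret = cpfl_repr_vport_info_query(adapter, repr_id, vport_id, vinfo_resp);
        if (ret != 0)
            return ret;

        return cpfl_repr_vport_map_update(adapter, repr_id,
                                          vport_id->vport_id, vinfo_resp);
    }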

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  |   2 +-
 drivers/net/cpfl/cpfl_ethdev.h  |   3 +
 drivers/net/cpfl/cpfl_representor.c | 124 
 3 files changed, 128 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 949a2c8069..fc0ebc6fb7 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1633,7 +1633,7 @@ cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t 
*msg, uint16_t msglen)
}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
   struct cpfl_vport_id *vport_identity,
   struct cpchnl2_vport_info *vport_info)
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 4b8c0da632..9cc96839ed 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -189,6 +189,9 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vport_identity,
+  struct cpchnl2_vport_info *vport_info);
 int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
   struct cpfl_vport_id *vi,
   struct cpchnl2_get_vport_list_response *response);
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index 4d91d7311d..dcc01d0669 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -368,6 +368,86 @@ match_repr_with_vport(const struct cpfl_repr_id *repr_id,
return false;
 }
 
+static int
+cpfl_repr_vport_list_query(struct cpfl_adapter_ext *adapter,
+  const struct cpfl_repr_id *repr_id,
+  struct cpchnl2_get_vport_list_response *response)
+{
+   struct cpfl_vport_id vi;
+   int ret;
+
+   if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+   /* PF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+   vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+   vi.vf_id = 0;
+   } else {
+   /* VF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+   vi.pf_id = HOST0_APF;
+   vi.vf_id = repr_id->vf_id;
+   }
+
+   ret = cpfl_cc_vport_list_get(adapter, &vi, response);
+
+   return ret;
+}
+
+static int
+cpfl_repr_vport_info_query(struct cpfl_adapter_ext *adapter,
+  const struct cpfl_repr_id *repr_id,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpchnl2_get_vport_info_response *response)
+{
+   struct cpfl_vport_id vi;
+   int ret;
+
+   if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+   /* PF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+   vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+   vi.vf_id = 0;
+   } else {
+   /* VF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+   vi.pf_id = HOST0_APF;
+   vi.vf_id = repr_id->vf_id;
+   }
+
+   ret = cpfl_cc_vport_info_get(adapter, vport_id, &vi, response);
+
+   return ret;
+}
+
+static int
+cpfl_repr_vport_map_update(struct cpfl_adapter_ext *adapter,
+  const struct cpfl_repr_id *repr_id, uint32_t 
vport_id,
+  struct cpchnl2_get_vport_info_response *response)
+{
+   struct cpfl_vport_id vi;
+   int ret;
+
+   vi.vport_id = vport_id;
+   if (repr_id->type == RTE_ETH_REPRESENTOR_PF) {
+   /* PF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+   vi.pf_id = cpfl_func_id_get(repr_id->host_id, repr_id->pf_id);
+   } else {
+   /* VF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+   vi.pf_id = HOST0_APF;
+   vi.vf_id = repr_id->vf_id;
+   }
+
+   ret = cpfl_vport_info_create(adapter, &vi, &response->info);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Fail to update vport map hash for 
representor.");
+   return ret;
+   }
+
+   return 0;
+}
+
 int
 cpfl_repr_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
@@ -375,8 +455,14 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adapte
uint32_t iter = 0;
const struct cpfl_repr_id *repr_id;
const struct cpfl_vport_id *vp_id;
+   struct cpchnl2_get_vport_list_response *vlist_resp;
+   struct cpchnl2_get_vport_info_response vinfo_resp;
int ret;
 
+   vlist_resp = rte_zmalloc(NULL, IDPF_DFLT_MBX_BUF_SIZE, 0);
+   

[PATCH 10/19] net/cpfl: refine handle virtual channel message

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Refine handling of virtual channel event messages.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 46 --
 1 file changed, 22 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index fc0ebc6fb7..88c1479f3a 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1591,40 +1591,50 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-   struct idpf_vport *vport = NULL;
+   struct cpfl_vport *vport = NULL;
int i;
 
for (i = 0; i < adapter->cur_vport_nb; i++) {
-   vport = &adapter->vports[i]->base;
-   if (vport->vport_id != vport_id)
+   vport = adapter->vports[i];
+   if (vport->base.vport_id != vport_id)
continue;
else
return vport;
}
 
-   return vport;
+   return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
 {
struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-   struct rte_eth_dev_data *data = vport->dev_data;
-   struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+   struct cpfl_vport *vport;
+   struct rte_eth_dev_data *data;
+   struct rte_eth_dev *dev;
 
if (msglen < sizeof(struct virtchnl2_event)) {
PMD_DRV_LOG(ERR, "Error event");
return;
}
 
+   vport = cpfl_find_vport(adapter, vc_event->vport_id);
+   if (!vport) {
+   PMD_DRV_LOG(ERR, "Can't find vport.");
+   return;
+   }
+
+   data = vport->itf.data;
+   dev = &rte_eth_devices[data->port_id];
+
switch (vc_event->event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-   vport->link_up = !!(vc_event->link_status);
-   vport->link_speed = vc_event->link_speed;
+   vport->base.link_up = !!(vc_event->link_status);
+   vport->base.link_speed = vc_event->link_speed;
cpfl_dev_link_update(dev, 0);
break;
default:
@@ -1741,10 +1751,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
struct idpf_adapter *base = &adapter->base;
struct idpf_dma_mem *dma_mem = NULL;
struct idpf_hw *hw = &base->hw;
-   struct virtchnl2_event *vc_event;
struct idpf_ctlq_msg ctlq_msg;
enum idpf_mbx_opc mbx_op;
-   struct idpf_vport *vport;
uint16_t pending = 1;
uint32_t vc_op;
int ret;
@@ -1766,18 +1774,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
switch (mbx_op) {
case idpf_mbq_opc_send_msg_to_peer_pf:
if (vc_op == VIRTCHNL2_OP_EVENT) {
-   if (ctlq_msg.data_len < sizeof(struct 
virtchnl2_event)) {
-   PMD_DRV_LOG(ERR, "Error event");
-   return;
-   }
-   vc_event = (struct virtchnl2_event 
*)base->mbx_resp;
-   vport = cpfl_find_vport(adapter, 
vc_event->vport_id);
-   if (!vport) {
-   PMD_DRV_LOG(ERR, "Can't find vport.");
-   return;
-   }
-   cpfl_handle_event_msg(vport, base->mbx_resp,
- ctlq_msg.data_len);
+   cpfl_handle_vchnl_event_msg(adapter, 
adapter->base.mbx_resp,
+   ctlq_msg.data_len);
} else if (vc_op == CPCHNL2_OP_EVENT) {
cpfl_handle_cpchnl_event_msg(adapter, 
adapter->base.mbx_resp,
 ctlq_msg.data_len);
-- 
2.34.1



[PATCH 11/19] net/cpfl: add exceptional vport

2023-08-09 Thread beilei . xing
From: Beilei Xing 

This patch creates an exceptional vport when there is a port representor.
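
The exceptional vport installs placeholder burst functions until the
dispatch data path is added later in the series. The real
cpfl_dummy_recv_pkts/cpfl_dummy_xmit_pkts bodies are not shown in this
excerpt, so the sketch below only assumes they move no packets:

    /* Sketch only: placeholder burst callbacks that move no packets. */
    static uint16_t
    cpfl_dummy_recv_pkts_sketch(__rte_unused void *rxq,
                                __rte_unused struct rte_mbuf **rx_pkts,
                                __rte_unused uint16_t nb_pkts)
    {
        return 0;
    }

    static uint16_t
    cpfl_dummy_xmit_pkts_sketch(__rte_unused void *txq,
                                __rte_unused struct rte_mbuf **tx_pkts,
                                __rte_unused uint16_t nb_pkts)
    {
        return 0;
    }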

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 107 ++---
 drivers/net/cpfl/cpfl_ethdev.h |   8 +++
 drivers/net/cpfl/cpfl_rxtx.c   |  16 +
 drivers/net/cpfl/cpfl_rxtx.h   |   7 +++
 4 files changed, 131 insertions(+), 7 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 88c1479f3a..f674d93050 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1023,8 +1023,13 @@ cpfl_dev_start(struct rte_eth_dev *dev)
goto err_startq;
}
 
-   cpfl_set_rx_function(dev);
-   cpfl_set_tx_function(dev);
+   if (cpfl_vport->exceptional) {
+   dev->rx_pkt_burst = cpfl_dummy_recv_pkts;
+   dev->tx_pkt_burst = cpfl_dummy_xmit_pkts;
+   } else {
+   cpfl_set_rx_function(dev);
+   cpfl_set_tx_function(dev);
+   }
 
ret = idpf_vc_vport_ena_dis(vport, true);
if (ret != 0) {
@@ -1098,13 +1103,15 @@ cpfl_dev_close(struct rte_eth_dev *dev)
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
 
+   if (!cpfl_vport->exceptional) {
+   adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
+   adapter->cur_vport_nb--;
+   adapter->vports[vport->sw_idx] = NULL;
+   }
+
idpf_vport_deinit(vport);
rte_free(cpfl_vport->p2p_q_chunks_info);
-
-   adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
-   adapter->cur_vport_nb--;
dev->data->dev_private = NULL;
-   adapter->vports[vport->sw_idx] = NULL;
rte_free(cpfl_vport);
 
return 0;
@@ -1621,6 +1628,11 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext 
*adapter, uint8_t *msg, uint
return;
}
 
+   /* ignore if it is exceptional vport */
+   if (adapter->exceptional_vport &&
+   adapter->exceptional_vport->base.vport_id == vc_event->vport_id)
+   return;
+
vport = cpfl_find_vport(adapter, vc_event->vport_id);
if (!vport) {
PMD_DRV_LOG(ERR, "Can't find vport.");
@@ -2192,6 +2204,56 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void 
*init_params)
return ret;
 }
 
+static int
+cpfl_exceptional_vport_init(struct rte_eth_dev *dev, void *init_params)
+{
+   struct cpfl_vport *cpfl_vport = CPFL_DEV_TO_VPORT(dev);
+   struct idpf_vport *vport = &cpfl_vport->base;
+   struct cpfl_adapter_ext *adapter = init_params;
+   /* for sending create vport virtchnl msg prepare */
+   struct virtchnl2_create_vport create_vport_info;
+   int ret = 0;
+
+   dev->dev_ops = &cpfl_eth_dev_ops;
+   vport->adapter = &adapter->base;
+
+   memset(&create_vport_info, 0, sizeof(create_vport_info));
+   ret = idpf_vport_info_init(vport, &create_vport_info);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to init exceptional vport req_info.");
+   goto err;
+   }
+
+   ret = idpf_vport_init(vport, &create_vport_info, dev->data);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to init exceptional vport.");
+   goto err;
+   }
+
+   cpfl_vport->itf.adapter = adapter;
+   cpfl_vport->itf.data = dev->data;
+   cpfl_vport->exceptional = TRUE;
+
+   dev->data->mac_addrs = rte_zmalloc(NULL, RTE_ETHER_ADDR_LEN, 0);
+   if (dev->data->mac_addrs == NULL) {
+   PMD_INIT_LOG(ERR, "Cannot allocate mac_addr for exceptional 
vport.");
+   ret = -ENOMEM;
+   goto err_mac_addrs;
+   }
+
+   rte_ether_addr_copy((struct rte_ether_addr *)vport->default_mac_addr,
+   &dev->data->mac_addrs[0]);
+
+   adapter->exceptional_vport = cpfl_vport;
+
+   return 0;
+
+err_mac_addrs:
+   idpf_vport_deinit(vport);
+err:
+   return ret;
+}
+
 static const struct rte_pci_id pci_id_cpfl_map[] = {
{ RTE_PCI_DEVICE(IDPF_INTEL_VENDOR_ID, IDPF_DEV_ID_CPF) },
{ .vendor_id = 0, /* sentinel */ },
@@ -2299,6 +2361,23 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adapt
return 0;
 }
 
+static int
+cpfl_exceptional_vport_create(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adapter)
+{
+   char name[RTE_ETH_NAME_MAX_LEN];
+   int ret;
+
+   snprintf(name, sizeof(name), "cpfl_%s_exceptional_vport", 
pci_dev->name);
+   ret = rte_eth_dev_create(&pci_dev->device, name,
+sizeof(struct cpfl_vport),
+NULL, NULL, cpfl_exceptional_vport_init,
+adapter);
+   if (ret != 0)
+   PMD_DRV_LOG(ERR, "Failed to create exceptional vport");
+
+   return ret;
+}
+
 static int
 cpfl_pci_probe_first(struct rte_pc

[PATCH 12/19] net/cpfl: support representor Rx/Tx queue setup

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Support Rx/Tx queue setup for port representor.
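
From the application's point of view the representor queues are configured
through the normal ethdev calls; a hedged example follows, where the port id,
descriptor count and mempool are hypothetical:

    /* Sketch: after rte_eth_dev_configure(), set up one Rx and one Tx queue
     * on a representor port.  nb_desc must be a power of two within
     * [CPFL_MIN_RING_DESC, CPFL_MAX_RING_DESC].
     */
    uint16_t repr_port_id = 1;                  /* hypothetical */
    struct rte_mempool *mp = mbuf_pool;         /* hypothetical, created elsewhere */
    int ret;

    ret = rte_eth_rx_queue_setup(repr_port_id, 0, 1024, SOCKET_ID_ANY, NULL, mp);
    if (ret == 0)
        ret = rte_eth_tx_queue_setup(repr_port_id, 0, 1024, SOCKET_ID_ANY, NULL);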

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h  |  11 +++
 drivers/net/cpfl/cpfl_representor.c | 126 
 2 files changed, 137 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b0fb05c7b9..8a8721bbe9 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -170,6 +170,17 @@ struct cpfl_repr {
struct cpfl_vport_info *vport_info;
 };
 
+struct cpfl_repr_rx_queue {
+   struct cpfl_repr *repr;
+   struct rte_mempool *mb_pool;
+   struct rte_ring *rx_ring;
+};
+
+struct cpfl_repr_tx_queue {
+   struct cpfl_repr *repr;
+   struct cpfl_tx_queue *txq;
+};
+
 struct cpfl_adapter_ext {
TAILQ_ENTRY(cpfl_adapter_ext) next;
struct idpf_adapter base;
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index dcc01d0669..19c7fb4cb9 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -285,12 +285,138 @@ cpfl_repr_dev_stop(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+cpfl_repr_rx_queue_setup(struct rte_eth_dev *dev,
+uint16_t queue_id,
+uint16_t nb_desc,
+unsigned int socket_id,
+__rte_unused const struct rte_eth_rxconf *conf,
+struct rte_mempool *pool)
+{
+   struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+   struct cpfl_repr_rx_queue *rxq;
+   char ring_name[RTE_RING_NAMESIZE];
+   struct rte_ring *rx_ring;
+
+   if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+   PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+   return -EINVAL;
+   }
+
+   if (!RTE_IS_POWER_OF_2(nb_desc) ||
+   nb_desc > CPFL_MAX_RING_DESC ||
+   nb_desc < CPFL_MIN_RING_DESC) {
+   PMD_INIT_LOG(ERR, "nb_desc should < %u, > %u and power of 2)",
+CPFL_MAX_RING_DESC, CPFL_MIN_RING_DESC);
+   return -EINVAL;
+   }
+
+   /* Free memory if needed */
+   rxq = dev->data->rx_queues[queue_id];
+   if (rxq) {
+   rte_ring_free(rxq->rx_ring);
+   rte_free(rxq);
+   dev->data->rx_queues[queue_id] = NULL;
+   }
+
+   /* Allocate rx queue data structure */
+   rxq = rte_zmalloc_socket("cpfl representor rx queue",
+sizeof(struct cpfl_repr_rx_queue),
+RTE_CACHE_LINE_SIZE,
+socket_id);
+   if (!rxq) {
+   PMD_INIT_LOG(ERR, "Failed to allocate memory for representor rx 
queue");
+   return -ENOMEM;
+   }
+
+   /* use rte_ring as rx queue of representor */
+   if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF)
+   snprintf(ring_name, sizeof(ring_name), 
"cpfl_repr_c%dpf%dvf%d_rx",
+repr->repr_id.host_id, repr->repr_id.pf_id, 
repr->repr_id.vf_id);
+   else
+   snprintf(ring_name, sizeof(ring_name), "cpfl_repr_c%dpf%d_rx",
+repr->repr_id.host_id, repr->repr_id.pf_id);
+   rx_ring = rte_ring_lookup(ring_name);
+   if (rx_ring) {
+   PMD_INIT_LOG(ERR, "rte_ring %s is occuriped.", ring_name);
+   rte_free(rxq);
+   return -EEXIST;
+   }
+
+   rx_ring = rte_ring_create(ring_name, nb_desc, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+   if (!rx_ring) {
+   PMD_INIT_LOG(ERR, "Failed to create ring %s.", ring_name);
+   rte_free(rxq);
+   return -EINVAL;
+   }
+
+   rxq->mb_pool = pool;
+   rxq->repr = repr;
+   rxq->rx_ring = rx_ring;
+   dev->data->rx_queues[queue_id] = rxq;
+
+   return 0;
+}
+
+static int
+cpfl_repr_tx_queue_setup(struct rte_eth_dev *dev,
+uint16_t queue_id,
+__rte_unused uint16_t nb_desc,
+unsigned int socket_id,
+__rte_unused const struct rte_eth_txconf *conf)
+{
+   struct cpfl_repr *repr = CPFL_DEV_TO_REPR(dev);
+   struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+   struct cpfl_repr_tx_queue *txq;
+   struct cpfl_vport *vport;
+
+   if (!(dev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+   PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+   return -EINVAL;
+   }
+
+   txq = dev->data->tx_queues[queue_id];
+   if (txq) {
+   rte_free(txq);
+   dev->data->rx_queues[queue_id] = NULL;
+   }
+   txq = rte_zmalloc_socket("cpfl representor tx queue",
+sizeof(struct cpfl_repr_tx_queue),
+RTE_CACHE_LINE_SIZE,
+ 

[PATCH 13/19] net/cpfl: support link update for representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add link update ops for representor.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h  |  1 +
 drivers/net/cpfl/cpfl_representor.c | 20 
 2 files changed, 21 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 8a8721bbe9..7813b9173e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -168,6 +168,7 @@ struct cpfl_repr {
struct cpfl_repr_id repr_id;
struct rte_ether_addr mac_addr;
struct cpfl_vport_info *vport_info;
+   bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_repr_rx_queue {
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index 19c7fb4cb9..862464602f 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -408,6 +408,23 @@ cpfl_repr_tx_queue_setup(struct rte_eth_dev *dev,
return 0;
 }
 
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+ __rte_unused int wait_to_complete)
+{
+   struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+   struct rte_eth_link *dev_link = ðdev->data->dev_link;
+
+   if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+   PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+   return -EINVAL;
+   }
+   dev_link->link_status = repr->func_up ?
+   RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+   return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start  = cpfl_repr_dev_start,
.dev_stop   = cpfl_repr_dev_stop,
@@ -417,6 +434,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
.rx_queue_setup = cpfl_repr_rx_queue_setup,
.tx_queue_setup = cpfl_repr_tx_queue_setup,
+   .link_update= cpfl_repr_link_update,
 };
 
 static int
@@ -431,6 +449,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
repr->itf.adapter = adapter;
repr->itf.data = eth_dev->data;
+   if (repr->vport_info->vport_info.vport_status == 
CPCHNL2_VPORT_STATUS_ENABLED)
+   repr->func_up = true;
 
eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
-- 
2.34.1



[PATCH 14/19] net/cpfl: add stats ops for representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Support stats_get and stats_reset ops for port representor.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h  |  8 +
 drivers/net/cpfl/cpfl_representor.c | 54 +
 2 files changed, 62 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7813b9173e..33e810408b 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -171,15 +171,23 @@ struct cpfl_repr {
bool func_up; /* If the represented function is up */
 };
 
+struct cpfl_repr_stats {
+   uint64_t packets;
+   uint64_t bytes;
+   uint64_t errors;
+};
+
 struct cpfl_repr_rx_queue {
struct cpfl_repr *repr;
struct rte_mempool *mb_pool;
struct rte_ring *rx_ring;
+   struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_repr_tx_queue {
struct cpfl_repr *repr;
struct cpfl_tx_queue *txq;
+   struct cpfl_repr_stats stats; /* Statistics */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index 862464602f..79cb7f76d4 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -425,6 +425,58 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
return 0;
 }
 
+static int
+idpf_repr_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+   struct cpfl_repr_tx_queue *txq;
+   struct cpfl_repr_rx_queue *rxq;
+   uint16_t i;
+
+   for (i = 0; i < dev->data->nb_tx_queues; i++) {
+   txq = dev->data->tx_queues[i];
+   if (!txq)
+   continue;
+   stats->opackets += __atomic_load_n(&txq->stats.packets, 
__ATOMIC_RELAXED);
+   stats->obytes += __atomic_load_n(&txq->stats.bytes, 
__ATOMIC_RELAXED);
+   }
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (!rxq)
+   continue;
+   stats->ipackets += __atomic_load_n(&rxq->stats.packets, 
__ATOMIC_RELAXED);
+   stats->ibytes += __atomic_load_n(&rxq->stats.bytes, 
__ATOMIC_RELAXED);
+   stats->ierrors += __atomic_load_n(&rxq->stats.errors, 
__ATOMIC_RELAXED);
+   }
+   stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
+   return 0;
+}
+
+static int
+idpf_repr_stats_reset(struct rte_eth_dev *dev)
+{
+   struct cpfl_repr_tx_queue *txq;
+   struct cpfl_repr_rx_queue *rxq;
+   uint16_t i;
+
+   for (i = 0; i < dev->data->nb_tx_queues; i++) {
+   txq = dev->data->tx_queues[i];
+   if (!txq)
+   continue;
+   __atomic_store_n(&txq->stats.packets, 0, __ATOMIC_RELAXED);
+   __atomic_store_n(&txq->stats.bytes, 0, __ATOMIC_RELAXED);
+   __atomic_store_n(&txq->stats.errors, 0, __ATOMIC_RELAXED);
+   }
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (!rxq)
+   continue;
+   __atomic_store_n(&rxq->stats.packets, 0, __ATOMIC_RELAXED);
+   __atomic_store_n(&rxq->stats.bytes, 0, __ATOMIC_RELAXED);
+   __atomic_store_n(&rxq->stats.errors, 0, __ATOMIC_RELAXED);
+   }
+   return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start  = cpfl_repr_dev_start,
.dev_stop   = cpfl_repr_dev_stop,
@@ -435,6 +487,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
.rx_queue_setup = cpfl_repr_rx_queue_setup,
.tx_queue_setup = cpfl_repr_tx_queue_setup,
.link_update= cpfl_repr_link_update,
+   .stats_get  = idpf_repr_stats_get,
+   .stats_reset= idpf_repr_stats_reset,
 };
 
 static int
-- 
2.34.1



[PATCH 15/19] common/idpf: refine inline function

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Move some static inline functions to header file.

Signed-off-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_rxtx.c | 246 -
 drivers/common/idpf/idpf_common_rxtx.h | 246 +
 drivers/common/idpf/version.map|   3 +
 3 files changed, 249 insertions(+), 246 deletions(-)

diff --git a/drivers/common/idpf/idpf_common_rxtx.c 
b/drivers/common/idpf/idpf_common_rxtx.c
index fc87e3e243..50465e76ea 100644
--- a/drivers/common/idpf/idpf_common_rxtx.c
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -442,188 +442,6 @@ idpf_qc_split_rxq_mbufs_alloc(struct idpf_rx_queue *rxq)
return 0;
 }
 
-#define IDPF_TIMESYNC_REG_WRAP_GUARD_BAND  1
-/* Helper function to convert a 32b nanoseconds timestamp to 64b. */
-static inline uint64_t
-idpf_tstamp_convert_32b_64b(struct idpf_adapter *ad, uint32_t flag,
-   uint32_t in_timestamp)
-{
-#ifdef RTE_ARCH_X86_64
-   struct idpf_hw *hw = &ad->hw;
-   const uint64_t mask = 0x;
-   uint32_t hi, lo, lo2, delta;
-   uint64_t ns;
-
-   if (flag != 0) {
-   IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, 
PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-   IDPF_WRITE_REG(hw, GLTSYN_CMD_SYNC_0_0, 
PF_GLTSYN_CMD_SYNC_EXEC_CMD_M |
-  PF_GLTSYN_CMD_SYNC_SHTIME_EN_M);
-   lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-   hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-   /*
-* On typical system, the delta between lo and lo2 is ~1000ns,
-* so 1 seems a large-enough but not overly-big guard band.
-*/
-   if (lo > (UINT32_MAX - IDPF_TIMESYNC_REG_WRAP_GUARD_BAND))
-   lo2 = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-   else
-   lo2 = lo;
-
-   if (lo2 < lo) {
-   lo = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_L_0);
-   hi = IDPF_READ_REG(hw, PF_GLTSYN_SHTIME_H_0);
-   }
-
-   ad->time_hw = ((uint64_t)hi << 32) | lo;
-   }
-
-   delta = (in_timestamp - (uint32_t)(ad->time_hw & mask));
-   if (delta > (mask / 2)) {
-   delta = ((uint32_t)(ad->time_hw & mask) - in_timestamp);
-   ns = ad->time_hw - delta;
-   } else {
-   ns = ad->time_hw + delta;
-   }
-
-   return ns;
-#else /* !RTE_ARCH_X86_64 */
-   RTE_SET_USED(ad);
-   RTE_SET_USED(flag);
-   RTE_SET_USED(in_timestamp);
-   return 0;
-#endif /* RTE_ARCH_X86_64 */
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S   \
-   (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
-RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
-RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |\
-RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
-
-static inline uint64_t
-idpf_splitq_rx_csum_offload(uint8_t err)
-{
-   uint64_t flags = 0;
-
-   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
-   return flags;
-
-   if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
-   flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
- RTE_MBUF_F_RX_L4_CKSUM_GOOD);
-   return flags;
-   }
-
-   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
-   flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
-   else
-   flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
-
-   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
-   flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
-   else
-   flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
-
-   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
-   flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
-
-   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
-   flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
-   else
-   flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
-
-   return flags;
-}
-
-#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
-#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
-#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
-
-static inline uint64_t
-idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
-  volatile struct virtchnl2_rx_flex_desc_adv_nic_3 
*rx_desc)
-{
-   uint8_t status_err0_qw0;
-   uint64_t flags = 0;
-
-   status_err0_qw0 = rx_desc->status_err0_qw0;
-
-   if ((status_err0_qw0 & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
-   flags |= RTE_MBUF_F_RX_RSS_HASH;
-   mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
-   IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
-

[PATCH 16/19] net/cpfl: support representor data path

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add Rx/Tx burst for port representor.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_representor.c |  83 +++
 drivers/net/cpfl/cpfl_rxtx.c| 121 
 drivers/net/cpfl/cpfl_rxtx.h|   4 +
 3 files changed, 208 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index 79cb7f76d4..51b70ea346 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -491,6 +491,87 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
.stats_reset= idpf_repr_stats_reset,
 };
 
+#define MAX_IDPF_REPRESENTOR_BURST  128
+static uint16_t
+cpfl_repr_rx_burst(void *rxq,
+  struct rte_mbuf **rx_pkts,
+  uint16_t nb_pkts)
+{
+   struct cpfl_repr_rx_queue *rx_queue = rxq;
+   struct rte_ring *ring = rx_queue->rx_ring;
+   struct rte_mbuf *mbuf[MAX_IDPF_REPRESENTOR_BURST] = {NULL};
+   unsigned int nb_recv;
+   uint16_t i;
+
+   if (unlikely(!ring))
+   return 0;
+
+   nb_recv = rte_ring_dequeue_burst(ring, (void **)mbuf,
+RTE_MIN(nb_pkts, 
MAX_IDPF_REPRESENTOR_BURST), NULL);
+   for (i = 0; i < nb_recv; i++) {
+   if (mbuf[i]->pool != rx_queue->mb_pool) {
+   /* need a copy if the mempools used for the vport and 
representor queue are different */
+   rx_pkts[i] = rte_pktmbuf_copy(mbuf[i], 
rx_queue->mb_pool, 0, UINT32_MAX);
+   rte_pktmbuf_free(mbuf[i]);
+   } else {
+   rx_pkts[i] = mbuf[i];
+   }
+   }
+
+   __atomic_fetch_add(&rx_queue->stats.packets, nb_recv, __ATOMIC_RELAXED);
+   /* TODO: bytes stats */
+   return nb_recv;
+}
+
+static uint16_t
+cpfl_get_vsi_from_vf_representor(struct cpfl_repr *repr)
+{
+   return repr->vport_info->vport_info.vsi_id;
+}
+
+static uint16_t
+cpfl_repr_tx_burst(void *txq,
+  struct rte_mbuf **tx_pkts,
+  uint16_t nb_pkts)
+{
+   struct cpfl_repr_tx_queue *tx_queue = txq;
+   struct idpf_tx_queue *hw_txq = &tx_queue->txq->base;
+   struct cpfl_repr *repr;
+   uint16_t vsi_id;
+   uint16_t nb;
+
+   if (unlikely(!tx_queue->txq))
+   return 0;
+
+   repr = tx_queue->repr;
+
+   if (!hw_txq) {
+   PMD_INIT_LOG(ERR, "No Queue associated with representor 
host_id: %d, %s %d",
+repr->repr_id.host_id,
+(repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? 
"vf" : "pf",
+(repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) ? 
repr->repr_id.vf_id :
+repr->repr_id.pf_id);
+   return 0;
+   }
+
+   if (repr->repr_id.type == RTE_ETH_REPRESENTOR_VF) {
+   vsi_id = cpfl_get_vsi_from_vf_representor(repr);
+   } else {
+   /* TODO: RTE_ETH_REPRESENTOR_PF */
+   PMD_INIT_LOG(ERR, "Get vsi from pf representor is not 
supported.");
+   return 0;
+   }
+
+   rte_spinlock_lock(&tx_queue->txq->lock);
+   nb = cpfl_xmit_pkts_to_vsi(tx_queue->txq, tx_pkts, nb_pkts, vsi_id);
+   rte_spinlock_unlock(&tx_queue->txq->lock);
+
+   __atomic_fetch_add(&tx_queue->stats.packets, nb, __ATOMIC_RELAXED);
+   __atomic_fetch_add(&tx_queue->stats.errors, nb_pkts - nb, __ATOMIC_RELAXED);
+   /* TODO: bytes stats */
+   return nb;
+}
+
 static int
 cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
 {
@@ -507,6 +588,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
repr->func_up = true;
 
eth_dev->dev_ops = &cpfl_repr_dev_ops;
+   eth_dev->rx_pkt_burst = cpfl_repr_rx_burst;
+   eth_dev->tx_pkt_burst = cpfl_repr_tx_burst;
 
eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
/* bit[15:14] type
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index df6a8c1940..882efe04cf 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -616,6 +616,9 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
txq->ops = &def_txq_ops;
cpfl_vport->nb_data_txq++;
txq->q_set = true;
+
+   rte_spinlock_init(&cpfl_txq->lock);
+
dev->data->tx_queues[queue_idx] = cpfl_txq;
 
return 0;
@@ -1409,6 +1412,124 @@ cpfl_stop_queues(struct rte_eth_dev *dev)
}
 }
 
+static inline void
+cpfl_set_tx_switch_ctx(uint16_t vsi_id, bool is_vsi,
+  volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+   uint16_t cmd_dtype;
+
+   /* Use TX Native TSO Context Descriptor to carry VSI
+* so TSO is not supported
+*/
+   if (is_vsi) {
+   cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+   IDPF_TX_FLEX_CTX_DESC_CMD_SWT

[PATCH 17/19] net/cpfl: support dispatch process

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add the dispatch process function cpfl_packets_dispatch().

Signed-off-by: Beilei Xing 
---
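Notes: the exceptional vport stores the source VSI of each received packet
in the mbuf dynfield registered below. As a rough sketch only (not this
patch's actual code), the dispatch side can read it back like this, reusing
the cpfl_dynfield_source_metadata_offset variable set in this patch:

	/* return the source VSI carried in the registered dynfield */
	static inline uint16_t
	cpfl_mbuf_src_vsi(const struct rte_mbuf *m)
	{
		return *RTE_MBUF_DYNFIELD(m, cpfl_dynfield_source_metadata_offset,
					  const uint16_t *);
	}
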
 drivers/net/cpfl/cpfl_ethdev.c  |  39 -
 drivers/net/cpfl/cpfl_ethdev.h  |   1 +
 drivers/net/cpfl/cpfl_representor.c |  80 +
 drivers/net/cpfl/cpfl_rxtx.c| 131 
 drivers/net/cpfl/cpfl_rxtx.h|   8 ++
 5 files changed, 257 insertions(+), 2 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index f674d93050..8569a0b81d 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -129,6 +129,13 @@ static const struct rte_cpfl_xstats_name_off 
rte_cpfl_stats_strings[] = {
 
 #define CPFL_NB_XSTATS RTE_DIM(rte_cpfl_stats_strings)
 
+static const struct rte_mbuf_dynfield cpfl_source_metadata_param = {
+   .name = "cpfl_source_metadata",
+   .size = sizeof(uint16_t),
+   .align = __alignof__(uint16_t),
+   .flags = 0,
+};
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 __rte_unused int wait_to_complete)
@@ -2382,7 +2389,7 @@ static int
 cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
struct cpfl_adapter_ext *adapter;
-   int retval;
+   int retval, offset;
uint16_t port_id;
 
adapter = rte_zmalloc("cpfl_adapter_ext",
@@ -2432,7 +2439,22 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
PMD_INIT_LOG(ERR, "Failed to create exceptional vport. 
");
goto close_ethdev;
}
+
+   /* register a dynfield to carry src_vsi
+* TODO: is it wasteful to use a dynfield? Could we define a
+* recv func like the one below to carry the src vsi directly
+* in src_vsi[]?
+* idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+*uint16_t src_vsi[], uint16_t nb_pkts)
+*/
+   offset = 
rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+   if (unlikely(offset == -1)) {
+   retval = -rte_errno;
+   PMD_INIT_LOG(ERR, "source metadata is disabled in 
mbuf");
+   goto close_ethdev;
+   }
+   cpfl_dynfield_source_metadata_offset = offset;
}
+
retval = cpfl_repr_create(pci_dev, adapter);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to create representors ");
@@ -2458,7 +2480,7 @@ cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 static int
 cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
-   int ret;
+   int ret, offset;
 
ret = cpfl_parse_devargs(pci_dev, adapter, false);
if (ret != 0) {
@@ -2478,6 +2500,19 @@ cpfl_pci_probe_again(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *ad
PMD_INIT_LOG(ERR, "Failed to create exceptional vport. 
");
return ret;
}
+
+   /* register a dynfield to carry src_vsi
+* TODO: is it wasteful to use a dynfield? Could we define a
+* recv func like the one below to carry the src vsi directly
+* in src_vsi[]?
+* idpf_exceptional_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+*uint16_t src_vsi[], uint16_t nb_pkts)
+*/
+   offset = 
rte_mbuf_dynfield_register(&cpfl_source_metadata_param);
+   if (unlikely(offset == -1)) {
+   PMD_INIT_LOG(ERR, "source metadata is disabled in 
mbuf");
+   return -rte_errno;
+   }
+   cpfl_dynfield_source_metadata_offset = offset;
}
 
ret = cpfl_repr_create(pci_dev, adapter);
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 33e810408b..5bd6f930b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -227,6 +227,7 @@ int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
   struct cpchnl2_vport_id *vport_id,
   struct cpfl_vport_id *vi,
   struct cpchnl2_get_vport_info_response *response);
+int cpfl_packets_dispatch(void *arg);
 
 #define CPFL_DEV_TO_PCI(eth_dev)   \
RTE_DEV_TO_PCI((eth_dev)->device)
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index 51b70ea346..a781cff403 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,7 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_ethdev.h"
 
 static int
 cpfl_repr_whitelist_update(struct cpfl_adapter_ext *adapter,
@@ -853,3 +854,82 @@ cpfl_repr_create(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adapte
 
return 0;
 }
+
+static struct cpfl_repr *
+cpfl_get_repr_by_v

[PATCH 18/19] net/cpfl: add dispatch service

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add dispatch service for port representor.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 129 +
 1 file changed, 129 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 8569a0b81d..8dbc175749 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -11,6 +11,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "cpfl_ethdev.h"
 #include 
@@ -136,6 +137,107 @@ static const struct rte_mbuf_dynfield 
cpfl_source_metadata_param = {
.flags = 0,
 };
 
+static int
+cpfl_dispatch_service_register(struct rte_eth_dev *dev)
+{
+   struct cpfl_vport *vport = dev->data->dev_private;
+   struct rte_service_spec service_params;
+   uint32_t service_core_list[RTE_MAX_LCORE];
+   uint32_t num_service_cores;
+   uint32_t service_core_id;
+   int ret;
+
+   num_service_cores = rte_service_lcore_count();
+   if (num_service_cores <= 0) {
+   PMD_DRV_LOG(ERR, "Fail to register dispatch service, no service 
core found.");
+   return -ENOTSUP;
+   }
+
+   ret = rte_service_lcore_list(service_core_list, num_service_cores);
+   if (ret <= 0) {
+   PMD_DRV_LOG(ERR, "Fail to get service core list");
+   return -ENOTSUP;
+   }
+   /* use the first lcore by default */
+   service_core_id = service_core_list[0];
+
+   memset(&service_params, 0, sizeof(struct rte_service_spec));
+   snprintf(service_params.name, sizeof(service_params.name), "Dispatch 
service");
+   service_params.callback = cpfl_packets_dispatch;
+   service_params.callback_userdata = dev;
+   service_params.capabilities = 0;
+   service_params.socket_id = rte_lcore_to_socket_id(service_core_id);
+
+   ret = rte_service_component_register(&service_params, 
&vport->dispatch_service_id);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to register %s component", 
service_params.name);
+   return ret;
+   }
+
+   ret = rte_service_map_lcore_set(vport->dispatch_service_id, 
service_core_id, 1);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to map service %s to lcore %d",
+   service_params.name, service_core_id);
+   return ret;
+   }
+
+   vport->dispatch_core_id = service_core_id;
+
+   return 0;
+}
+
+static void
+cpfl_dispatch_service_unregister(struct rte_eth_dev *dev)
+{
+   struct cpfl_vport *vport = dev->data->dev_private;
+
+   PMD_DRV_LOG(DEBUG, "Unregister service %s",
+   rte_service_get_name(vport->dispatch_service_id));
+   rte_service_map_lcore_set(vport->dispatch_service_id,
+ vport->dispatch_core_id, 0);
+   rte_service_component_unregister(vport->dispatch_service_id);
+}
+
+static int
+cpfl_dispatch_service_start(struct rte_eth_dev *dev)
+{
+   struct cpfl_vport *vport = dev->data->dev_private;
+   int ret;
+
+   ret = rte_service_component_runstate_set(vport->dispatch_service_id, 1);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to start %s component",
+   rte_service_get_name(vport->dispatch_service_id));
+   return ret;
+   }
+   ret = rte_service_runstate_set(vport->dispatch_service_id, 1);
+   if (ret) {
+   PMD_DRV_LOG(ERR, "Fail to start service %s",
+   rte_service_get_name(vport->dispatch_service_id));
+   return ret;
+   }
+   return 0;
+}
+
+static void
+cpfl_dispatch_service_stop(struct rte_eth_dev *dev)
+{
+   struct cpfl_vport *vport = dev->data->dev_private;
+   int ret;
+
+   /* The service core may be shared, so don't stop it here */
+
+   ret = rte_service_runstate_set(vport->dispatch_service_id, 0);
+   if (ret)
+   PMD_DRV_LOG(WARNING, "Fail to stop service %s",
+   rte_service_get_name(vport->dispatch_service_id));
+
+   ret = rte_service_component_runstate_set(vport->dispatch_service_id, 0);
+   if (ret)
+   PMD_DRV_LOG(WARNING, "Fail to stop %s component",
+   rte_service_get_name(vport->dispatch_service_id));
+}
+
 static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 __rte_unused int wait_to_complete)
@@ -1031,6 +1133,14 @@ cpfl_dev_start(struct rte_eth_dev *dev)
}
 
if (cpfl_vport->exceptional) {
+   /* No pkt_burst function setting on exceptional vport,
+* start dispatch service instead
+*/
+   if (cpfl_dispatch_service_start(dev)) {
+   PMD_DRV_LOG(ERR, "Fail to start Dispatch service on %s",
+   dev->device->name);
+   goto err_serv_start;
+   }
dev->rx_pkt_burst = cpfl_dummy_re

[PATCH 19/19] doc: update release notes for representor

2023-08-09 Thread beilei . xing
From: Beilei Xing 

Add support for port representor.

Signed-off-by: Beilei Xing 
---
 doc/guides/rel_notes/release_23_11.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/guides/rel_notes/release_23_11.rst 
b/doc/guides/rel_notes/release_23_11.rst
index 6b4dd21fd0..688bee4d6d 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -55,6 +55,9 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -
-- 
2.34.1



[PATCH 0/5] support item NSH matching

2023-08-09 Thread Haifei Luo
NSH can be matched using the existing item: RTE_FLOW_ITEM_TYPE_NSH.
Matching on NSH fields is not supported.
Add support for configuring VXLAN-GPE's next protocol.
The CLI is: vxlan-gpe protocol is <value>.
Add support for matching item NSH. The CLI is: nsh
Add support for HCA attribute query of NSH.
Enhance the validation for the case where matching on item NSH is supported.
Add NSH support in net/mlx5.
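
Putting the pieces together, an example rule combining the per-patch
examples below would be:

flow create 0 transfer group 1
 pattern eth / ipv6 / udp dst is 4790 /
 vxlan-gpe protocol is 0x04 / nsh / eth / ipv4 / tcp / end
 actions port_id id 1 / end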


Haifei Luo (5):
  app/testpmd: support for VXLAN-GPE's next protocol
  common/mlx5: extend HCA attribute query for NSH
  net/mlx5: enhance the validation for item VXLAN-GPE
  app/testpmd: support for NSH flow item
  net/mlx5: add support for item NSH

 app/test-pmd/cmdline_flow.c  | 26 ++
 drivers/common/mlx5/mlx5_devx_cmds.c |  3 ++
 drivers/common/mlx5/mlx5_devx_cmds.h |  1 +
 drivers/common/mlx5/mlx5_prm.h   |  4 ++-
 drivers/net/mlx5/mlx5_flow.c | 52 
 drivers/net/mlx5/mlx5_flow.h |  6 
 drivers/net/mlx5/mlx5_flow_dv.c  | 13 ++-
 7 files changed, 96 insertions(+), 9 deletions(-)

-- 
2.34.1



[PATCH 1/5] app/testpmd: support for VXLAN-GPE's next protocol

2023-08-09 Thread Haifei Luo
Add support for configuring VXLAN-GPE's next protocol.
The CLI is: vxlan-gpe protocol is <value>.

Example:
flow create 0 transfer group 1 pattern eth / ipv6 / udp dst is 4790
/ vxlan-gpe protocol is 0x04 / eth / ipv4  / tcp / end
actions port_id id 1 / end

Signed-off-by: Jiawei Wang 
Signed-off-by: Haifei Luo 
---
 app/test-pmd/cmdline_flow.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index 94827bcc4a..d25f941259 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -385,6 +385,7 @@ enum index {
ITEM_GENEVE_OPTLEN,
ITEM_VXLAN_GPE,
ITEM_VXLAN_GPE_VNI,
+   ITEM_VXLAN_GPE_PROTO,
ITEM_ARP_ETH_IPV4,
ITEM_ARP_ETH_IPV4_SHA,
ITEM_ARP_ETH_IPV4_SPA,
@@ -1758,6 +1759,7 @@ static const enum index item_geneve[] = {
 
 static const enum index item_vxlan_gpe[] = {
ITEM_VXLAN_GPE_VNI,
+   ITEM_VXLAN_GPE_PROTO,
ITEM_NEXT,
ZERO,
 };
@@ -4804,6 +4806,14 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
 hdr.vni)),
},
+   [ITEM_VXLAN_GPE_PROTO] = {
+   .name = "protocol",
+   .help = "VXLAN-GPE next protocol",
+   .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(COMMON_UNSIGNED),
+item_param),
+   .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
+protocol)),
+   },
[ITEM_ARP_ETH_IPV4] = {
.name = "arp_eth_ipv4",
.help = "match ARP header for Ethernet/IPv4",
-- 
2.34.1



[PATCH 2/5] common/mlx5: extend HCA attribute query for NSH

2023-08-09 Thread Haifei Luo
Add NSH support fields in two places:
1. New HCA capability indicating NSH is supported
2. New field in "mlx5_ifc_per_protocol_networking_offload_caps_bits"
   structure

Signed-off-by: Haifei Luo 
---
 drivers/common/mlx5/mlx5_devx_cmds.c | 3 +++
 drivers/common/mlx5/mlx5_devx_cmds.h | 1 +
 drivers/common/mlx5/mlx5_prm.h   | 4 +++-
 3 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c 
b/drivers/common/mlx5/mlx5_devx_cmds.c
index 66a77159a0..830199212e 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -1313,6 +1313,9 @@ mlx5_devx_cmd_query_hca_attr(void *ctx,
attr->tunnel_stateless_gtp = MLX5_GET
(per_protocol_networking_offload_caps,
 hcattr, tunnel_stateless_gtp);
+   attr->tunnel_stateless_vxlan_gpe_nsh = MLX5_GET
+   (per_protocol_networking_offload_caps,
+hcattr, 
tunnel_stateless_vxlan_gpe_nsh);
attr->rss_ind_tbl_cap = MLX5_GET
(per_protocol_networking_offload_caps,
 hcattr, rss_ind_tbl_cap);
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h 
b/drivers/common/mlx5/mlx5_devx_cmds.h
index e071cd841f..11772431ae 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -196,6 +196,7 @@ struct mlx5_hca_attr {
uint32_t tunnel_stateless_geneve_rx:1;
uint32_t geneve_max_opt_len:1; /* 0x0: 14DW, 0x1: 63DW */
uint32_t tunnel_stateless_gtp:1;
+   uint32_t tunnel_stateless_vxlan_gpe_nsh:1;
uint32_t max_lso_cap;
uint32_t scatter_fcs:1;
uint32_t lro_cap:1;
diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 51f426c614..f005877dd7 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -1964,7 +1964,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits 
{
u8 swp_lso[0x1];
u8 reserved_at_23[0x8];
u8 tunnel_stateless_gtp[0x1];
-   u8 reserved_at_25[0x4];
+   u8 reserved_at_25[0x2];
+   u8 tunnel_stateless_vxlan_gpe_nsh[0x1];
+   u8 reserved_at_28[0x1];
u8 max_vxlan_udp_ports[0x8];
u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
-- 
2.34.1



[PATCH 3/5] net/mlx5: enhance the validation for item VXLAN-GPE

2023-08-09 Thread Haifei Luo
Enhance the validation so that configuring vxlan-gpe's next protocol as NSH
is supported.

1. The spec's protocol can have a value, and the nic_mask's protocol is 0xff.

Signed-off-by: Haifei Luo 
---
 drivers/net/mlx5/mlx5_flow.c | 13 ++---
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index e91eb636d0..7de6640ecd 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3198,6 +3198,11 @@ mlx5_flow_validate_item_vxlan_gpe(const struct 
rte_flow_item *item,
uint8_t vni[4];
} id = { .vlan_id = 0, };
 
+   struct rte_flow_item_vxlan_gpe nic_mask = {
+   .vni = "\xff\xff\xff",
+   .protocol = 0xff,
+   };
+
if (!priv->sh->config.l3_vxlan_en)
return rte_flow_error_set(error, ENOTSUP,
  RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3221,18 +3226,12 @@ mlx5_flow_validate_item_vxlan_gpe(const struct 
rte_flow_item *item,
mask = &rte_flow_item_vxlan_gpe_mask;
ret = mlx5_flow_item_acceptable
(item, (const uint8_t *)mask,
-(const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
+(const uint8_t *)&nic_mask,
 sizeof(struct rte_flow_item_vxlan_gpe),
 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
if (ret < 0)
return ret;
if (spec) {
-   if (spec->hdr.proto)
-   return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM,
- item,
- "VxLAN-GPE protocol"
- " not supported");
memcpy(&id.vni[1], spec->hdr.vni, 3);
memcpy(&id.vni[1], mask->hdr.vni, 3);
}
-- 
2.34.1



[PATCH 4/5] app/testpmd: support for NSH flow item

2023-08-09 Thread Haifei Luo
Add support for item NSH. The CLI is: nsh
Example:
flow create 0 transfer group 1
 pattern eth / ipv6 / udp dst is 4790 /
 vxlan-gpe / nsh / eth / ipv4 / tcp / end
 actions port_id id 1 / end

Signed-off-by: Haifei Luo 
---
 app/test-pmd/cmdline_flow.c | 16 
 1 file changed, 16 insertions(+)

diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c
index d25f941259..bf93d649e1 100644
--- a/app/test-pmd/cmdline_flow.c
+++ b/app/test-pmd/cmdline_flow.c
@@ -525,6 +525,7 @@ enum index {
ITEM_IB_BTH_PSN,
ITEM_IPV6_PUSH_REMOVE_EXT,
ITEM_IPV6_PUSH_REMOVE_EXT_TYPE,
+   ITEM_NSH,
 
/* Validate/create actions. */
ACTIONS,
@@ -1562,6 +1563,7 @@ static const enum index next_item[] = {
ITEM_AGGR_AFFINITY,
ITEM_TX_QUEUE,
ITEM_IB_BTH,
+   ITEM_NSH,
END_SET,
ZERO,
 };
@@ -2081,6 +2083,11 @@ static const enum index item_ib_bth[] = {
ZERO,
 };
 
+static const enum index item_nsh[] = {
+   ITEM_NEXT,
+   ZERO,
+};
+
 static const enum index next_action[] = {
ACTION_END,
ACTION_VOID,
@@ -5837,6 +5844,15 @@ static const struct token token_list[] = {
.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ib_bth,
 hdr.psn)),
},
+   [ITEM_NSH] = {
+   .name = "nsh",
+   .help = "match NSH header",
+   .priv = PRIV_ITEM(NSH,
+ sizeof(struct rte_flow_item_nsh)),
+   .next = NEXT(item_nsh),
+   .call = parse_vc,
+   },
+
/* Validate/create actions. */
[ACTIONS] = {
.name = "actions",
-- 
2.34.1



[PATCH 5/5] net/mlx5: add support for item NSH

2023-08-09 Thread Haifei Luo
1. Add validation for item NSH.
   It will fail if HCA cap for NSH is false.
2. Add item_flags for NSH.
3. For vxlan-gpe if next header is NSH, set next_protocol as NSH.

Signed-off-by: Haifei Luo 
---
 drivers/net/mlx5/mlx5_flow.c| 39 +
 drivers/net/mlx5/mlx5_flow.h|  6 +
 drivers/net/mlx5/mlx5_flow_dv.c | 13 ++-
 3 files changed, 57 insertions(+), 1 deletion(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7de6640ecd..0e241acd62 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -3905,6 +3905,45 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item 
*item,
 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
+/**
+ * Validate the NSH item.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device on which flow rule is being created on.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
+   const struct rte_flow_item *item,
+   struct rte_flow_error *error)
+{
+   struct mlx5_priv *priv = dev->data->dev_private;
+
+   if (item->mask) {
+   return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "NSH fields matching is not 
supported");
+   }
+
+   if (!priv->sh->config.dv_flow_en) {
+   return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "NSH support requires DV flow 
interface");
+   }
+
+   if (!priv->sh->cdev->config.hca_attr.tunnel_stateless_vxlan_gpe_nsh) {
+   return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Current FW does not support matching 
on NSH");
+   }
+
+   return 0;
+}
+
 static int
 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
   const struct rte_flow_attr *attr __rte_unused,
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 3a97975d69..ccb416e497 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
 /* IB BTH ITEM. */
 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
 
+/* NSH ITEM */
+#define MLX5_FLOW_ITEM_NSH (1ull << 53)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
@@ -2453,6 +2456,9 @@ int mlx5_flow_validate_item_ecpri(const struct 
rte_flow_item *item,
  uint16_t ether_type,
  const struct rte_flow_item_ecpri *acc_mask,
  struct rte_flow_error *error);
+int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
+   const struct rte_flow_item *item,
+   struct rte_flow_error *error);
 int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
  struct mlx5_flow_meter_info *fm,
  uint32_t mtr_idx,
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index a8dd9920e6..4a46793758 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -7815,6 +7815,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct 
rte_flow_attr *attr,
 
last_item = MLX5_FLOW_ITEM_IB_BTH;
break;
+   case RTE_FLOW_ITEM_TYPE_NSH:
+   ret = mlx5_flow_validate_item_nsh(dev, items, error);
+   if (ret < 0)
+   return ret;
+   last_item = MLX5_FLOW_ITEM_NSH;
+   break;
default:
return rte_flow_error_set(error, ENOTSUP,
  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -9720,7 +9726,9 @@ flow_dv_translate_item_vxlan_gpe(void *key, const struct 
rte_flow_item *item,
v_protocol = vxlan_v->hdr.protocol;
if (!m_protocol) {
/* Force next protocol to ensure next headers parsing. */
-   if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
+   if (pattern_flags & MLX5_FLOW_ITEM_NSH)
+   v_protocol = RTE_VXLAN_GPE_TYPE_NSH;
+   else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
v_protocol = RTE_VXLAN_GPE_TYPE_ETH;
else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
v_protocol = RTE_VXLAN_GPE_TYPE_IPV4;
@@ -13910,6 +13918,9 @@ flow_dv_translate_items(stru

RE: C11 atomics adoption blocked

2023-08-09 Thread Morten Brørup
> From: Tyler Retzlaff [mailto:roret...@linux.microsoft.com]
> Sent: Tuesday, 8 August 2023 22.50
> 
> On Tue, Aug 08, 2023 at 10:22:09PM +0200, Morten Brørup wrote:
> > > From: Tyler Retzlaff [mailto:roret...@linux.microsoft.com]
> > > Sent: Tuesday, 8 August 2023 21.20
> > >
> > > On Tue, Aug 08, 2023 at 07:23:41PM +0100, Bruce Richardson wrote:
> > > > On Tue, Aug 08, 2023 at 10:53:03AM -0700, Tyler Retzlaff wrote:
> > > > > Hi folks,
> > > > >
> > > > > Moving this discussion to the dev mailing list for broader
> comment.
> > > > >
> > > > > Unfortunately, we've hit a roadblock with integrating C11
> atomics
> > > > > for DPDK.  The main issue is that GNU C++ prior to -std=c++23
> > > explicitly
> > > > > cannot be integrated with C11 stdatomic.h.  Basically, you can't
> > > include
> > > > > the header and you can't use `_Atomic' type specifier to declare
> > > atomic
> > > > > types. This is not a problem with LLVM or MSVC as they both
> allow
> > > > > integration with C11 stdatomic.h, but going forward with C11
> atomics
> > > > > would break using DPDK in C++ programs when building with GNU
> g++.
> > > > >
> > > > > Essentially you cannot compile the following with g++.
> > > > >
> > > > >   #include 
> > > > >
> > > > >   int main(int argc, char *argv[]) { return 0; }
> > > > >
> > > > >   In file included from atomic.cpp:1:
> > > > >   /usr/lib/gcc/x86_64-pc-cygwin/11/include/stdatomic.h:40:9:
> error:
> > > > >   ‘_Atomic’ does not name a type
> > > > >  40 | typedef _Atomic _Bool atomic_bool;
> > > > >
> > > > >   ... more errors of same ...
> > > > >
> > > > > It's also acknowledged as something known and won't fix by GNU
> g++
> > > > > maintainers.
> > > > >
> > > > > https://gcc.gnu.org/bugzilla/show_bug.cgi?id=60932
> > > > >
> > > > > Given the timeframe I would like to propose the minimally
> invasive,
> > > > > lowest risk solution as follows.
> > > > >
> > > > > 1.  Adopt stdatomic.h for all Windows targets, leave all
> Linux/BSD
> > > targets
> > > > > using GCC builtin C++11 memory model atomics.
> > > > > 2.  Introduce a macro that allows _Atomic type specifier to be
> > > applied to
> > > > > function parameter, structure field types and variable
> > > declarations.
> > > > >
> > > > > * The macro would expand empty for Linux/BSD targets.
> > > > > * The macro would expand to C11 _Atomic keyword for Windows
> > > targets.
> > > > >
> > > > > 3.  Introduce basic macro that allows __atomic_xxx  for
> normalized
> > > use
> > > > > internal to DPDK.
> > > > >
> > > > > * The macro would not be defined for Linux/BSD targets.
> > > > > * The macro would expand __atomic_xxx to corresponding
> > > stdatomic.h
> > > > >   atomic_xxx operations for Windows targets.
> > > > >
> >
> > Regarding naming of these macros (suggested in 2. and 3.), they should
> probably bear the rte_ prefix instead of overlapping existing names, so
> applications can also use them directly.
> >
> > E.g.:
> > #define rte_atomic for _Atomic or nothing,
> > #define rte_atomic_fetch_add() for atomic_fetch_add() or
> __atomic_fetch_add(), and
> > #define RTE_MEMORY_ORDER_SEQ_CST for memory_order_seq_cst or
> __ATOMIC_SEQ_CST.
> >
> > Maybe that is what you meant already. I'm not sure of the scope and
> details of your suggestion here.
> 
> I'm shy to do anything in the rte_ namespace because I don't want to
> formalize it as an API.
> 
> I was envisioning the following.
> 
> Internally DPDK code just uses __atomic_fetch_add directly, the macros
> are provided for Windows targets to expand to __atomic_fetch_add.
> 
> Externally DPDK applications that don't care about being portable may
> use __atomic_fetch_add (BSD/Linux) or atomic_fetch_add (Windows)
> directly.
> 
> Externally DPDK applications that care to be portable may do what is
> done Internally and <> the __atomic_fetch_add directly. By
> including say rte_stdatomic.h indirectly (Windows) gets the macros
> expanded to atomic_fetch_add and for BSD/Linux it's a noop include.
> 
> Basically I'm placing a little ugly into Windows built code and in trade
> we don't end up with a bunch of rte_ APIs that were strongly objected to
> previously.
> 
> It's a compromise.

OK, we probably need to offer a public header file to wrap the atomics, using 
either names prefixed with rte_ or names similar to the gcc builtin atomics.

I guess the objections were based on the assumption that we were switching to 
C11 atomics with DPDK 23.11, so the rte_ prefixed atomic APIs would be very 
short lived (DPDK 23.07 to 23.11 only). But with this new information about GNU 
C++ incompatibility, that seems not to be the case, so the naming discussion 
can be reopened.

If we don't introduce such a wrapper header, all portable code needs to 
surround the use of atomics with #ifdef USE_STDATOMIC_H.
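
To illustrate, such a wrapper header could look roughly like the sketch
below (names and layout are placeholders only, not an agreed API; variables
that are operated on atomically would be declared with RTE_ATOMIC_TYPE):

	/* rte_stdatomic.h (sketch) */
	#ifdef RTE_EXEC_ENV_WINDOWS
	#include <stdatomic.h>
	/* type specifier for atomic fields/variables */
	#define RTE_ATOMIC_TYPE _Atomic
	/* map the GCC-style builtins used in DPDK code onto stdatomic.h */
	#define __atomic_fetch_add(ptr, val, order) \
		atomic_fetch_add_explicit(ptr, val, order)
	#define __ATOMIC_RELAXED memory_order_relaxed
	#define __ATOMIC_SEQ_CST memory_order_seq_cst
	#else
	/* Linux/BSD: keep using the compiler builtins, nothing to remap */
	#define RTE_ATOMIC_TYPE
	#endif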

BTW: Can the compilers that understand both builtin atomics and C11 
stdatomics.h handle code with #define __atomic_fetch_add atomic_fetch_add and 
#define 

RE: [RFC PATCH] dmadev: offload to free source buffer

2023-08-09 Thread Morten Brørup
> From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> Sent: Wednesday, 9 August 2023 08.09
> 
> This changeset adds support in DMA library to free source DMA buffer by
> hardware. On a supported hardware, application can pass on the mempool
> information as part of vchan config when the DMA transfer direction is
> configured as RTE_DMA_DIR_MEM_TO_DEV.

Isn't the DMA source buffer a memory area, and what needs to be freed is the 
mbuf holding the memory area, i.e. two different pointers?

I like the concept. Something similar might also be useful for 
RTE_DMA_DIR_MEM_TO_MEM, e.g. packet capture. Although such a use case might 
require decrementing the mbuf refcount instead of freeing the mbuf directly to 
the mempool.
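
To make the question concrete, a rough sketch of the application side
(the mem_to_dev_src_buf_pool field name is a hypothetical placeholder,
not necessarily what the RFC defines; dev_id, vchan_id, mbuf_pool, m and
dst_iova are assumed to be set up as usual):

	struct rte_dma_vchan_conf conf = {
		.direction = RTE_DMA_DIR_MEM_TO_DEV,
		/* other mandatory vchan fields (nb_desc, port params) omitted */
		.mem_to_dev_src_buf_pool = mbuf_pool,	/* hypothetical field */
	};
	rte_dma_vchan_setup(dev_id, vchan_id, &conf);

	/* the DMA source address is the packet data area ... */
	rte_dma_copy(dev_id, vchan_id, rte_pktmbuf_iova(m), dst_iova,
		     rte_pktmbuf_data_len(m), 0);
	/* ... while what must eventually be freed is the mbuf itself (m),
	 * i.e. a different pointer than the copied data area.
	 */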

PS: It has been a while since I looked at the DMA library, so ignore my 
comments if I got this wrong.



RE: [PATCH v6] pci: read amd iommu virtual address width

2023-08-09 Thread Michael Piszczek
Hi David,

Sorry I do not have a solution for this. Can you close the patch?

Michael Piszczek

-Original Message-
From: David Marchand  
Sent: Tuesday, August 8, 2023 3:31 AM
To: Michael Piszczek 
Cc: dev@dpdk.org; Ferruh Yigit 
Subject: Re: [PATCH v6] pci: read amd iommu virtual address width

On Tue, Oct 25, 2022 at 1:54 PM David Marchand  
wrote:
>
> On Mon, Oct 24, 2022 at 5:35 PM Michael Piszczek  wrote:
> >
> > Add code to read the virtual address width for AMD processors.
> > Updated pci_device_iommu_support_va() to use glob to find iommu 
> > capability files.
> >
> > Signed-off-by: Michael Piszczek 
>
> Please have a look at the ci.
>
> Unit tests are failing with this patch because the default is changed 
> from VA to PA.
> A quick way to reproduce is to run vdev unit tests as a normal user in 
> no huge mode.
>
> Before change:
> $ DPDK_TEST=vdev_autotest ./build-gcc/app/test/dpdk-test --no-huge -m
> 2048 --log-level=*:debug
> ...
> EAL: Bus vdev wants IOVA as 'DC'
> EAL: Bus pci wants IOVA as 'DC'
> EAL: Buses did not request a specific IOVA mode.
> EAL: Physical addresses are unavailable, selecting IOVA as VA mode.
> EAL: Selected IOVA mode 'VA'
>
> After change:
> $ DPDK_TEST=vdev_autotest ./build-gcc/app/test/dpdk-test --no-huge -m
> 2048 --log-level=*:debug
> ...
> EAL: Bus vdev wants IOVA as 'DC'
> EAL: Bus pci wants IOVA as 'PA'
> EAL: FATAL: Cannot use IOVA as 'PA' since physical addresses are not 
> available
> EAL: Cannot use IOVA as 'PA' since physical addresses are not 
> available

I got no reply since a few months, marking as "Changes requested" in patchwork.
If you think this is still worth pursuing, please provide a new revision that 
does not break IOVA default mode (and the CI).

Cc: Ferruh, for info

--
David Marchand



[PATCH] app: fix silent enqueue fail in test_mbuf test_refcnt_iter

2023-08-09 Thread jhascoet
In case of ring full state, we retry the enqueue
operation in order to avoid mbuf loss.

Fixes: af75078fece ("first public release")

Signed-off-by: Julien Hascoet 
---
 app/test/test_mbuf.c | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index efac01806b..be114e3302 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -1033,12 +1033,17 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
tref += ref;
if ((ref & 1) != 0) {
rte_pktmbuf_refcnt_update(m, ref);
-   while (ref-- != 0)
-   rte_ring_enqueue(refcnt_mbuf_ring, m);
+   while (ref-- != 0) {
+   /* retry in case of failure */
+   while (rte_ring_enqueue(refcnt_mbuf_ring, m) != 
0)
+   ;
+   }
} else {
while (ref-- != 0) {
rte_pktmbuf_refcnt_update(m, 1);
-   rte_ring_enqueue(refcnt_mbuf_ring, m);
+   /* retry in case of failure */
+   while (rte_ring_enqueue(refcnt_mbuf_ring, m) != 
0)
+   ;
}
}
rte_pktmbuf_free(m);
-- 
2.34.1



Re: [PATCH] app: fix silent enqueue fail in test_mbuf test_refcnt_iter

2023-08-09 Thread David Marchand
On Wed, Aug 9, 2023 at 7:39 AM jhascoet  wrote:
>
> In case of ring full state, we retry the enqueue
> operation in order to avoid mbuf loss.
>
> Fixes: af75078fece ("first public release")

Not sure we need to backport, but in doubt, I would mark it
Cc: sta...@dpdk.org


>
> Signed-off-by: Julien Hascoet 

Olivier, the patch lgtm, can you have a look?
Thanks.


-- 
David Marchand



Re: [PATCH v7 2/3] log: separate logging functions out of EAL

2023-08-09 Thread Bruce Richardson
On Mon, Jul 31, 2023 at 05:29:04PM +0100, Bruce Richardson wrote:
> On Mon, Jul 31, 2023 at 06:22:40PM +0200, David Marchand wrote:
> > On Mon, Jul 31, 2023 at 5:39 PM Bruce Richardson
> >  wrote:
> > > diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
> > > index 1a4210b948..40f7dd2877 100644
> > > --- a/doc/api/doxy-api.conf.in
> > > +++ b/doc/api/doxy-api.conf.in
> > > @@ -52,6 +52,7 @@ INPUT   = 
> > > @TOPDIR@/doc/api/doxy-api-index.md \
> > >@TOPDIR@/lib/kni \
> > >@TOPDIR@/lib/kvargs \
> > >@TOPDIR@/lib/latencystats \
> > > +  @TOPDIR@/lib/log \
> > >@TOPDIR@/lib/lpm \
> > >@TOPDIR@/lib/mbuf \
> > >@TOPDIR@/lib/member \
> > 
> > In case you are wondering about the reason.. the Intel CI reports an
> > error on building the documentation:
> > 
> > *Build Failed #1:
> > OS: UB2204-64
> > Target: x86_64-native-linuxapp-doc
> > FAILED: doc/api/html
> > /usr/bin/python3 ../doc/api/generate_doxygen.py doc/api/html
> > /usr/bin/doxygen doc/api/doxy-api.conf
> > /root/UB2204-64_K5.15.0_GCC11.3.0/x86_64-native-linuxapp-doc/29045/dpdk/doc/api/doxy-api-index.md:230:
> > error: unable to resolve reference to 'rte_log.h' for \ref command
> > (warning treated as error, aborting now)
> > Traceback (most recent call last):
> >   File 
> > "/root/UB2204-64_K5.15.0_GCC11.3.0/x86_64-native-linuxapp-doc/29045/dpdk/x86_64-native-linuxapp-doc/../doc/api/generate_doxygen.py",
> > line 13, in 
> > subprocess.run(doxygen_command, check=True, stdout=out)
> >   File "/usr/lib/python3.10/subprocess.py", line 524, in run
> > raise CalledProcessError(retcode, process.args,
> > subprocess.CalledProcessError: Command '['/usr/bin/doxygen',
> > 'doc/api/doxy-api.conf']' returned non-zero exit status 1.
> > [2772/2774] Compiling C object app/test/dpdk-test.p/test_ring.c.o
> > [2773/2774] Generating doc/guides/html_guides with a custom command
> > ninja: build stopped
> > 
> > IIRC Intel CI excludes changes on the doc/ directory when applying patches.
> > http://inbox.dpdk.org/dev/cy5pr11mb618735108d3e9d90c67eef96f5...@cy5pr11mb6187.namprd11.prod.outlook.com/
> > 
> > .. which seems to match the failure reported here, as doxygen would
> > fail to find rte_log.h.
> > 
> Yes, I was very much wondering about the reason for failure, since I could
> find no issues with building the docs on my own system!
> 
Is this patchset ok for merge then, or is there something else outstanding
holding it back?

Thanks,
/Bruce


[PATCH 0/2] crypto/scheduler: add support for security protocols

2023-08-09 Thread David Coyle
This patchset adds support to the cryptodev scheduler PMD and unit
tests for the existing security protocols in the security library,
namely IPSec, MACSec, PDCP and DOCSIS.

David Coyle (2):
  crypto/scheduler: support security protocols
  test/crypto: add security tests for cryptodev scheduler

 app/test/test_cryptodev.c |  14 +-
 doc/guides/rel_notes/release_23_11.rst|   3 +
 drivers/crypto/scheduler/meson.build  |   2 +-
 .../scheduler/rte_cryptodev_scheduler.c   | 228 ++-
 drivers/crypto/scheduler/scheduler_failover.c |  12 +-
 .../crypto/scheduler/scheduler_multicore.c|  10 +-
 .../scheduler/scheduler_pkt_size_distr.c  |  54 +--
 drivers/crypto/scheduler/scheduler_pmd.c  |  32 ++
 drivers/crypto/scheduler/scheduler_pmd_ops.c  | 374 +-
 .../crypto/scheduler/scheduler_pmd_private.h  | 148 ---
 .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
 11 files changed, 653 insertions(+), 230 deletions(-)

-- 
2.25.1



[PATCH 1/2] crypto/scheduler: support security protocols

2023-08-09 Thread David Coyle
Add support to the cryptodev scheduler PMD for the existing security
protocols in the security library, namely IPSec, MACSec, PDCP and
DOCSIS. This includes adding the following:
- synchronization of worker's security capabilities
- retrieval of the scheduler's synchronized security capabilities
- retrieval of the security session size, i.e. the maximum session size
  across all workers (see the sketch below)
- creation of security sessions on each worker
- deletion of security sessions on each worker

Signed-off-by: David Coyle 
Signed-off-by: Kevin O'Sullivan 
---
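Notes: the "maximum session size across all workers" mentioned above can be
sketched roughly as follows (illustration only, not the driver's actual code):

	static unsigned int
	sched_sec_sess_size_max(const uint8_t *workers, unsigned int nb_workers)
	{
		unsigned int i, sz, max_sz = 0;

		for (i = 0; i < nb_workers; i++) {
			/* security context of the worker cryptodev */
			void *sec_ctx = rte_cryptodev_get_sec_ctx(workers[i]);

			if (sec_ctx == NULL)
				continue;
			sz = rte_security_session_get_size(sec_ctx);
			if (sz > max_sz)
				max_sz = sz;
		}
		return max_sz;
	}
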
 doc/guides/rel_notes/release_23_11.rst|   3 +
 drivers/crypto/scheduler/meson.build  |   2 +-
 .../scheduler/rte_cryptodev_scheduler.c   | 228 ++-
 drivers/crypto/scheduler/scheduler_failover.c |  12 +-
 .../crypto/scheduler/scheduler_multicore.c|  10 +-
 .../scheduler/scheduler_pkt_size_distr.c  |  54 +--
 drivers/crypto/scheduler/scheduler_pmd.c  |  32 ++
 drivers/crypto/scheduler/scheduler_pmd_ops.c  | 374 +-
 .../crypto/scheduler/scheduler_pmd_private.h  | 148 ---
 .../crypto/scheduler/scheduler_roundrobin.c   |   6 +-
 10 files changed, 640 insertions(+), 229 deletions(-)

diff --git a/doc/guides/rel_notes/release_23_11.rst 
b/doc/guides/rel_notes/release_23_11.rst
index 4411bb32c1..6f2a11552f 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -72,6 +72,9 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+* **Updated Cryptodev Scheduler PMD.**
+
+  Added support for security protocols through the ``rte_security`` API 
callbacks.
 
 Removed Items
 -
diff --git a/drivers/crypto/scheduler/meson.build 
b/drivers/crypto/scheduler/meson.build
index cd18efc791..752d655415 100644
--- a/drivers/crypto/scheduler/meson.build
+++ b/drivers/crypto/scheduler/meson.build
@@ -7,7 +7,7 @@ if is_windows
 subdir_done()
 endif
 
-deps += ['bus_vdev', 'reorder']
+deps += ['bus_vdev', 'reorder', 'security']
 sources = files(
 'rte_cryptodev_scheduler.c',
 'scheduler_failover.c',
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c 
b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 258d6f8c43..21fab828c1 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -10,6 +10,8 @@
 #include "rte_cryptodev_scheduler.h"
 #include "scheduler_pmd_private.h"
 
+#define MAX_CAPS 256
+
 /** update the scheduler pmd's capability with attaching device's
  *  capability.
  *  For each device to be attached, the scheduler's capability should be
@@ -59,7 +61,6 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
cap->sym.auth.digest_size.max ?
s_cap->sym.auth.digest_size.max :
cap->sym.auth.digest_size.max;
-
}
 
if (s_cap->sym.xform_type ==
@@ -81,25 +82,184 @@ sync_caps(struct rte_cryptodev_capabilities *caps,
 
memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
sync_nb_caps--;
+   i--;
}
 
return sync_nb_caps;
 }
 
+#define CMP_SEC_CAP_PROTO(proto) \
+   memcmp(&sec_cap1->proto, &sec_cap2->proto, sizeof(sec_cap1->proto))
+
 static int
-update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+check_sec_cap_equal(const struct rte_security_capability *sec_cap1,
+   struct rte_security_capability *sec_cap2)
+{
+   if (sec_cap1->action != sec_cap2->action ||
+   sec_cap1->protocol != sec_cap2->protocol ||
+   sec_cap1->ol_flags != sec_cap2->ol_flags)
+   return 0;
+
+   if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
+   return !CMP_SEC_CAP_PROTO(ipsec);
+   else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
+   return !CMP_SEC_CAP_PROTO(macsec);
+   else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_PDCP)
+   return !CMP_SEC_CAP_PROTO(pdcp);
+   else if (sec_cap1->protocol == RTE_SECURITY_PROTOCOL_DOCSIS)
+   return !CMP_SEC_CAP_PROTO(docsis);
+   else
+   return 0;
+}
+
+#define SET_SEC_CAP_PROTO(proto) (dst_sec_cap->proto = src_sec_cap->proto)
+
+static void
+copy_sec_cap(struct rte_security_capability *dst_sec_cap,
+   struct rte_security_capability *src_sec_cap)
+{
+   dst_sec_cap->action = src_sec_cap->action;
+   dst_sec_cap->protocol = src_sec_cap->protocol;
+   if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
+   SET_SEC_CAP_PROTO(ipsec);
+   else if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_MACSEC)
+   SET_SEC_CAP_PROTO(macsec);
+   else if (src_sec_cap->protocol == RTE_SECURITY_PROTOCOL_PDCP)
+   SET_SEC_C

[PATCH 2/2] test/crypto: add security tests for cryptodev scheduler

2023-08-09 Thread David Coyle
Add IPSec, PDCP and DOCSIS security test cases to the cryptodev
scheduler test suite.

Signed-off-by: David Coyle 
Signed-off-by: Kevin O'Sullivan 
---
 app/test/test_cryptodev.c | 14 +-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index fb2af40b99..3ee217314c 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -17391,6 +17391,14 @@ test_cryptodev_scheduler(void)
&scheduler_config,
&end_testsuite
};
+   struct unit_test_suite *sched_mode_static_suites[] = {
+#ifdef RTE_LIB_SECURITY
+   &ipsec_proto_testsuite,
+   &pdcp_proto_testsuite,
+   &docsis_proto_testsuite,
+#endif
+   &end_testsuite
+   };
static struct unit_test_suite ts = {
.suite_name = "Scheduler Unit Test Suite",
.setup = scheduler_testsuite_setup,
@@ -17416,9 +17424,13 @@ test_cryptodev_scheduler(void)
uint8_t blk_i = 0;
sched_mode_suites[sched_i]->unit_test_suites = malloc(sizeof
(struct unit_test_suite *) *
-   (RTE_DIM(blk_suites) + 1));
+   (RTE_DIM(blk_suites) +
+   RTE_DIM(sched_mode_static_suites) + 1));
ADD_BLOCKCIPHER_TESTSUITE(blk_i, (*sched_mode_suites[sched_i]),
blk_suites, RTE_DIM(blk_suites));
+   ADD_STATIC_TESTSUITE(blk_i, (*sched_mode_suites[sched_i]),
+   sched_mode_static_suites,
+   RTE_DIM(sched_mode_static_suites));
sched_mode_suites[sched_i]->unit_test_suites[blk_i] = 
&end_testsuite;
}
 
-- 
2.25.1



Re: [PATCH v7 1/3] eal/windows: move fnmatch function to header file

2023-08-09 Thread David Marchand
On Mon, Jul 31, 2023 at 5:39 PM Bruce Richardson
 wrote:
>
> To allow the fnmatch function to be shared between libraries, without
> having to export it into the public namespace (since it's not prefixed
> with "rte"), we can convert fnmatch.c to replace fnmatch.h. This allows
> fnmatch function to be static and limited in scope to the current file,
> preventing duplicate definitions if it is used by two libraries, while
> also not requiring export for sharing.

Overall, it lgtm.

I am surprised those 3 static symbols (see below) do not require being
marked "inline" (to avoid "unused symbols" warnings).
The CI looks ok, so probably I am just paranoid.

I have also a comment on sccsid, see below.


>
> Signed-off-by: Bruce Richardson 
> Acked-by: Morten Brørup 
> Acked-by: Tyler Retzlaff 
> ---

[snip]

> diff --git a/lib/eal/windows/include/fnmatch.h 
> b/lib/eal/windows/include/fnmatch.h
> index c6b226bd5d..fbf1eef21c 100644
> --- a/lib/eal/windows/include/fnmatch.h
> +++ b/lib/eal/windows/include/fnmatch.h
> @@ -1,20 +1,25 @@
>  /* SPDX-License-Identifier: BSD-3-Clause
> - * Copyright(c) 2019 Intel Corporation
> + * Copyright (c) 1989, 1993, 1994
> + * The Regents of the University of California.  All rights reserved.
> + *
> + * This code is derived from software contributed to Berkeley by
> + * Guido van Rossum.
>   */
> -
>  #ifndef _FNMATCH_H_
>  #define _FNMATCH_H_
>
> -/**
> - * This file is required to support the common code in eal_common_log.c
> - * as Microsoft libc does not contain fnmatch.h. This may be removed in
> - * future releases.
> +#if defined(LIBC_SCCS) && !defined(lint)
> +static const char sccsid[] = "@(#)fnmatch.c8.2 (Berkeley) 4/16/94";
> +#endif /* LIBC_SCCS and not lint */

Strange to keep this (what looks to be a canary) symbol in a header file.

[snip]

> @@ -25,6 +30,10 @@ extern "C" {
>  #define FNM_CASEFOLD 0x10
>  #define FNM_PREFIX_DIRS 0x20
>
> +#define FNM_EOS'\0'
> +
> +static const char *fnm_rangematch(const char *, char, int);
> +
>  /**
>   * This function is used for searching a given string source
>   * with the given regular expression pattern.

[snip]


> @@ -41,10 +50,150 @@ extern "C" {
>   * @return
>   * if the pattern is found then return 0 or else FNM_NOMATCH
>   */
> -int fnmatch(const char *pattern, const char *string, int flags);
> +static int
> +fnmatch(const char *pattern, const char *string, int flags)
> +{
> +   const char *stringstart;
> +   char c, test;


-- 
David Marchand



Re: [PATCH v7 2/3] log: separate logging functions out of EAL

2023-08-09 Thread David Marchand
Some nits on the doc (that I can fix when applying if you are ok with them) :

On Mon, Jul 31, 2023 at 5:39 PM Bruce Richardson
 wrote:
> diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst 
> b/doc/guides/prog_guide/env_abstraction_layer.rst
> index 93c8a031be..8033f6cebd 100644
> --- a/doc/guides/prog_guide/env_abstraction_layer.rst
> +++ b/doc/guides/prog_guide/env_abstraction_layer.rst
> @@ -443,9 +443,7 @@ Per-lcore variables are implemented using *Thread Local 
> Storage* (TLS) to provid
>  Logs
>  
>
> -A logging API is provided by EAL.
> -By default, in a Linux application, logs are sent to syslog and also to the 
> console.
> -However, the log function can be overridden by the user to use a different 
> logging mechanism.
> +While originally part of EAL, DPDK logging functionality is now provided by 
> the :ref:`Log_Library`.

:doc:`log_lib`

>
>  Trace and Debug Functions
>  ^
> diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
> index d89cd3edb6..ac91060992 100644
> --- a/doc/guides/prog_guide/index.rst
> +++ b/doc/guides/prog_guide/index.rst
> @@ -12,6 +12,7 @@ Programmer's Guide
>  overview
>  source_org
>  env_abstraction_layer
> +log_lib
>  service_cores
>  trace_lib
>  rcu_lib
> diff --git a/doc/guides/prog_guide/log_lib.rst 
> b/doc/guides/prog_guide/log_lib.rst
> new file mode 100644
> index 00..706ddcfef3
> --- /dev/null
> +++ b/doc/guides/prog_guide/log_lib.rst
> @@ -0,0 +1,115 @@
> +..  SPDX-License-Identifier: BSD-3-Clause
> +Copyright(c) 2023 Intel Corporation.
> +
> +.. _log_library:

Since we can directly point at this part of the documentation via
:doc:`/path/to/log_lib`, no need for an anchor at the top of the file.


> +
> +Log Library
> +

nit: too long by one =.

> +
> +The DPDK Log library provides the logging functionality for other DPDK 
> libraries and drivers.
> +By default, in a Linux application, logs are sent to syslog and also to the 
> console.
> +On FreeBSD and Windows applications, logs are sent only to the console.
> +However, the log function can be overridden by the user to use a different 
> logging mechanism.
> +
> +Log Levels
> +---

Idem.

> +
> +Log messages from apps and libraries are reported with a given level of 
> severity.
> +These levels, specified in ``rte_log.h`` are (from most to least important):
> +
> +#. Emergency
> +#. Alert
> +#. Critical
> +#. Error
> +#. Warning
> +#. Notice
> +#. Information
> +#. Debug
> +
> +At runtime, only messages of a configured level or above (i.e. of higher 
> importance)
> +will be emitted by the application to the log output.
> +That level can be configured either by the application calling the relevant 
> APIs from the logging library,
> +or by the user passing the ``--log-level`` parameter to the EAL via the 
> application.
> +
> +Setting Global Log Level
> +~

Idem.

> +
> +To adjust the global log level for an application,
> +just pass a numeric level or a level name to the ``--log-level`` EAL 
> parameter.
> +For example::
> +
> +   /path/to/app --log-level=error
> +
> +   /path/to/app --log-level=debug
> +
> +   /path/to/app --log-level=5   # warning
> +
> +Within an application, the log level can be similarly set using the 
> ``rte_log_set_global_level`` API.
> +
> +Setting Log Level for a Component
> +~~

Idem.

> +
> +In some cases, for example, for debugging purposes,
> +it may be desirable to increase or decrease the log level for only a 
> specific component, or set of components.
> +To facilitate this, the ``--log-level`` argument also accepts an, optionally 
> wildcarded, component name,
> +along with the desired level for that component.
> +For example::
> +
> +   /path/to/app --log-level=lib.eal:crit
> +
> +   /path/to/app --log-level=lib.*:warning
> +
> +Within an application, the same result can be got using the 
> ``rte_log_set_level_pattern()`` or ``rte_log_set_level_regex()`` APIs.
> +
> +Using Logging APIs to Generate Log Messages
> +

Idem.

> +
> +To output log messages, ``rte_log()`` API function should be used.
> +As well as the log message, ``rte_log()`` takes two additional parameters:
> +
> +* The log level
> +* The log component type
> +


-- 
David Marchand



RE: [PATCH] ethdev: add new symmetric hash function

2023-08-09 Thread Xueming(Steven) Li


> -Original Message-
> From: fengchengwen 
> Sent: 8/8/2023 9:43
> To: Ivan Malov ; Xueming(Steven) Li
> 
> Cc: Ori Kam ; dev@dpdk.org
> Subject: Re: [PATCH] ethdev: add new symmetric hash function
> 
> On 2023/8/8 6:32, Ivan Malov wrote:
> > Hi,
> >
> > Please see my notes below.
> >
> > On Mon, 7 Aug 2023, Xueming Li wrote:
> >
> >> The new symmetric hash function swap src/dst L3 address and
> >> L4 ports automatically by sorting.
> >>
> >> Signed-off-by: Xueming Li 
> >> ---
> >> lib/ethdev/rte_flow.h | 5 +
> >> 1 file changed, 5 insertions(+)
> >>
> >> diff --git a/lib/ethdev/rte_flow.h b/lib/ethdev/rte_flow.h index
> >> 86ed98c562..ec6dd170b5 100644
> >> --- a/lib/ethdev/rte_flow.h
> >> +++ b/lib/ethdev/rte_flow.h
> >> @@ -3204,6 +3204,11 @@ enum rte_eth_hash_function {
> >>  * src or dst address will xor with zero pair.
> >>  */
> >> RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ,
> >> +    /**
> >> + * Symmetric Toeplitz: src, dst will be swapped
> >> + * automatically by sorting.
> >
> > This is very vague. Consider:
> >
> > For symmetric Toeplitz, four inputs are prepared as follows:
> > - src_addr | dst_addr
> > - src_addr ^ dst_addr
> > - src_port | dst_port
> > - src_port ^ dst_port
> > and then passed to the regular Toeplitz function.
> >
> > It is important to be as specific as possible so that readers don't
> > have to guess.
> 
> +1 for this, I try to understand and google it, but can't find useful info.
> 
> Also, how this new algo with src/dst only ?
> 

Thanks for taking care of this. 
When this hash function is set, the L3 and L4 fields are sorted prior to the hash computation.
  If src_ip > dst_ip, swap src_ip and dst_ip.
  If src_port > dst_port, swap src_port and dst_port.
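
As a small illustration of that sorting step (sketch only, done before the
regular Toeplitz computation):

	static inline void
	sort_tuple(uint32_t *src_ip, uint32_t *dst_ip,
		   uint16_t *src_port, uint16_t *dst_port)
	{
		uint32_t t32;
		uint16_t t16;

		/* order each pair so both flow directions hash identically */
		if (*src_ip > *dst_ip) {
			t32 = *src_ip; *src_ip = *dst_ip; *dst_ip = t32;
		}
		if (*src_port > *dst_port) {
			t16 = *src_port; *src_port = *dst_port; *dst_port = t16;
		}
	}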

> >
> > Thank you.
> >
> >> + */
> >> +    RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT,
> >> RTE_ETH_HASH_FUNCTION_MAX,
> 
> The new value will break the definition of MAX (maybe ABI compatible).
> but I found only hns3 drivers use RTE_ETH_HASH_FUNCTION_MAX, not sure
> the application will use it.
> 
> >> };
> >>
> >> --
> >> 2.25.1
> >>
> >>
> >
> > .


Re: [PATCH v7 2/3] log: separate logging functions out of EAL

2023-08-09 Thread David Marchand
Sorry, two additional comments after running more checks.

lib/log files are not referenced in MAINTAINERS, can you add a new block?

> diff --git a/lib/log/meson.build b/lib/log/meson.build
> new file mode 100644
> index 00..6baff83ee5
> --- /dev/null
> +++ b/lib/log/meson.build
> @@ -0,0 +1,9 @@
> +# SPDX-License-Identifier: BSD-3-Clause
> +# Copyright(c) 2022 Intel Corporation
> +
> +includes += global_inc
> +sources = files(
> +'log.c',
> +'log_' + exec_env + '.c'
> +)

Missing a , (reported by check-meson.py).

> +headers = files('rte_log.h')


-- 
David Marchand



Re: [PATCH v7 2/3] log: separate logging functions out of EAL

2023-08-09 Thread Bruce Richardson
On Wed, Aug 09, 2023 at 02:24:09PM +0200, David Marchand wrote:
> Sorry, two additional comments after running more checks.
> 
> lib/log files are not referenced in MAINTAINERS, can you add a new block?
> 

Yep.
> > diff --git a/lib/log/meson.build b/lib/log/meson.build
> > new file mode 100644
> > index 00..6baff83ee5
> > --- /dev/null
> > +++ b/lib/log/meson.build
> > @@ -0,0 +1,9 @@
> > +# SPDX-License-Identifier: BSD-3-Clause
> > +# Copyright(c) 2022 Intel Corporation
> > +
> > +includes += global_inc
> > +sources = files(
> > +'log.c',
> > +'log_' + exec_env + '.c'
> > +)
> 
> Missing a , (reported by check-meson.py).
> 
> > +headers = files('rte_log.h')
> 
Will review all feedback and spin a new revision.
thanks,
/Bruce


Re: [PATCH v7 1/3] eal/windows: move fnmatch function to header file

2023-08-09 Thread Bruce Richardson
On Wed, Aug 09, 2023 at 01:18:56PM +0200, David Marchand wrote:
> On Mon, Jul 31, 2023 at 5:39 PM Bruce Richardson
>  wrote:
> >
> > To allow the fnmatch function to be shared between libraries, without
> > having to export it into the public namespace (since it's not prefixed
> > with "rte"), we can convert fnmatch.c to replace fnmatch.h. This allows
> > fnmatch function to be static and limited in scope to the current file,
> > preventing duplicate definitions if it is used by two libraries, while
> > also not requiring export for sharing.
> 
> Overall, it lgtm.
> 
> I am surprised those 3 static symbols (see below) do not require being
> marked "inline" (to avoid "unused symbols" warnings).
> The CI looks ok, so probably I am just paranoid.

Only the functions should need the inline, and I suspect that we don't get
any warnings, since the only files including the header always use the
functions. Will mark them as inline just in case in the next version.
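To illustrate the point being discussed, here is a minimal, hypothetical header snippet (not the actual fnmatch header) showing why a plain static function in a shared header can draw unused-function warnings while a static inline one does not:

	/* shared_helpers.h - hypothetical example, not the actual fnmatch header */

	/* A plain static function defined in a header is duplicated into every
	 * translation unit that includes it; a unit that never calls it can
	 * trigger -Wunused-function on common compilers.
	 */
	static int helper_plain(int x) { return x + 1; }

	/* Marking it static inline keeps the definition local to each
	 * translation unit but suppresses the unused-function warning,
	 * which is why the fnmatch helpers are marked inline in v8.
	 */
	static inline int helper_inline(int x) { return x + 1; }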

> 
> I have also a comment on sccsid, see below.
> 
> 
> >
> > Signed-off-by: Bruce Richardson 
> > Acked-by: Morten Brørup 
> > Acked-by: Tyler Retzlaff 
> > ---
> 
> [snip]
> 
> > diff --git a/lib/eal/windows/include/fnmatch.h 
> > b/lib/eal/windows/include/fnmatch.h
> > index c6b226bd5d..fbf1eef21c 100644
> > --- a/lib/eal/windows/include/fnmatch.h
> > +++ b/lib/eal/windows/include/fnmatch.h
> > @@ -1,20 +1,25 @@
> >  /* SPDX-License-Identifier: BSD-3-Clause
> > - * Copyright(c) 2019 Intel Corporation
> > + * Copyright (c) 1989, 1993, 1994
> > + * The Regents of the University of California.  All rights reserved.
> > + *
> > + * This code is derived from software contributed to Berkeley by
> > + * Guido van Rossum.
> >   */
> > -
> >  #ifndef _FNMATCH_H_
> >  #define _FNMATCH_H_
> >
> > -/**
> > - * This file is required to support the common code in eal_common_log.c
> > - * as Microsoft libc does not contain fnmatch.h. This may be removed in
> > - * future releases.
> > +#if defined(LIBC_SCCS) && !defined(lint)
> > +static const char sccsid[] = "@(#)fnmatch.c8.2 (Berkeley) 4/16/94";
> > +#endif /* LIBC_SCCS and not lint */
> 
> Strange to keep this (what looks to be a canary) symbol in a header file.
> 
> [snip]
> 

Yeah, missed that, can probably be removed.

> > @@ -25,6 +30,10 @@ extern "C" {
> >  #define FNM_CASEFOLD 0x10
> >  #define FNM_PREFIX_DIRS 0x20
> >
> > +#define FNM_EOS'\0'
> > +
> > +static const char *fnm_rangematch(const char *, char, int);
> > +
> >  /**
> >   * This function is used for searching a given string source
> >   * with the given regular expression pattern.
> 
> [snip]
> 
> 
> > @@ -41,10 +50,150 @@ extern "C" {
> >   * @return
> >   * if the pattern is found then return 0 or else FNM_NOMATCH
> >   */
> > -int fnmatch(const char *pattern, const char *string, int flags);
> > +static int
> > +fnmatch(const char *pattern, const char *string, int flags)
> > +{
> > +   const char *stringstart;
> > +   char c, test;
> 
> 
> -- 
> David Marchand
> 


[PATCH v8 0/3] Split logging functionality out of EAL

2023-08-09 Thread Bruce Richardson
There is a general desire to reduce the size and scope of EAL. To this
end, this patchset makes a (very) small step in that direction by taking
the logging functionality out of EAL and putting it into its own library
that can be built and maintained separately.

As with the first RFC for this, the main obstacle is the "fnmatch"
function, which is needed by both EAL and the new log library when
building on Windows. The function cannot stay in EAL, or we would
have a circular dependency, but moving it to a new library or putting
it in the log library has the disadvantage that it then "leaks" into
the public namespace without an rte_ prefix, which could cause issues.
Since only a single function is involved, subsequent versions take a
different approach to v1 and just move the offending function to be a
static function in a header file. This allows use by multiple libraries
without conflicting names or making it public.

The other complication, as explained in the v1 RFC, was that of multiple
implementations for different OS's. This is solved here in the same
way as in v1, by including the OS in the file name and having meson pick
the correct file for each build. Since only one file is involved, there
seemed little need to replicate EAL's separate per-OS subdirectories.

V8:
* Added "inline" to static functions in fnmatch header
* Removed SCCS tag as unneeded carryover from .c file
* Corrected doc cross-references and headers
* Added maintainers entry

V7:
* re-submit to re-run CI with ABI checks disabled

v6:
* Updated ABI version to DPDK_24 for new log library for 23.11 release.

v5:
* rebased to latest main branch
* fixed trailing whitespace issues in new doc section

v4:
* Fixed windows build error, due to missing strdup (_strdup on windows)
* Added doc updates to programmers guide.

v3:
* Fixed missing log file for BSD
* Removed "eal" from the filenames of files in the log directory
* added prefixes to elements in the fnmatch header to avoid conflicts
* fixed space indentation in new lines in telemetry.c (checkpatch)
* removed "extern int logtype" definition in telemetry.c (checkpatch)
* added log directory to list for doxygen scanning

Bruce Richardson (3):
  eal/windows: move fnmatch function to header file
  log: separate logging functions out of EAL
  telemetry: use standard logging

 MAINTAINERS   |   6 +-
 doc/api/doxy-api.conf.in  |   1 +
 .../prog_guide/env_abstraction_layer.rst  |   4 +-
 doc/guides/prog_guide/index.rst   |   1 +
 doc/guides/prog_guide/log_lib.rst | 113 
 lib/eal/common/eal_common_options.c   |   2 +-
 lib/eal/common/eal_private.h  |   7 -
 lib/eal/common/meson.build|   1 -
 lib/eal/freebsd/eal.c |   6 +-
 lib/eal/include/meson.build   |   1 -
 lib/eal/linux/eal.c   |   8 +-
 lib/eal/linux/meson.build |   1 -
 lib/eal/meson.build   |   2 +-
 lib/eal/version.map   |  17 --
 lib/eal/windows/eal.c |   2 +-
 lib/eal/windows/fnmatch.c | 172 --
 lib/eal/windows/include/fnmatch.h | 169 +++--
 lib/eal/windows/meson.build   |   2 -
 lib/kvargs/meson.build|   3 +-
 .../common/eal_common_log.c => log/log.c} |   7 +-
 lib/log/log_freebsd.c |  12 ++
 .../common/eal_log.h => log/log_internal.h}   |  18 +-
 lib/{eal/linux/eal_log.c => log/log_linux.c}  |   2 +-
 .../windows/eal_log.c => log/log_windows.c}   |   2 +-
 lib/log/meson.build   |   9 +
 lib/{eal/include => log}/rte_log.h|   0
 lib/log/version.map   |  34 
 lib/meson.build   |   1 +
 lib/telemetry/meson.build |   3 +-
 lib/telemetry/telemetry.c |  11 +-
 lib/telemetry/telemetry_internal.h|   3 +-
 31 files changed, 367 insertions(+), 253 deletions(-)
 create mode 100644 doc/guides/prog_guide/log_lib.rst
 delete mode 100644 lib/eal/windows/fnmatch.c
 rename lib/{eal/common/eal_common_log.c => log/log.c} (99%)
 create mode 100644 lib/log/log_freebsd.c
 rename lib/{eal/common/eal_log.h => log/log_internal.h} (69%)
 rename lib/{eal/linux/eal_log.c => log/log_linux.c} (97%)
 rename lib/{eal/windows/eal_log.c => log/log_windows.c} (93%)
 create mode 100644 lib/log/meson.build
 rename lib/{eal/include => log}/rte_log.h (100%)
 create mode 100644 lib/log/version.map

--
2.39.2



[PATCH v8 1/3] eal/windows: move fnmatch function to header file

2023-08-09 Thread Bruce Richardson
To allow the fnmatch function to be shared between libraries, without
having to export it into the public namespace (since it's not prefixed
with "rte"), we can merge fnmatch.c into fnmatch.h. This allows the
fnmatch function to be static and limited in scope to the current file,
preventing duplicate definitions if it is used by two libraries, while
also not requiring an export for sharing.

Signed-off-by: Bruce Richardson 
Acked-by: Morten Brørup 
Acked-by: Tyler Retzlaff 
---
 lib/eal/windows/fnmatch.c | 172 --
 lib/eal/windows/include/fnmatch.h | 169 ++---
 lib/eal/windows/meson.build   |   1 -
 3 files changed, 156 insertions(+), 186 deletions(-)
 delete mode 100644 lib/eal/windows/fnmatch.c

diff --git a/lib/eal/windows/fnmatch.c b/lib/eal/windows/fnmatch.c
deleted file mode 100644
index f622bf54c5..00
--- a/lib/eal/windows/fnmatch.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 1989, 1993, 1994
- * The Regents of the University of California.  All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Guido van Rossum.
- */
-
-#if defined(LIBC_SCCS) && !defined(lint)
-static const char sccsid[] = "@(#)fnmatch.c8.2 (Berkeley) 4/16/94";
-#endif /* LIBC_SCCS and not lint */
-
-/*
- * Function fnmatch() as specified in POSIX 1003.2-1992, section B.6.
- * Compares a filename or pathname to a pattern.
- */
-
-#include 
-#include 
-#include 
-
-#include "fnmatch.h"
-
-#define EOS'\0'
-
-static const char *rangematch(const char *, char, int);
-
-int
-fnmatch(const char *pattern, const char *string, int flags)
-{
-   const char *stringstart;
-   char c, test;
-
-   for (stringstart = string;;)
-   switch (c = *pattern++) {
-   case EOS:
-   if ((flags & FNM_LEADING_DIR) && *string == '/')
-   return (0);
-   return (*string == EOS ? 0 : FNM_NOMATCH);
-   case '?':
-   if (*string == EOS)
-   return (FNM_NOMATCH);
-   if (*string == '/' && (flags & FNM_PATHNAME))
-   return (FNM_NOMATCH);
-   if (*string == '.' && (flags & FNM_PERIOD) &&
-   (string == stringstart ||
-   ((flags & FNM_PATHNAME) && *(string - 1) == '/')))
-   return (FNM_NOMATCH);
-   ++string;
-   break;
-   case '*':
-   c = *pattern;
-   /* Collapse multiple stars. */
-   while (c == '*')
-   c = *++pattern;
-
-   if (*string == '.' && (flags & FNM_PERIOD) &&
-   (string == stringstart ||
-   ((flags & FNM_PATHNAME) && *(string - 1) == '/')))
-   return (FNM_NOMATCH);
-
-   /* Optimize for pattern with * at end or before /. */
-   if (c == EOS)
-   if (flags & FNM_PATHNAME)
-   return ((flags & FNM_LEADING_DIR) ||
-   strchr(string, '/') == NULL ?
-   0 : FNM_NOMATCH);
-   else
-   return (0);
-   else if (c == '/' && flags & FNM_PATHNAME) {
-   string = strchr(string, '/');
-   if (string == NULL)
-   return (FNM_NOMATCH);
-   break;
-   }
-
-   /* General case, use recursion. */
-   while ((test = *string) != EOS) {
-   if (!fnmatch(pattern, string,
-   flags & ~FNM_PERIOD))
-   return (0);
-   if (test == '/' && flags & FNM_PATHNAME)
-   break;
-   ++string;
-   }
-   return (FNM_NOMATCH);
-   case '[':
-   if (*string == EOS)
-   return (FNM_NOMATCH);
-   if (*string == '/' && flags & FNM_PATHNAME)
-   return (FNM_NOMATCH);
-   pattern = rangematch(pattern, *string, flags);
-   if (pattern == NULL)
-   return (FNM_NOMATCH);
-   ++string;
-   break;
-   case '\\':
-   if (!(flags & FNM_NOESCAPE)) {
-   c

[PATCH v8 3/3] telemetry: use standard logging

2023-08-09 Thread Bruce Richardson
Now that logging is moved out of EAL, we don't need injection of the
logtype and logging function from EAL into the telemetry library, which
simplifies things.

Signed-off-by: Bruce Richardson 
Acked-by: Morten Brørup 
Acked-by: Tyler Retzlaff 
---
 lib/eal/freebsd/eal.c  |  6 +-
 lib/eal/linux/eal.c|  6 +-
 lib/telemetry/telemetry.c  | 11 +++
 lib/telemetry/telemetry_internal.h |  3 +--
 4 files changed, 6 insertions(+), 20 deletions(-)

diff --git a/lib/eal/freebsd/eal.c b/lib/eal/freebsd/eal.c
index dfe973e6fd..0175d89e4b 100644
--- a/lib/eal/freebsd/eal.c
+++ b/lib/eal/freebsd/eal.c
@@ -898,13 +898,9 @@ rte_eal_init(int argc, char **argv)
return -1;
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY && 
!internal_conf->no_telemetry) {
-   int tlog = rte_log_register_type_and_pick_level(
-   "lib.telemetry", RTE_LOG_WARNING);
-   if (tlog < 0)
-   tlog = RTE_LOGTYPE_EAL;
if (rte_telemetry_init(rte_eal_get_runtime_dir(),
rte_version(),
-   &internal_conf->ctrl_cpuset, rte_log, tlog) != 
0)
+   &internal_conf->ctrl_cpuset) != 0)
return -1;
}
 
diff --git a/lib/eal/linux/eal.c b/lib/eal/linux/eal.c
index 477ad03d38..ed50576d99 100644
--- a/lib/eal/linux/eal.c
+++ b/lib/eal/linux/eal.c
@@ -1314,13 +1314,9 @@ rte_eal_init(int argc, char **argv)
return -1;
}
if (rte_eal_process_type() == RTE_PROC_PRIMARY && 
!internal_conf->no_telemetry) {
-   int tlog = rte_log_register_type_and_pick_level(
-   "lib.telemetry", RTE_LOG_WARNING);
-   if (tlog < 0)
-   tlog = RTE_LOGTYPE_EAL;
if (rte_telemetry_init(rte_eal_get_runtime_dir(),
rte_version(),
-   &internal_conf->ctrl_cpuset, rte_log, tlog) != 
0)
+   &internal_conf->ctrl_cpuset) != 0)
return -1;
}
 
diff --git a/lib/telemetry/telemetry.c b/lib/telemetry/telemetry.c
index 590720bfa6..7d0488a6d7 100644
--- a/lib/telemetry/telemetry.c
+++ b/lib/telemetry/telemetry.c
@@ -54,11 +54,9 @@ static struct socket v1_socket; /* socket for v1 telemetry */
 static const char *telemetry_version; /* save rte_version */
 static const char *socket_dir;/* runtime directory */
 static rte_cpuset_t *thread_cpuset;
-static rte_log_fn rte_log_ptr;
-static uint32_t logtype;
 
-#define TMTY_LOG(l, ...) \
-rte_log_ptr(RTE_LOG_ ## l, logtype, "TELEMETRY: " __VA_ARGS__)
+RTE_LOG_REGISTER_DEFAULT(logtype, WARNING);
+#define TMTY_LOG(l, ...) rte_log(RTE_LOG_ ## l, logtype, "TELEMETRY: " 
__VA_ARGS__)
 
 /* list of command callbacks, with one command registered by default */
 static struct cmd_callback *callbacks;
@@ -627,14 +625,11 @@ telemetry_v2_init(void)
 #endif /* !RTE_EXEC_ENV_WINDOWS */
 
 int32_t
-rte_telemetry_init(const char *runtime_dir, const char *rte_version, 
rte_cpuset_t *cpuset,
-   rte_log_fn log_fn, uint32_t registered_logtype)
+rte_telemetry_init(const char *runtime_dir, const char *rte_version, 
rte_cpuset_t *cpuset)
 {
telemetry_version = rte_version;
socket_dir = runtime_dir;
thread_cpuset = cpuset;
-   rte_log_ptr = log_fn;
-   logtype = registered_logtype;
 
 #ifndef RTE_EXEC_ENV_WINDOWS
if (telemetry_v2_init() != 0)
diff --git a/lib/telemetry/telemetry_internal.h 
b/lib/telemetry/telemetry_internal.h
index 80e2bd3a49..b331e9458f 100644
--- a/lib/telemetry/telemetry_internal.h
+++ b/lib/telemetry/telemetry_internal.h
@@ -108,7 +108,6 @@ typedef int (*rte_log_fn)(uint32_t level, uint32_t logtype, 
const char *format,
  */
 __rte_internal
 int
-rte_telemetry_init(const char *runtime_dir, const char *rte_version, 
rte_cpuset_t *cpuset,
-   rte_log_fn log_fn, uint32_t registered_logtype);
+rte_telemetry_init(const char *runtime_dir, const char *rte_version, 
rte_cpuset_t *cpuset);
 
 #endif
-- 
2.39.2



[PATCH v8 2/3] log: separate logging functions out of EAL

2023-08-09 Thread Bruce Richardson
Move the logging capability to a separate library, free from EAL. Rename
files as appropriate, and use meson.build to select the correct file to
be built for each operating system, rather than having a subdirectory per
OS. Add a new documentation section in the programmer's guide to cover
logging in more detail.

Signed-off-by: Bruce Richardson 
Acked-by: Morten Brørup 
Acked-by: Tyler Retzlaff 
---
 MAINTAINERS   |   6 +-
 doc/api/doxy-api.conf.in  |   1 +
 .../prog_guide/env_abstraction_layer.rst  |   4 +-
 doc/guides/prog_guide/index.rst   |   1 +
 doc/guides/prog_guide/log_lib.rst | 113 ++
 lib/eal/common/eal_common_options.c   |   2 +-
 lib/eal/common/eal_private.h  |   7 --
 lib/eal/common/meson.build|   1 -
 lib/eal/include/meson.build   |   1 -
 lib/eal/linux/eal.c   |   2 +-
 lib/eal/linux/meson.build |   1 -
 lib/eal/meson.build   |   2 +-
 lib/eal/version.map   |  17 ---
 lib/eal/windows/eal.c |   2 +-
 lib/eal/windows/meson.build   |   1 -
 lib/kvargs/meson.build|   3 +-
 .../common/eal_common_log.c => log/log.c} |   7 +-
 lib/log/log_freebsd.c |  12 ++
 .../common/eal_log.h => log/log_internal.h}   |  18 ++-
 lib/{eal/linux/eal_log.c => log/log_linux.c}  |   2 +-
 .../windows/eal_log.c => log/log_windows.c}   |   2 +-
 lib/log/meson.build   |   9 ++
 lib/{eal/include => log}/rte_log.h|   0
 lib/log/version.map   |  34 ++
 lib/meson.build   |   1 +
 lib/telemetry/meson.build |   3 +-
 26 files changed, 205 insertions(+), 47 deletions(-)
 create mode 100644 doc/guides/prog_guide/log_lib.rst
 rename lib/{eal/common/eal_common_log.c => log/log.c} (99%)
 create mode 100644 lib/log/log_freebsd.c
 rename lib/{eal/common/eal_log.h => log/log_internal.h} (69%)
 rename lib/{eal/linux/eal_log.c => log/log_linux.c} (97%)
 rename lib/{eal/windows/eal_log.c => log/log_windows.c} (93%)
 create mode 100644 lib/log/meson.build
 rename lib/{eal/include => log}/rte_log.h (100%)
 create mode 100644 lib/log/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index 6345e7f8a6..8c3f2c993f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -164,7 +164,6 @@ F: app/test/test_devargs.c
 F: app/test/test_eal*
 F: app/test/test_errno.c
 F: app/test/test_lcores.c
-F: app/test/test_logs.c
 F: app/test/test_memcpy*
 F: app/test/test_per_lcore.c
 F: app/test/test_pflock.c
@@ -177,6 +176,11 @@ F: app/test/test_tailq.c
 F: app/test/test_threads.c
 F: app/test/test_version.c
 
+Logging
+F: lib/log/
+F: doc/guides/prog_guide/log_lib.rst
+F: app/test/test_logs.c
+
 Trace - EXPERIMENTAL
 M: Jerin Jacob 
 M: Sunil Kumar Kori 
diff --git a/doc/api/doxy-api.conf.in b/doc/api/doxy-api.conf.in
index 31885039c7..a88accd907 100644
--- a/doc/api/doxy-api.conf.in
+++ b/doc/api/doxy-api.conf.in
@@ -50,6 +50,7 @@ INPUT   = @TOPDIR@/doc/api/doxy-api-index.md \
   @TOPDIR@/lib/jobstats \
   @TOPDIR@/lib/kvargs \
   @TOPDIR@/lib/latencystats \
+  @TOPDIR@/lib/log \
   @TOPDIR@/lib/lpm \
   @TOPDIR@/lib/mbuf \
   @TOPDIR@/lib/member \
diff --git a/doc/guides/prog_guide/env_abstraction_layer.rst 
b/doc/guides/prog_guide/env_abstraction_layer.rst
index 5d382fdd90..89014789de 100644
--- a/doc/guides/prog_guide/env_abstraction_layer.rst
+++ b/doc/guides/prog_guide/env_abstraction_layer.rst
@@ -443,9 +443,7 @@ Per-lcore variables are implemented using *Thread Local 
Storage* (TLS) to provid
 Logs
 
 
-A logging API is provided by EAL.
-By default, in a Linux application, logs are sent to syslog and also to the 
console.
-However, the log function can be overridden by the user to use a different 
logging mechanism.
+While originally part of EAL, DPDK logging functionality is now provided by 
the :doc:`log_lib`.
 
 Trace and Debug Functions
 ^
diff --git a/doc/guides/prog_guide/index.rst b/doc/guides/prog_guide/index.rst
index 2c47d9d010..52a6d9e7aa 100644
--- a/doc/guides/prog_guide/index.rst
+++ b/doc/guides/prog_guide/index.rst
@@ -12,6 +12,7 @@ Programmer's Guide
 overview
 source_org
 env_abstraction_layer
+log_lib
 service_cores
 trace_lib
 rcu_lib
diff --git a/doc/guides/prog_guide/log_lib.rst 
b/doc/guides/prog_guide/log_lib.rst
new file mode 100644
index 00..19e295fc9b
--- /dev/null
+++ b/doc/guides/prog_guide/log_lib.rst
@@ -0,0 +1,113 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2023 Intel Corporation.
+
+Log Library
+===
+
+The DPDK Log library p

[PATCH] app: fix silent enqueue fail in test_mbuf test_refcnt_iter

2023-08-09 Thread jhascoet
From: Julien Hascoet 

When the ring is full, we retry the enqueue
operation in order to avoid mbuf loss.

Fixes: af75078fece ("first public release")

Signed-off-by: Julien Hascoet 
---
 app/test/test_mbuf.c | 11 ---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index efac01806b..be114e3302 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -1033,12 +1033,17 @@ test_refcnt_iter(unsigned int lcore, unsigned int iter,
tref += ref;
if ((ref & 1) != 0) {
rte_pktmbuf_refcnt_update(m, ref);
-   while (ref-- != 0)
-   rte_ring_enqueue(refcnt_mbuf_ring, m);
+   while (ref-- != 0) {
+   /* retry in case of failure */
+   while (rte_ring_enqueue(refcnt_mbuf_ring, m) != 
0)
+   ;
+   }
} else {
while (ref-- != 0) {
rte_pktmbuf_refcnt_update(m, 1);
-   rte_ring_enqueue(refcnt_mbuf_ring, m);
+   /* retry in case of failure */
+   while (rte_ring_enqueue(refcnt_mbuf_ring, m) != 
0)
+   ;
}
}
rte_pktmbuf_free(m);
-- 
2.34.1



Re: [PATCH v2 2/2] kni: remove deprecated kernel network interface

2023-08-09 Thread Patrick Robb
On Fri, Aug 4, 2023 at 9:19 AM David Marchand 
wrote:

> Hello CI people,
>
> On Tue, Aug 1, 2023 at 6:05 PM Stephen Hemminger
>  wrote:
> >  kernel/linux/kni/Kbuild   |   6 -
> >  kernel/linux/kni/compat.h | 157 
> >  kernel/linux/kni/kni_dev.h| 137 ---
> >  kernel/linux/kni/kni_fifo.h   |  87 --
> >  kernel/linux/kni/kni_misc.c   | 719 --
> >  kernel/linux/kni/kni_net.c| 878 --
> >  kernel/linux/kni/meson.build  |  41 -
> >  kernel/linux/meson.build  |   2 +-
>
> This is a heads up for KNI removal in the *main* branch.
>
> With this removal, there is no remaining out of tree Linux kernel
> module to compile/test in DPDK sources.
> This means that jobs (like the one in UNH lab that was compile-testing
> KNI against the latest Linux kernel sources) can be disabled.
>
> Important note: this mail does not ask for any change to LTS releases
> testing.
> If KNI was built and tested with LTS releases, you should continue to do
> so.
>
>
> --
> David Marchand
>
Sorry, I didn't see this at first. We are currently running kmods testing
1x/day for the periodic testing on the main branch, per the original
request from last year. I am disabling this now.

It makes sense to me that this testing should be shifted to 22.11 LTS
periodic runs, but I will leave it up to the community to decide whether
that is valuable or not.


[RFC 0/3] Introduce event link profiles

2023-08-09 Thread pbhagavatula
From: Pavan Nikhilesh 

A collection of event queues linked to an event port can be associated
with a unique identifier called a profile. Multiple such profiles can
be configured based on the event device capability using the function
`rte_event_port_link_with_profile`, which takes arguments similar to
`rte_event_port_link` in addition to the profile identifier.

The maximum number of link profiles supported by an event device is
advertised through the structure member
`rte_event_dev_info::max_profiles_per_port`.

By default, event ports are configured to use the link profile 0 on
initialization.

Once multiple link profiles are set up and the event device is started, the
application can use the function `rte_event_port_change_profile` to change
the currently active profile on an event port. This takes effect from the next
`rte_event_dequeue_burst` call, where the event queues associated with the
newly active link profile will participate in scheduling.

A rudimentary workflow would look something like this:

Config path:

uint8_t lowQ[4] = {4, 5, 6, 7};
uint8_t highQ[4] = {0, 1, 2, 3};

if (rte_event_dev_info.max_profiles_per_port < 2)
return -ENOTSUP;

rte_event_port_link_with_profile(0, 0, highQ, NULL, 4, 0);
rte_event_port_link_with_profile(0, 0, lowQ, NULL, 4, 1);

Worker path:

empty_high_deq = 0;
empty_low_deq = 0;
is_low_deq = 0;
while (1) {
deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
if (deq == 0) {
/**
 * Change link profile based on work activity on current
 * active profile
 */
if (is_low_deq) {
empty_low_deq++;
if (empty_low_deq == MAX_LOW_RETRY) {
rte_event_port_change_profile(0, 0, 0);
is_low_deq = 0;
empty_low_deq = 0;
}
continue;
}

empty_high_deq++;
if (empty_high_deq == MAX_HIGH_RETRY) {
rte_event_port_change_profile(0, 0, 1);
is_low_deq = 1;
empty_high_deq = 0;
}
continue;
}

// Process the event received.

if (is_low_deq++ == MAX_LOW_EVENTS) {
rte_event_port_change_profile(0, 0, 0);
is_low_deq = 0;
}
}

An application could use heuristic data of load/activity of a given event
port and change its active profile to adapt to the traffic pattern.

An unlink function `rte_event_port_unlink_with_profile` is provided to
modify the links associated with a profile, and
`rte_event_port_links_get_with_profile` can be used to retrieve the links
associated with a profile, as sketched below.
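For example (a sketch using the function names proposed in this RFC; the unlink
signature is assumed to mirror rte_event_port_unlink() with a trailing profile
argument):

	uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t q = 5;
	int nb;

	/* drop queue 5 from profile 1 on port 0 of device 0 */
	nb = rte_event_port_unlink_with_profile(0, 0, &q, 1, 1);

	/* read back the queues still linked to profile 1 */
	nb = rte_event_port_links_get_with_profile(0, 0, queues, priorities, 1);
	/* queues[0] .. queues[nb - 1] now hold the queue ids linked to profile 1 */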

Pavan Nikhilesh (3):
  eventdev: introduce link profiles
  event/cnxk: implement event link profiles
  test/event: add event link profile test

 app/test/test_eventdev.c   | 110 ++
 config/rte_config.h|   1 +
 doc/guides/eventdevs/cnxk.rst  |   1 +
 doc/guides/prog_guide/eventdev.rst |  58 ++
 drivers/common/cnxk/roc_nix_inl_dev.c  |   4 +-
 drivers/common/cnxk/roc_sso.c  |  18 +-
 drivers/common/cnxk/roc_sso.h  |   8 +-
 drivers/common/cnxk/roc_sso_priv.h |   4 +-
 drivers/event/cnxk/cn10k_eventdev.c|  45 ++--
 drivers/event/cnxk/cn10k_worker.c  |  11 +
 drivers/event/cnxk/cn10k_worker.h  |   1 +
 drivers/event/cnxk/cn9k_eventdev.c |  72 ---
 drivers/event/cnxk/cn9k_worker.c   |  22 ++
 drivers/event/cnxk/cn9k_worker.h   |   2 +
 drivers/event/cnxk/cnxk_eventdev.c |  34 ++--
 drivers/event/cnxk/cnxk_eventdev.h |  10 +-
 drivers/event/dlb2/dlb2.c  |   1 +
 drivers/event/dpaa/dpaa_eventdev.c |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.c   |   2 +-
 drivers/event/dsw/dsw_evdev.c  |   1 +
 drivers/event/octeontx/ssovf_evdev.c   |   2 +-
 drivers/event/opdl/opdl_evdev.c|   1 +
 drivers/event/skeleton/skeleton_eventdev.c |   1 +
 drivers/event/sw/sw_evdev.c|   1 +
 lib/eventdev/eventdev_pmd.h|  59 +-
 lib/eventdev/eventdev_private.c|   9 +
 lib/eventdev/eventdev_trace.h  |  22 ++
 lib/eventdev/eventdev_trace_points.c   |   6 +
 lib/eventdev/rte_eventdev.c| 146 ++---
 lib/eventdev/rte_eventdev.h| 226 +
 lib/eventdev/rte_eventdev_core.h   |   4 +
 lib/eventdev/rte_eventdev_trace_fp.h   |   8 +
 lib/eventdev/version.map   |   5 +
 33 files changed, 788 insertions(+), 108 deletions(-)

--
2.25.1



[RFC 1/3] eventdev: introduce link profiles

2023-08-09 Thread pbhagavatula
From: Pavan Nikhilesh 

A collection of event queues linked to an event port can be
associated with a unique identifier called a profile. Multiple
such profiles can be created based on the event device capability
using the function `rte_event_port_link_with_profile`, which takes
arguments similar to `rte_event_port_link` in addition to the profile
identifier.

The maximum link profiles that are supported by an event device
is advertised through the structure member
`rte_event_dev_info::max_profiles_per_port`.
By default, event ports are configured to use the link profile 0
on initialization.

Once multiple link profiles are set up and the event device is started,
the application can use the function `rte_event_port_change_profile`
to change the currently active profile on an event port. This takes effect
from the next `rte_event_dequeue_burst` call, where the event queues
associated with the newly active link profile will participate in
scheduling.

An unlink function `rte_event_port_unlink_with_profile` is provided
to modify the links associated with a profile, and
`rte_event_port_links_get_with_profile` can be used to retrieve the
links associated with a profile.

Signed-off-by: Pavan Nikhilesh 
---
 config/rte_config.h|   1 +
 doc/guides/prog_guide/eventdev.rst |  58 ++
 drivers/event/cnxk/cnxk_eventdev.c |   3 +-
 drivers/event/dlb2/dlb2.c  |   1 +
 drivers/event/dpaa/dpaa_eventdev.c |   1 +
 drivers/event/dpaa2/dpaa2_eventdev.c   |   2 +-
 drivers/event/dsw/dsw_evdev.c  |   1 +
 drivers/event/octeontx/ssovf_evdev.c   |   2 +-
 drivers/event/opdl/opdl_evdev.c|   1 +
 drivers/event/skeleton/skeleton_eventdev.c |   1 +
 drivers/event/sw/sw_evdev.c|   1 +
 lib/eventdev/eventdev_pmd.h|  59 +-
 lib/eventdev/eventdev_private.c|   9 +
 lib/eventdev/eventdev_trace.h  |  22 ++
 lib/eventdev/eventdev_trace_points.c   |   6 +
 lib/eventdev/rte_eventdev.c| 146 ++---
 lib/eventdev/rte_eventdev.h| 226 +
 lib/eventdev/rte_eventdev_core.h   |   4 +
 lib/eventdev/rte_eventdev_trace_fp.h   |   8 +
 lib/eventdev/version.map   |   5 +
 20 files changed, 527 insertions(+), 30 deletions(-)

diff --git a/config/rte_config.h b/config/rte_config.h
index 400e44e3cf..d43b3eecb8 100644
--- a/config/rte_config.h
+++ b/config/rte_config.h
@@ -73,6 +73,7 @@
 #define RTE_EVENT_MAX_DEVS 16
 #define RTE_EVENT_MAX_PORTS_PER_DEV 255
 #define RTE_EVENT_MAX_QUEUES_PER_DEV 255
+#define RTE_EVENT_MAX_PROFILES_PER_PORT 8
 #define RTE_EVENT_TIMER_ADAPTER_NUM_MAX 32
 #define RTE_EVENT_ETH_INTR_RING_SIZE 1024
 #define RTE_EVENT_CRYPTO_ADAPTER_MAX_INSTANCE 32
diff --git a/doc/guides/prog_guide/eventdev.rst 
b/doc/guides/prog_guide/eventdev.rst
index 2c83176846..3a1016543c 100644
--- a/doc/guides/prog_guide/eventdev.rst
+++ b/doc/guides/prog_guide/eventdev.rst
@@ -317,6 +317,64 @@ can be achieved like this:
 }
 int links_made = rte_event_port_link(dev_id, tx_port_id, 
&single_link_q, &priority, 1);
 
+An application can also use link profiles, if supported by the underlying event
+device, to set up multiple link profiles per port and change them at runtime
+depending on heuristic data.
+
+An example use case could be as follows.
+
+Config path:
+
+.. code-block:: c
+
+uint8_t lowQ[4] = {4, 5, 6, 7};
+uint8_t highQ[4] = {0, 1, 2, 3};
+
+if (rte_event_dev_info.max_profiles_per_port < 2)
+return -ENOTSUP;
+
+rte_event_port_link_with_profile(0, 0, highQ, NULL, 4, 0);
+rte_event_port_link_with_profile(0, 0, lowQ, NULL, 4, 1);
+
+Worker path:
+
+.. code-block:: c
+
+uint8_t empty_high_deq = 0;
+uint8_t empty_low_deq = 0;
+uint8_t is_low_deq = 0;
+while (1) {
+deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
+if (deq == 0) {
+/**
+ * Change link profile based on work activity on current
+ * active profile
+ */
+if (is_low_deq) {
+empty_low_deq++;
+if (empty_low_deq == MAX_LOW_RETRY) {
+rte_event_port_change_profile(0, 0, 0);
+is_low_deq = 0;
+empty_low_deq = 0;
+}
+continue;
+}
+
+if (empty_high_deq == MAX_HIGH_RETRY) {
+rte_event_port_change_profile(0, 0, 1);
+is_low_deq = 1;
+empty_high_deq = 0;
+}
+continue;
+}
+
+// Process the event received.
+
+if (is_low_deq++ == MAX_LOW_EVENTS) {
+rte_event_port_change_profile(0, 0, 0);
+is_low_deq = 0;
+}
+}
+
 Startin

[RFC 2/3] event/cnxk: implement event link profiles

2023-08-09 Thread pbhagavatula
From: Pavan Nikhilesh 

Implement event link profile support on CN10K and CN9K.
Both platforms support up to two link profiles.

Signed-off-by: Pavan Nikhilesh 
---
 doc/guides/eventdevs/cnxk.rst |  1 +
 drivers/common/cnxk/roc_nix_inl_dev.c |  4 +-
 drivers/common/cnxk/roc_sso.c | 18 +++
 drivers/common/cnxk/roc_sso.h |  8 +--
 drivers/common/cnxk/roc_sso_priv.h|  4 +-
 drivers/event/cnxk/cn10k_eventdev.c   | 45 +++--
 drivers/event/cnxk/cn10k_worker.c | 11 
 drivers/event/cnxk/cn10k_worker.h |  1 +
 drivers/event/cnxk/cn9k_eventdev.c| 72 ---
 drivers/event/cnxk/cn9k_worker.c  | 22 
 drivers/event/cnxk/cn9k_worker.h  |  2 +
 drivers/event/cnxk/cnxk_eventdev.c| 35 +++--
 drivers/event/cnxk/cnxk_eventdev.h| 10 ++--
 13 files changed, 153 insertions(+), 80 deletions(-)

diff --git a/doc/guides/eventdevs/cnxk.rst b/doc/guides/eventdevs/cnxk.rst
index 1a59233282..cccb8a0304 100644
--- a/doc/guides/eventdevs/cnxk.rst
+++ b/doc/guides/eventdevs/cnxk.rst
@@ -48,6 +48,7 @@ Features of the OCTEON cnxk SSO PMD are:
 - HW managed event vectorization on CN10K for packets enqueued from ethdev to
   eventdev configurable per each Rx queue in Rx adapter.
 - Event vector transmission via Tx adapter.
+- Up to 2 event link profiles.
 
 Prerequisites and Compilation procedure
 ---
diff --git a/drivers/common/cnxk/roc_nix_inl_dev.c 
b/drivers/common/cnxk/roc_nix_inl_dev.c
index d76158e30d..690d47c045 100644
--- a/drivers/common/cnxk/roc_nix_inl_dev.c
+++ b/drivers/common/cnxk/roc_nix_inl_dev.c
@@ -285,7 +285,7 @@ nix_inl_sso_setup(struct nix_inl_dev *inl_dev)
}
 
/* Setup hwgrp->hws link */
-   sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, true);
+   sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, true);
 
/* Enable HWGRP */
plt_write64(0x1, inl_dev->sso_base + SSO_LF_GGRP_QCTL);
@@ -315,7 +315,7 @@ nix_inl_sso_release(struct nix_inl_dev *inl_dev)
nix_inl_sso_unregister_irqs(inl_dev);
 
/* Unlink hws */
-   sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, false);
+   sso_hws_link_modify(0, inl_dev->ssow_base, NULL, hwgrp, 1, 0, false);
 
/* Release XAQ aura */
sso_hwgrp_release_xaq(&inl_dev->dev, 1);
diff --git a/drivers/common/cnxk/roc_sso.c b/drivers/common/cnxk/roc_sso.c
index a5f48d5bbc..f063184565 100644
--- a/drivers/common/cnxk/roc_sso.c
+++ b/drivers/common/cnxk/roc_sso.c
@@ -185,8 +185,8 @@ sso_rsrc_get(struct roc_sso *roc_sso)
 }
 
 void
-sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp,
-   uint16_t hwgrp[], uint16_t n, uint16_t enable)
+sso_hws_link_modify(uint8_t hws, uintptr_t base, struct plt_bitmap *bmp, 
uint16_t hwgrp[],
+   uint16_t n, uint8_t set, uint16_t enable)
 {
uint64_t reg;
int i, j, k;
@@ -203,7 +203,7 @@ sso_hws_link_modify(uint8_t hws, uintptr_t base, struct 
plt_bitmap *bmp,
k = n % 4;
k = k ? k : 4;
for (j = 0; j < k; j++) {
-   mask[j] = hwgrp[i + j] | enable << 14;
+   mask[j] = hwgrp[i + j] | (uint32_t)set << 12 | enable 
<< 14;
if (bmp) {
enable ? plt_bitmap_set(bmp, hwgrp[i + j]) :
 plt_bitmap_clear(bmp, hwgrp[i + j]);
@@ -289,8 +289,8 @@ roc_sso_ns_to_gw(struct roc_sso *roc_sso, uint64_t ns)
 }
 
 int
-roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[],
-uint16_t nb_hwgrp)
+roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], 
uint16_t nb_hwgrp,
+uint8_t set)
 {
struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
struct sso *sso;
@@ -298,14 +298,14 @@ roc_sso_hws_link(struct roc_sso *roc_sso, uint8_t hws, 
uint16_t hwgrp[],
 
sso = roc_sso_to_sso_priv(roc_sso);
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
-   sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 1);
+   sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 
set, 1);
 
return nb_hwgrp;
 }
 
 int
-roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[],
-  uint16_t nb_hwgrp)
+roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, uint16_t hwgrp[], 
uint16_t nb_hwgrp,
+  uint8_t set)
 {
struct dev *dev = &roc_sso_to_sso_priv(roc_sso)->dev;
struct sso *sso;
@@ -313,7 +313,7 @@ roc_sso_hws_unlink(struct roc_sso *roc_sso, uint8_t hws, 
uint16_t hwgrp[],
 
sso = roc_sso_to_sso_priv(roc_sso);
base = dev->bar2 + (RVU_BLOCK_ADDR_SSOW << 20 | hws << 12);
-   sso_hws_link_modify(hws, base, sso->link_map[hws], hwgrp, nb_hwgrp, 0);
+   sso_hws_link_modify(hws, base,

[RFC 3/3] test/event: add event link profile test

2023-08-09 Thread pbhagavatula
From: Pavan Nikhilesh 

Add test case to verify event link profiles.

Signed-off-by: Pavan Nikhilesh 
---
 app/test/test_eventdev.c | 110 +++
 1 file changed, 110 insertions(+)

diff --git a/app/test/test_eventdev.c b/app/test/test_eventdev.c
index 336529038e..acce7cced8 100644
--- a/app/test/test_eventdev.c
+++ b/app/test/test_eventdev.c
@@ -1129,6 +1129,114 @@ test_eventdev_link_get(void)
return TEST_SUCCESS;
 }
 
+static int
+test_eventdev_change_profile(void)
+{
+#define MAX_RETRIES   4
+   uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+   uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+   struct rte_event_queue_conf qcfg;
+   struct rte_event_port_conf pcfg;
+   struct rte_event_dev_info info;
+   struct rte_event ev;
+   uint8_t q, re;
+   int rc;
+
+   rte_event_dev_info_get(TEST_DEV_ID, &info);
+
+   if (info.max_profiles_per_port <= 1)
+   return TEST_SKIPPED;
+
+   if (info.max_event_queues <= 1)
+   return TEST_SKIPPED;
+
+   rc = rte_event_port_default_conf_get(TEST_DEV_ID, 0, &pcfg);
+   TEST_ASSERT_SUCCESS(rc, "Failed to get port0 default config");
+   rc = rte_event_port_setup(TEST_DEV_ID, 0, &pcfg);
+   TEST_ASSERT_SUCCESS(rc, "Failed to setup port0");
+
+   rc = rte_event_queue_default_conf_get(TEST_DEV_ID, 0, &qcfg);
+   TEST_ASSERT_SUCCESS(rc, "Failed to get queue0 default config");
+   rc = rte_event_queue_setup(TEST_DEV_ID, 0, &qcfg);
+   TEST_ASSERT_SUCCESS(rc, "Failed to setup queue0");
+
+   q = 0;
+   rc = rte_event_port_link_with_profile(TEST_DEV_ID, 0, &q, NULL, 1, 0);
+   TEST_ASSERT(rc == 1, "Failed to link queue 0 to port 0 with profile 0");
+   q = 1;
+   rc = rte_event_port_link_with_profile(TEST_DEV_ID, 0, &q, NULL, 1, 1);
+   TEST_ASSERT(rc == 1, "Failed to link queue 1 to port 0 with profile 1");
+
+   rc = rte_event_port_links_get_with_profile(TEST_DEV_ID, 0, queues, 
priorities, 0);
+   TEST_ASSERT(rc == 1, "Failed to links");
+   TEST_ASSERT(queues[0] == 0, "Invalid queue found in link");
+
+   rc = rte_event_port_links_get_with_profile(TEST_DEV_ID, 0, queues, 
priorities, 1);
+   TEST_ASSERT(rc == 1, "Failed to links");
+   TEST_ASSERT(queues[0] == 1, "Invalid queue found in link");
+
+   rc = rte_event_dev_start(TEST_DEV_ID);
+   TEST_ASSERT_SUCCESS(rc, "Failed to start event device");
+
+   ev.event_type = RTE_EVENT_TYPE_CPU;
+   ev.queue_id = 0;
+   ev.op = RTE_EVENT_OP_NEW;
+   ev.flow_id = 0;
+   ev.u64 = 0xBADF00D0;
+   rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1);
+   TEST_ASSERT(rc == 1, "Failed to enqueue event");
+   ev.queue_id = 1;
+   ev.flow_id = 1;
+   rc = rte_event_enqueue_burst(TEST_DEV_ID, 0, &ev, 1);
+   TEST_ASSERT(rc == 1, "Failed to enqueue event");
+
+   ev.event = 0;
+   ev.u64 = 0;
+
+   rc = rte_event_port_change_profile(TEST_DEV_ID, 0, 1);
+   TEST_ASSERT_SUCCESS(rc, "Failed to change profile");
+
+   re = MAX_RETRIES;
+   while (re--) {
+   rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
+   printf("rc %d\n", rc);
+   if (rc)
+   break;
+   }
+
+   TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 1");
+   TEST_ASSERT(ev.flow_id == 1, "Incorrect flow identifier from profile 
1");
+   TEST_ASSERT(ev.queue_id == 1, "Incorrect queue identifier from profile 
1");
+
+   re = MAX_RETRIES;
+   while (re--) {
+   rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
+   TEST_ASSERT(rc == 0, "Unexpected event dequeued from active 
profile");
+   }
+
+   rc = rte_event_port_change_profile(TEST_DEV_ID, 0, 0);
+   TEST_ASSERT_SUCCESS(rc, "Failed to change profile");
+
+   re = MAX_RETRIES;
+   while (re--) {
+   rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
+   if (rc)
+   break;
+   }
+
+   TEST_ASSERT(rc == 1, "Failed to dequeue event from profile 1");
+   TEST_ASSERT(ev.flow_id == 0, "Incorrect flow identifier from profile 
0");
+   TEST_ASSERT(ev.queue_id == 0, "Incorrect queue identifier from profile 
0");
+
+   re = MAX_RETRIES;
+   while (re--) {
+   rc = rte_event_dequeue_burst(TEST_DEV_ID, 0, &ev, 1, 0);
+   TEST_ASSERT(rc == 0, "Unexpected event dequeued from active 
profile");
+   }
+
+   return TEST_SUCCESS;
+}
+
 static int
 test_eventdev_close(void)
 {
@@ -1187,6 +1295,8 @@ static struct unit_test_suite eventdev_common_testsuite  
= {
test_eventdev_timeout_ticks),
TEST_CASE_ST(NULL, NULL,
test_eventdev_start_stop),
+   TEST_CASE_ST(eventdev_configure_setup, eventdev_stop_device,
+   test_eventdev_change_profile),
 

RE: [RFC PATCH] dmadev: offload to free source buffer

2023-08-09 Thread Amit Prakash Shukla
Hi Morten,

Please find my reply in-line.

Thanks,
Amit Shukla

> -Original Message-
> From: Morten Brørup 
> Sent: Wednesday, August 9, 2023 2:37 PM
> To: Amit Prakash Shukla ; Chengwen Feng
> ; Kevin Laatz ; Bruce
> Richardson 
> Cc: dev@dpdk.org; Jerin Jacob Kollanukkaran ;
> conor.wa...@intel.com; Vamsi Krishna Attunuru ;
> g.si...@nxp.com; sachin.sax...@oss.nxp.com; hemant.agra...@nxp.com;
> cheng1.ji...@intel.com; Nithin Kumar Dabilpuram
> ; Anoob Joseph 
> Subject: [EXT] RE: [RFC PATCH] dmadev: offload to free source buffer
> 
> External Email
> 
> --
> > From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> > Sent: Wednesday, 9 August 2023 08.09
> >
> > This changeset adds support in DMA library to free source DMA buffer
> > by hardware. On a supported hardware, application can pass on the
> > mempool information as part of vchan config when the DMA transfer
> > direction is configured as RTE_DMA_DIR_MEM_TO_DEV.
> 
> Isn't the DMA source buffer a memory area, and what needs to be freed is
> the mbuf holding the memory area, i.e. two different pointers?
No, it is the same pointer. Assume the mbuf is created from a mempool; the
mempool needs to be given via the vchan config, and the iova passed to
rte_dma_copy/rte_dma_copy_sg can be any address within the mbuf area of the
given mempool element.
For example, if the mempool element size is S and the buffer dequeued from the
mempool is at X, any address in (X, X+S) can be given as the iova to
rte_dma_copy.
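A rough sketch of that usage (the vchan config member carrying the mempool is
what this RFC proposes and is not shown here; the variable names are
illustrative):

	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);	/* mp element size is S */
	rte_iova_t src = rte_pktmbuf_iova(m);		/* any address in [X, X+S) is valid */

	/* vchan was configured with RTE_DMA_DIR_MEM_TO_DEV and the mempool mp
	 * passed in the vchan config as proposed here, so on completion the
	 * hardware can return the element backing 'm' to mp.
	 */
	if (rte_dma_copy(dev_id, vchan, src, dst_dev_iova,
			 rte_pktmbuf_data_len(m), RTE_DMA_OP_FLAG_SUBMIT) < 0)
		rte_pktmbuf_free(m);	/* fall back to freeing in software */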

> 
> I like the concept. Something similar might also be useful for
> RTE_DMA_DIR_MEM_TO_MEM, e.g. packet capture. Although such a use
> case might require decrementing the mbuf refcount instead of freeing the
> mbuf directly to the mempool.
This operation is not supported in our hardware. It can be implemented in the
future if any hardware supports it.

> 
> PS: It has been a while since I looked at the DMA library, so ignore my
> comments if I got this wrong.



RE: [RFC PATCH] dmadev: offload to free source buffer

2023-08-09 Thread Morten Brørup
> From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> Sent: Wednesday, 9 August 2023 16.27
> 
> > From: Morten Brørup 
> > Sent: Wednesday, August 9, 2023 2:37 PM
> >
> > > From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> > > Sent: Wednesday, 9 August 2023 08.09
> > >
> > > This changeset adds support in DMA library to free source DMA buffer
> > > by hardware. On a supported hardware, application can pass on the
> > > mempool information as part of vchan config when the DMA transfer
> > > direction is configured as RTE_DMA_DIR_MEM_TO_DEV.
> >
> > Isn't the DMA source buffer a memory area, and what needs to be freed
> is
> > the mbuf holding the memory area, i.e. two different pointers?
> No, it is same pointer. Assume mbuf created via mempool, mempool needs to
> be given via vchan config and iova passed to
> rte_dma_copy/rte_dma_copy_sg's can be any address in mbuf area of given
> mempool element.
> For example, mempool element size is S. dequeued buff from mempool is at
> X. Any address in (X, X+S) can be given as iova to rte_dma_copy.

So the DMA library determines the pointer to the mbuf (in the given mempool) by 
looking at the iova passed to rte_dma_copy/rte_dma_copy_sg, and then calls 
rte_mempool_put with that pointer?

> 
> >
> > I like the concept. Something similar might also be useful for
> > RTE_DMA_DIR_MEM_TO_MEM, e.g. packet capture. Although such a use
> > case might require decrementing the mbuf refcount instead of freeing
> the
> > mbuf directly to the mempool.
> This operation is not supported in our hardware. It can be implemented in
> future if any hardware supports it.

OK, I didn't expect that - just floating the idea. :-)

> 
> >
> > PS: It has been a while since I looked at the DMA library, so ignore my
> > comments if I got this wrong.



Re: [RFC PATCH 0/5] replace build code for unit tests

2023-08-09 Thread Patrick Robb
It will break our 32-bit Arm testing because we are unable to use meson
test after building for that SoC, so we run through the unit tests with a
custom list using just dpdk-test. It's not a problem; I just need to know
when this is going to reach main so I can make the corresponding change to
the 32-bit unit testing script beforehand.


Re: [RFC PATCH 0/5] replace build code for unit tests

2023-08-09 Thread Bruce Richardson
On Wed, Aug 09, 2023 at 10:53:21AM -0400, Patrick Robb wrote:
>It will break our 32 bit arm testing because we are unable to use meson
>test after building for that SoC, so we run through unit tests with a
>custom list using just dpdk test. It's not a problem, I just need to
>know when this is going to reach main so I can make the corresponding
>change to the 32 bit unit testing script beforehand.

The change is necessary because the test binary has moved? Just want to
check there are no other issues. I might see if I can provide a symlink or
something for compatibility if it helps.


Re: [PATCH 00/20] remove experimental flag from some API's

2023-08-09 Thread Stephen Hemminger
On Tue, 8 Aug 2023 16:23:43 -0700
Tyler Retzlaff  wrote:

> > 
> > bpf: not built on Windows. Needs some libelf.
> > pdump: not built on Windows. Needs bpf for filtering

A different topic, is it possible to get pdump working on Windows?
Is there a pcap and elf library?  

It might be possible to split out the libelf dependency in the bpf library.
Libelf is used to load an external file, but some uses only need internal data.


[PATCH v3] doc: update QAT cryptodev guide to run on aarch64

2023-08-09 Thread Dharmik Thakkar
Update the guide with instructions to run on the Aarch64-based Ampere Altra
platform.

Signed-off-by: Dharmik Thakkar 
Reviewed-by: Ruifeng Wang 
---
v3:
 - Fix duplicate target warning by using anonymous reference
v2:
 - Update driver guide with instructions to run on aarch64 instead
 ---
 .mailmap  | 2 +-
 doc/guides/cryptodevs/qat.rst | 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/.mailmap b/.mailmap
index 864d33ee46fb..bb79fd4673ef 100644
--- a/.mailmap
+++ b/.mailmap
@@ -319,7 +319,7 @@ Devendra Singh Rawat 
 Dex Chen 
 Dexia Li 
 Dexuan Cui 
-Dharmik Thakkar 
+Dharmik Thakkar 
 Dheemanth Mallikarjun 
 Diana Wang 
 Didier Pallard 
diff --git a/doc/guides/cryptodevs/qat.rst b/doc/guides/cryptodevs/qat.rst
index afdfb0bd226b..9557b49be308 100644
--- a/doc/guides/cryptodevs/qat.rst
+++ b/doc/guides/cryptodevs/qat.rst
@@ -384,6 +384,12 @@ The "rte_cryptodev_devices_get()" returns the devices 
exposed by either of these
   _, e.g. ":41:01.0_qat_comp".
   This name can be passed to rte_compressdev_get_dev_id() to get the device_id.
 
+
+Running QAT on Aarch64 based Ampere Altra platform
+~~
+
+Requires Linux kernel v6.0+. See also `this patch description 
`__.
+
 .. _qat_kernel:
 
 Dependency on the QAT kernel driver
-- 
2.25.1



Re: [RFC PATCH 0/5] replace build code for unit tests

2023-08-09 Thread Patrick Robb
On Wed, Aug 9, 2023 at 11:05 AM Bruce Richardson 
wrote:

> On Wed, Aug 09, 2023 at 10:53:21AM -0400, Patrick Robb wrote:
> >It will break our 32 bit arm testing because we are unable to use
> meson
> >test after building for that SoC, so we run through unit tests with a
> >custom list using just dpdk test. It's not a problem, I just need to
> >know when this is going to reach main so I can make the corresponding
> >change to the 32 bit unit testing script beforehand.
>
> The change is necessary because the test binary has moved? Just want to
> check there are no other issues. I might see if I can provide a symlink or
> something for compatibility if it helps.
>

That's correct, there are no other issues, so it really is simple to
correct the path in our script. A symlink should work, but I guess it
becomes clutter.


[PATCH] net/bonding: Fix header for C++

2023-08-09 Thread Visa Hankala
Apply C linkage to the whole header to allow use with C++.

Fixes: dc40f17a36b ("net/bonding: allow external state machine in mode 4")

Signed-off-by: Visa Hankala 

diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h 
b/drivers/net/bonding/rte_eth_bond_8023ad.h
index 7ad8d6d00b..921b4446b7 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -197,10 +197,6 @@ int
 rte_eth_bond_8023ad_slave_info(uint16_t port_id, uint16_t slave_id,
struct rte_eth_bond_8023ad_slave_info *conf);
 
-#ifdef __cplusplus
-}
-#endif
-
 /**
  * Configure a slave port to start collecting.
  *
@@ -331,4 +327,9 @@ rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
 int
 rte_eth_bond_8023ad_agg_selection_set(uint16_t port_id,
enum rte_bond_8023ad_agg_selection agg_selection);
+
+#ifdef __cplusplus
+}
+#endif
+
 #endif /* RTE_ETH_BOND_8023AD_H_ */


[PATCH 00/15] eal: mark older API's stable

2023-08-09 Thread Stephen Hemminger
About 80 functions in EAL were marked experimental
and should have been made stable by now.

Stephen Hemminger (15):
  eal: make bitops a stable API
  eal: mark rte_dev API's as stable
  eal: make rte_class API's stable
  eal: make rte_version_XXX API's stable
  eal: make rte_drand a stable API
  eal: make rte_service_lcore_may_be_active stable
  eal: make rte_devargs_reset stable
  eal: make pflock API stable
  eal: make seqcount and seqlock stable
  eal: mark rte_intr_XXX API's as stable
  eal: mark rte_atomic128_cmp_exchange as stable
  eal: make most rte_thread API's stable
  eal: mark rte_power API's stable
  eal: mark rte_eal_vfio_get_token stable
  eal: mark rte_vect simd bandwidth API as stable

 lib/eal/arm/include/rte_atomic_64.h   |   1 -
 lib/eal/include/generic/rte_atomic.h  |   1 -
 .../include/generic/rte_power_intrinsics.h|  16 ---
 lib/eal/include/generic/rte_vect.h|   8 --
 lib/eal/include/rte_bitmap.h  |   8 --
 lib/eal/include/rte_bitops.h  |  40 --
 lib/eal/include/rte_class.h   |   4 -
 lib/eal/include/rte_dev.h |  32 -
 lib/eal/include/rte_devargs.h |   1 -
 lib/eal/include/rte_eal.h |   4 -
 lib/eal/include/rte_interrupts.h  |  28 
 lib/eal/include/rte_pflock.h  |  20 ---
 lib/eal/include/rte_random.h  |   4 -
 lib/eal/include/rte_seqcount.h|  23 ---
 lib/eal/include/rte_seqlock.h |  21 ---
 lib/eal/include/rte_service.h |   1 -
 lib/eal/include/rte_thread.h  |  57 
 lib/eal/include/rte_version.h |   6 -
 lib/eal/version.map   | 132 --
 lib/eal/x86/include/rte_atomic_64.h   |   1 -
 20 files changed, 55 insertions(+), 353 deletions(-)

-- 
2.39.2



[PATCH 01/15] eal: make bitops a stable API

2023-08-09 Thread Stephen Hemminger
These were added in 20.05 release.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_bitmap.h |  8 
 lib/eal/include/rte_bitops.h | 40 
 2 files changed, 48 deletions(-)

diff --git a/lib/eal/include/rte_bitmap.h b/lib/eal/include/rte_bitmap.h
index 46a822768d50..ec819595624c 100644
--- a/lib/eal/include/rte_bitmap.h
+++ b/lib/eal/include/rte_bitmap.h
@@ -203,9 +203,6 @@ rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t 
mem_size)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Bitmap clear slab overhead bits.
  *
  * @param slabs
@@ -215,7 +212,6 @@ rte_bitmap_init(uint32_t n_bits, uint8_t *mem, uint32_t 
mem_size)
  * @param pos
  *   The start bit position in the slabs to be cleared.
  */
-__rte_experimental
 static inline void
 __rte_bitmap_clear_slab_overhead_bits(uint64_t *slabs, uint32_t slab_size,
  uint32_t pos)
@@ -235,9 +231,6 @@ __rte_bitmap_clear_slab_overhead_bits(uint64_t *slabs, 
uint32_t slab_size,
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Bitmap initialization with all bits set
  *
  * @param n_bits
@@ -249,7 +242,6 @@ __rte_bitmap_clear_slab_overhead_bits(uint64_t *slabs, 
uint32_t slab_size,
  * @return
  *   Handle to bitmap instance.
  */
-__rte_experimental
 static inline struct rte_bitmap *
 rte_bitmap_init_with_all_set(uint32_t n_bits, uint8_t *mem, uint32_t mem_size)
 {
diff --git a/lib/eal/include/rte_bitops.h b/lib/eal/include/rte_bitops.h
index f50dbe43880c..41b1878841fc 100644
--- a/lib/eal/include/rte_bitops.h
+++ b/lib/eal/include/rte_bitops.h
@@ -40,9 +40,6 @@ extern "C" {
 /* 32-bit relaxed operations */
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Get the target bit from a 32-bit value without memory ordering.
  *
  * @param nr
@@ -52,7 +49,6 @@ extern "C" {
  * @return
  *   The target bit.
  */
-__rte_experimental
 static inline uint32_t
 rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t *addr)
 {
@@ -63,9 +59,6 @@ rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t 
*addr)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Set the target bit in a 32-bit value to 1 without memory ordering.
  *
  * @param nr
@@ -73,7 +66,6 @@ rte_bit_relaxed_get32(unsigned int nr, volatile uint32_t 
*addr)
  * @param addr
  *   The address holding the bit.
  */
-__rte_experimental
 static inline void
 rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t *addr)
 {
@@ -84,9 +76,6 @@ rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t 
*addr)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Clear the target bit in a 32-bit value to 0 without memory ordering.
  *
  * @param nr
@@ -94,7 +83,6 @@ rte_bit_relaxed_set32(unsigned int nr, volatile uint32_t 
*addr)
  * @param addr
  *   The address holding the bit.
  */
-__rte_experimental
 static inline void
 rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t *addr)
 {
@@ -105,9 +93,6 @@ rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t 
*addr)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Return the original bit from a 32-bit value, then set it to 1 without
  * memory ordering.
  *
@@ -118,7 +103,6 @@ rte_bit_relaxed_clear32(unsigned int nr, volatile uint32_t 
*addr)
  * @return
  *   The original bit.
  */
-__rte_experimental
 static inline uint32_t
 rte_bit_relaxed_test_and_set32(unsigned int nr, volatile uint32_t *addr)
 {
@@ -131,9 +115,6 @@ rte_bit_relaxed_test_and_set32(unsigned int nr, volatile 
uint32_t *addr)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Return the original bit from a 32-bit value, then clear it to 0 without
  * memory ordering.
  *
@@ -144,7 +125,6 @@ rte_bit_relaxed_test_and_set32(unsigned int nr, volatile 
uint32_t *addr)
  * @return
  *   The original bit.
  */
-__rte_experimental
 static inline uint32_t
 rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile uint32_t *addr)
 {
@@ -159,9 +139,6 @@ rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile 
uint32_t *addr)
 /* 64-bit relaxed operations */
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Get the target bit from a 64-bit value without memory ordering.
  *
  * @param nr
@@ -171,7 +148,6 @@ rte_bit_relaxed_test_and_clear32(unsigned int nr, volatile 
uint32_t *addr)
  * @return
  *   The target bit.
  */
-__rte_experimental
 static inline uint64_t
 rte_bit_relaxed_get64(unsigned int nr, volatile uint64_t *addr)
 {
@@ -182,9 +158,6 @@ rte_bit_relaxed_get64(unsigned int nr, vol

[PATCH 02/15] eal: mark rte_dev API's as stable

2023-08-09 Thread Stephen Hemminger
These have been around since 2020.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_dev.h | 32 
 lib/eal/version.map   | 28 +++-
 2 files changed, 11 insertions(+), 49 deletions(-)

diff --git a/lib/eal/include/rte_dev.h b/lib/eal/include/rte_dev.h
index 8568535ac04b..86ef2f54235d 100644
--- a/lib/eal/include/rte_dev.h
+++ b/lib/eal/include/rte_dev.h
@@ -341,7 +341,6 @@ typedef void *(*rte_dev_iterate_t)(const void *start,
  *   0 on successful initialization.
  *   <0 on error.
  */
-__rte_experimental
 int
 rte_dev_iterator_init(struct rte_dev_iterator *it, const char *str);
 
@@ -361,7 +360,6 @@ rte_dev_iterator_init(struct rte_dev_iterator *it, const 
char *str);
  *   NULL if an error occurred (rte_errno is set).
  *   NULL if no device could be found (rte_errno is not set).
  */
-__rte_experimental
 struct rte_device *
 rte_dev_iterator_next(struct rte_dev_iterator *it);
 
@@ -372,9 +370,6 @@ rte_dev_iterator_next(struct rte_dev_iterator *it);
 dev = rte_dev_iterator_next(it))
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * It registers the callback for the specific device.
  * Multiple callbacks can be registered at the same time.
  *
@@ -390,16 +385,12 @@ rte_dev_iterator_next(struct rte_dev_iterator *it);
  *  - On success, zero.
  *  - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_event_callback_register(const char *device_name,
rte_dev_event_cb_fn cb_fn,
void *cb_arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * It unregisters the callback according to the specified device.
  *
  * @param device_name
@@ -415,16 +406,12 @@ rte_dev_event_callback_register(const char *device_name,
  *  - On success, return the number of callback entities removed.
  *  - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_event_callback_unregister(const char *device_name,
  rte_dev_event_cb_fn cb_fn,
  void *cb_arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Executes all the user application registered callbacks for
  * the specific device.
  *
@@ -433,64 +420,47 @@ rte_dev_event_callback_unregister(const char *device_name,
  * @param event
  *  the device event type.
  */
-__rte_experimental
 void
 rte_dev_event_callback_process(const char *device_name,
   enum rte_dev_event_type event);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Start the device event monitoring.
  *
  * @return
  *   - On success, zero.
  *   - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_event_monitor_start(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Stop the device event monitoring.
  *
  * @return
  *   - On success, zero.
  *   - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_event_monitor_stop(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Enable hotplug handling for devices.
  *
  * @return
  *   - On success, zero.
  *   - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_hotplug_handle_enable(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Disable hotplug handling for devices.
  *
  * @return
  *   - On success, zero.
  *   - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_dev_hotplug_handle_disable(void);
 
@@ -514,7 +484,6 @@ rte_dev_hotplug_handle_disable(void);
  * 0 if mapping was successful.
  * Negative value and rte_errno is set otherwise.
  */
-__rte_experimental
 int
 rte_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova, size_t len);
 
@@ -538,7 +507,6 @@ rte_dev_dma_map(struct rte_device *dev, void *addr, 
uint64_t iova, size_t len);
  * 0 if un-mapping was successful.
  * Negative value and rte_errno is set otherwise.
  */
-__rte_experimental
 int
 rte_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
  size_t len);
diff --git a/lib/eal/version.map b/lib/eal/version.map
index bdb98cf47993..3df6c4163276 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -29,8 +29,19 @@ DPDK_24 {
rte_dev_bus;
rte_dev_bus_info;
rte_dev_devargs;
+   rte_dev_dma_map;
+   rte_dev_dma_unmap;
rte_dev_driver;
+   rte_dev_event_callback_process;
+   rte_dev_event_callback_register;
+   rte_dev_event_callback_unregister;
+   rte_dev_event_monitor_start;
+   rte_dev_event_monitor_stop;
+   rte_dev_hotplug_handle_disable;
+   rte_dev_hotplug_handle_enable;
rte_dev_is_probed;
+   rte_dev_iterator_init;
+   rte_dev_iterator

[PATCH 03/15] eal: make rte_class API's stable

2023-08-09 Thread Stephen Hemminger
These APIs have been around for a while.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_class.h |  4 
 lib/eal/version.map | 10 --
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/lib/eal/include/rte_class.h b/lib/eal/include/rte_class.h
index 47b1764e7bf9..487d1abcde72 100644
--- a/lib/eal/include/rte_class.h
+++ b/lib/eal/include/rte_class.h
@@ -76,7 +76,6 @@ typedef int (*rte_class_cmp_t)(const struct rte_class *cls, 
const void *data);
  * @return
  *  A pointer to a rte_class structure or NULL in case no class matches
  */
-__rte_experimental
 struct rte_class *
 rte_class_find(const struct rte_class *start, rte_class_cmp_t cmp,
   const void *data);
@@ -84,7 +83,6 @@ rte_class_find(const struct rte_class *start, rte_class_cmp_t 
cmp,
 /**
  * Find the registered class for a given name.
  */
-__rte_experimental
 struct rte_class *
 rte_class_find_by_name(const char *name);
 
@@ -95,7 +93,6 @@ rte_class_find_by_name(const char *name);
  *   A pointer to a rte_class structure describing the class
  *   to be registered.
  */
-__rte_experimental
 void rte_class_register(struct rte_class *cls);
 
 /**
@@ -105,7 +102,6 @@ void rte_class_register(struct rte_class *cls);
  *   A pointer to a rte_class structure describing the class
  *   to be unregistered.
  */
-__rte_experimental
 void rte_class_unregister(struct rte_class *cls);
 
 /**
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 3df6c4163276..2f00f6a97989 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -17,6 +17,10 @@ DPDK_24 {
rte_bus_scan;
rte_calloc;
rte_calloc_socket;
+   rte_class_find;
+   rte_class_find_by_name;
+   rte_class_register;
+   rte_class_unregister;
rte_cpu_get_flag_enabled;
rte_cpu_get_flag_name;
rte_cpu_is_supported; # WINDOWS_NO_EXPORT
@@ -320,12 +324,6 @@ DPDK_24 {
 EXPERIMENTAL {
global:
 
-   # added in 18.08
-   rte_class_find;
-   rte_class_find_by_name;
-   rte_class_register;
-   rte_class_unregister;
-
# added in 20.05
__rte_eal_trace_generic_double;
__rte_eal_trace_generic_float;
-- 
2.39.2



[PATCH 04/15] eal: make rte_version_XXX API's stable

2023-08-09 Thread Stephen Hemminger
The subparts of rte_version were added in 2020 and
can now be marked stable.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_version.h |  6 --
 lib/eal/version.map   | 12 ++--
 2 files changed, 6 insertions(+), 12 deletions(-)

diff --git a/lib/eal/include/rte_version.h b/lib/eal/include/rte_version.h
index 414b6167f286..121d75bdbe28 100644
--- a/lib/eal/include/rte_version.h
+++ b/lib/eal/include/rte_version.h
@@ -35,37 +35,31 @@ extern "C" {
 /**
  * Function to return DPDK version prefix string
  */
-__rte_experimental
 const char *rte_version_prefix(void);
 
 /**
  * Function to return DPDK version year
  */
-__rte_experimental
 unsigned int rte_version_year(void);
 
 /**
  * Function to return DPDK version month
  */
-__rte_experimental
 unsigned int rte_version_month(void);
 
 /**
  * Function to return DPDK minor version number
  */
-__rte_experimental
 unsigned int rte_version_minor(void);
 
 /**
  * Function to return DPDK version suffix for any release candidates
  */
-__rte_experimental
 const char *rte_version_suffix(void);
 
 /**
  * Function to return DPDK version release candidate value
  */
-__rte_experimental
 unsigned int rte_version_release(void);
 
 /**
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 2f00f6a97989..e6d2fda95770 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -299,6 +299,12 @@ DPDK_24 {
rte_uuid_parse;
rte_uuid_unparse;
rte_version;
+   rte_version_minor;
+   rte_version_month;
+   rte_version_prefix;
+   rte_version_release;
+   rte_version_suffix;
+   rte_version_year;
rte_vfio_clear_group; # WINDOWS_NO_EXPORT
rte_vfio_container_create; # WINDOWS_NO_EXPORT
rte_vfio_container_destroy; # WINDOWS_NO_EXPORT
@@ -381,12 +387,6 @@ EXPERIMENTAL {
rte_thread_key_delete;
rte_thread_value_get;
rte_thread_value_set;
-   rte_version_minor;
-   rte_version_month;
-   rte_version_prefix;
-   rte_version_release;
-   rte_version_suffix;
-   rte_version_year;
 
# added in 21.08
rte_power_monitor_multi; # WINDOWS_NO_EXPORT
-- 
2.39.2



[PATCH 05/15] eal: make rte_drand a stable API

2023-08-09 Thread Stephen Hemminger
This API was added in 2020.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_random.h | 4 
 1 file changed, 4 deletions(-)

diff --git a/lib/eal/include/rte_random.h b/lib/eal/include/rte_random.h
index 2edf5d210b45..da8139cb10ad 100644
--- a/lib/eal/include/rte_random.h
+++ b/lib/eal/include/rte_random.h
@@ -68,9 +68,6 @@ uint64_t
 rte_rand_max(uint64_t upper_bound);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Generates a pseudo-random floating point number.
  *
  * This function returns a non-negative double-precision floating random
@@ -84,7 +81,6 @@ rte_rand_max(uint64_t upper_bound);
  * @return
  *   A pseudo-random value between 0 and 1.0.
  */
-__rte_experimental
 double rte_drand(void);
 
 #ifdef __cplusplus
-- 
2.39.2



[PATCH 06/15] eal: make rte_service_lcore_may_be_active stable

2023-08-09 Thread Stephen Hemminger
This API was added in 2020.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_service.h | 1 -
 lib/eal/version.map   | 4 ++--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/lib/eal/include/rte_service.h b/lib/eal/include/rte_service.h
index a77f33be488c..da5e246f7abf 100644
--- a/lib/eal/include/rte_service.h
+++ b/lib/eal/include/rte_service.h
@@ -275,7 +275,6 @@ int32_t rte_service_lcore_stop(uint32_t lcore_id);
  * @retval 1 Service thread is in the service core polling loop.
  * @retval -EINVAL Invalid *lcore_id* provided.
  */
-__rte_experimental
 int32_t rte_service_lcore_may_be_active(uint32_t lcore_id);
 
 /**
diff --git a/lib/eal/version.map b/lib/eal/version.map
index e6d2fda95770..2e50d6857d26 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -58,6 +58,7 @@ DPDK_24 {
rte_devargs_parsef;
rte_devargs_remove;
rte_devargs_type_count;
+   rte_drand;
rte_driver_name;
rte_dump_physmem_layout;
rte_dump_stack;
@@ -265,6 +266,7 @@ DPDK_24 {
rte_service_lcore_count_services;
rte_service_lcore_del;
rte_service_lcore_list;
+   rte_service_lcore_may_be_active;
rte_service_lcore_reset_all;
rte_service_lcore_start;
rte_service_lcore_stop;
@@ -371,7 +373,6 @@ EXPERIMENTAL {
# added in 20.11
__rte_eal_trace_generic_size_t; # WINDOWS_NO_EXPORT
rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT
-   rte_service_lcore_may_be_active;
rte_vect_get_max_simd_bitwidth;
rte_vect_set_max_simd_bitwidth;
 
@@ -400,7 +401,6 @@ EXPERIMENTAL {
rte_intr_type_set;
 
# added in 22.07
-   rte_drand;
rte_thread_get_affinity_by_id;
rte_thread_get_priority;
rte_thread_self;
-- 
2.39.2



[PATCH 07/15] eal: make rte_devargs_reset stable

2023-08-09 Thread Stephen Hemminger
Was added in 20.05 release.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_devargs.h | 1 -
 lib/eal/version.map   | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/lib/eal/include/rte_devargs.h b/lib/eal/include/rte_devargs.h
index 38dee2f2880c..2a5860353d93 100644
--- a/lib/eal/include/rte_devargs.h
+++ b/lib/eal/include/rte_devargs.h
@@ -169,7 +169,6 @@ __rte_format_printf(2, 0);
  * @param da
  *   The devargs structure holding the device information.
  */
-__rte_experimental
 void
 rte_devargs_reset(struct rte_devargs *da);
 
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 2e50d6857d26..200824a9e91f 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -57,6 +57,7 @@ DPDK_24 {
rte_devargs_parse;
rte_devargs_parsef;
rte_devargs_remove;
+   rte_devargs_reset;
rte_devargs_type_count;
rte_drand;
rte_driver_name;
@@ -382,7 +383,6 @@ EXPERIMENTAL {
rte_power_pause; # WINDOWS_NO_EXPORT
 
# added in 21.05
-   rte_devargs_reset;
rte_intr_callback_unregister_sync;
rte_thread_key_create;
rte_thread_key_delete;
-- 
2.39.2



[PATCH 08/15] eal: make pflock API stable

2023-08-09 Thread Stephen Hemminger
Added in 21.11 release.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_pflock.h | 20 
 1 file changed, 20 deletions(-)

diff --git a/lib/eal/include/rte_pflock.h b/lib/eal/include/rte_pflock.h
index a3f7291fa1f0..a751a7d8add9 100644
--- a/lib/eal/include/rte_pflock.h
+++ b/lib/eal/include/rte_pflock.h
@@ -79,15 +79,11 @@ typedef struct rte_pflock rte_pflock_t;
 #define RTE_PFLOCK_INITIALIZER {  }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Initialize the pflock to an unlocked state.
  *
  * @param pf
  *   A pointer to the pflock.
  */
-__rte_experimental
 static inline void
 rte_pflock_init(struct rte_pflock *pf)
 {
@@ -98,15 +94,11 @@ rte_pflock_init(struct rte_pflock *pf)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Take a pflock for read.
  *
  * @param pf
  *   A pointer to a pflock structure.
  */
-__rte_experimental
 static inline void
 rte_pflock_read_lock(rte_pflock_t *pf)
 {
@@ -127,15 +119,11 @@ rte_pflock_read_lock(rte_pflock_t *pf)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Release a pflock locked for reading.
  *
  * @param pf
  *   A pointer to the pflock structure.
  */
-__rte_experimental
 static inline void
 rte_pflock_read_unlock(rte_pflock_t *pf)
 {
@@ -143,15 +131,11 @@ rte_pflock_read_unlock(rte_pflock_t *pf)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Take the pflock for write.
  *
  * @param pf
  *   A pointer to the pflock structure.
  */
-__rte_experimental
 static inline void
 rte_pflock_write_lock(rte_pflock_t *pf)
 {
@@ -179,15 +163,11 @@ rte_pflock_write_lock(rte_pflock_t *pf)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Release a pflock held for writing.
  *
  * @param pf
  *   A pointer to a pflock structure.
  */
-__rte_experimental
 static inline void
 rte_pflock_write_unlock(rte_pflock_t *pf)
 {
-- 
2.39.2



[PATCH 09/15] eal: make seqcount and seqlock stable

2023-08-09 Thread Stephen Hemminger
These were added back in the 22.07 release.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_seqcount.h | 23 ---
 lib/eal/include/rte_seqlock.h  | 21 -
 2 files changed, 44 deletions(-)

diff --git a/lib/eal/include/rte_seqcount.h b/lib/eal/include/rte_seqcount.h
index ff62708e1b7b..6390a5a72f7c 100644
--- a/lib/eal/include/rte_seqcount.h
+++ b/lib/eal/include/rte_seqcount.h
@@ -40,15 +40,11 @@ typedef struct {
 #define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Initialize the sequence counter.
  *
  * @param seqcount
  *   A pointer to the sequence counter.
  */
-__rte_experimental
 static inline void
 rte_seqcount_init(rte_seqcount_t *seqcount)
 {
@@ -56,9 +52,6 @@ rte_seqcount_init(rte_seqcount_t *seqcount)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Begin a read-side critical section.
  *
  * A call to this function marks the beginning of a read-side critical
@@ -100,8 +93,6 @@ rte_seqcount_init(rte_seqcount_t *seqcount)
  *
  * @see rte_seqcount_read_retry()
  */
-
-__rte_experimental
 static inline uint32_t
 rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
 {
@@ -113,9 +104,6 @@ rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * End a read-side critical section.
  *
  * A call to this function marks the end of a read-side critical
@@ -145,8 +133,6 @@ rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
  *
  * @see rte_seqcount_read_begin()
  */
-
-__rte_experimental
 static inline bool
 rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
 {
@@ -171,9 +157,6 @@ rte_seqcount_read_retry(const rte_seqcount_t *seqcount, 
uint32_t begin_sn)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Begin a write-side critical section.
  *
  * A call to this function marks the beginning of a write-side
@@ -195,8 +178,6 @@ rte_seqcount_read_retry(const rte_seqcount_t *seqcount, 
uint32_t begin_sn)
  *
  * @see rte_seqcount_write_end()
  */
-
-__rte_experimental
 static inline void
 rte_seqcount_write_begin(rte_seqcount_t *seqcount)
 {
@@ -213,9 +194,6 @@ rte_seqcount_write_begin(rte_seqcount_t *seqcount)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * End a write-side critical section.
  *
  * A call to this function marks the end of the write-side critical
@@ -227,7 +205,6 @@ rte_seqcount_write_begin(rte_seqcount_t *seqcount)
  *
  * @see rte_seqcount_write_begin()
  */
-__rte_experimental
 static inline void
 rte_seqcount_write_end(rte_seqcount_t *seqcount)
 {
diff --git a/lib/eal/include/rte_seqlock.h b/lib/eal/include/rte_seqlock.h
index fcbb9c586668..589c98188529 100644
--- a/lib/eal/include/rte_seqlock.h
+++ b/lib/eal/include/rte_seqlock.h
@@ -114,9 +114,6 @@ typedef struct {
}
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Initialize the seqlock.
  *
  * This function initializes the seqlock, and leaves the writer-side
@@ -125,7 +122,6 @@ typedef struct {
  * @param seqlock
  *   A pointer to the seqlock.
  */
-__rte_experimental
 static inline void
 rte_seqlock_init(rte_seqlock_t *seqlock)
 {
@@ -134,9 +130,6 @@ rte_seqlock_init(rte_seqlock_t *seqlock)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Begin a read-side critical section.
  *
  * See rte_seqcount_read_retry() for details.
@@ -150,8 +143,6 @@ rte_seqlock_init(rte_seqlock_t *seqlock)
  * @see rte_seqlock_read_retry()
  * @see rte_seqcount_read_retry()
  */
-
-__rte_experimental
 static inline uint32_t
 rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
 {
@@ -159,9 +150,6 @@ rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * End a read-side critical section.
  *
  * See rte_seqcount_read_retry() for details.
@@ -177,7 +165,6 @@ rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
  *
  * @see rte_seqlock_read_begin()
  */
-__rte_experimental
 static inline bool
 rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
 {
@@ -185,9 +172,6 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, 
uint32_t begin_sn)
 }
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Begin a write-side critical section.
  *
  * A call to this function acquires the write lock associated @p
@@ -212,7 +196,6 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, 
uint32_t begin_sn)
  *
  * @see rte_seqlock_write_unlock()
  */
-__rte_experimental
 static inline void
 rte_seqlock_write_lock(rte_seqlock_t *seqlock)
__rte_exclusive_lock_function(&seqlock->lock)
@@ -224,9 +207,6 @@ rte_seqlock_write_lock(rte_seqlock_t *seqlo

[PATCH 10/15] eal: mark rte_intr_XXX API's as stable

2023-08-09 Thread Stephen Hemminger
These were added back in 2020.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_interrupts.h | 28 
 lib/eal/version.map  | 14 +++---
 2 files changed, 7 insertions(+), 35 deletions(-)

diff --git a/lib/eal/include/rte_interrupts.h b/lib/eal/include/rte_interrupts.h
index bcafdd58a912..1b9a0b2a78f3 100644
--- a/lib/eal/include/rte_interrupts.h
+++ b/lib/eal/include/rte_interrupts.h
@@ -127,9 +127,6 @@ rte_intr_callback_unregister_pending(const struct 
rte_intr_handle *intr_handle,
rte_intr_unregister_callback_fn ucb_fn);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Loop until rte_intr_callback_unregister() succeeds.
  * After a call to this function,
  * the callback provided by the specified interrupt handle is unregistered.
@@ -146,7 +143,6 @@ rte_intr_callback_unregister_pending(const struct 
rte_intr_handle *intr_handle,
  *  - On success, return the number of callback entities removed.
  *  - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_intr_callback_unregister_sync(const struct rte_intr_handle *intr_handle,
rte_intr_callback_fn cb, void *cb_arg);
@@ -201,9 +197,6 @@ int rte_intr_ack(const struct rte_intr_handle *intr_handle);
 int rte_thread_is_intr(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * It allocates memory for interrupt instance. API takes flag as an argument
  * which define from where memory should be allocated i.e. using DPDK memory
  * management library APIs or normal heap allocation.
@@ -221,28 +214,20 @@ int rte_thread_is_intr(void);
  *  - On success, address of interrupt handle.
  *  - On failure, NULL.
  */
-__rte_experimental
 struct rte_intr_handle *
 rte_intr_instance_alloc(uint32_t flags);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Free the memory allocated for interrupt handle resources.
  *
  * @param intr_handle
  *  Interrupt handle allocated with rte_intr_instance_alloc().
  *  If intr_handle is NULL, no operation is performed.
  */
-__rte_experimental
 void
 rte_intr_instance_free(struct rte_intr_handle *intr_handle);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Set the fd field of interrupt handle with user provided
  * file descriptor.
  *
@@ -255,14 +240,10 @@ rte_intr_instance_free(struct rte_intr_handle 
*intr_handle);
  *  - On success, zero.
  *  - On failure, a negative value and rte_errno is set.
  */
-__rte_experimental
 int
 rte_intr_fd_set(struct rte_intr_handle *intr_handle, int fd);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Returns the fd field of the given interrupt handle instance.
  *
  * @param intr_handle
@@ -272,14 +253,10 @@ rte_intr_fd_set(struct rte_intr_handle *intr_handle, int 
fd);
  *  - On success, fd field.
  *  - On failure, a negative value.
  */
-__rte_experimental
 int
 rte_intr_fd_get(const struct rte_intr_handle *intr_handle);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Set the type field of interrupt handle with user provided
  * interrupt type.
  *
@@ -292,15 +269,11 @@ rte_intr_fd_get(const struct rte_intr_handle 
*intr_handle);
  *  - On success, zero.
  *  - On failure, a negative value and rte_errno is set.
  */
-__rte_experimental
 int
 rte_intr_type_set(struct rte_intr_handle *intr_handle,
  enum rte_intr_handle_type type);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Returns the type field of the given interrupt handle instance.
  *
  * @param intr_handle
@@ -310,7 +283,6 @@ rte_intr_type_set(struct rte_intr_handle *intr_handle,
  *  - On success, interrupt type
  *  - On failure, RTE_INTR_HANDLE_UNKNOWN.
  */
-__rte_experimental
 enum rte_intr_handle_type
 rte_intr_type_get(const struct rte_intr_handle *intr_handle);
 
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 200824a9e91f..d75a7379cbdf 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -138,8 +138,15 @@ DPDK_24 {
rte_intr_callback_register;
rte_intr_callback_unregister;
rte_intr_callback_unregister_pending;
+   rte_intr_callback_unregister_sync;
rte_intr_disable;
rte_intr_enable;
+   rte_intr_fd_get;
+   rte_intr_fd_set;
+   rte_intr_instance_alloc;
+   rte_intr_instance_free;
+   rte_intr_type_get;
+   rte_intr_type_set;
rte_keepalive_create; # WINDOWS_NO_EXPORT
rte_keepalive_dispatch_pings; # WINDOWS_NO_EXPORT
rte_keepalive_mark_alive; # WINDOWS_NO_EXPORT
@@ -383,7 +390,6 @@ EXPERIMENTAL {
rte_power_pause; # WINDOWS_NO_EXPORT
 
# added in 21.05
-   rte_intr_callback_unregister_sync;
rte_thread_key_create;
rte_thread_key_delete;
rte_threa

[PATCH 11/15] eal: mark rte_atomic128_cmp_exchange as stable

2023-08-09 Thread Stephen Hemminger
This has been around since 2021.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/arm/include/rte_atomic_64.h  | 1 -
 lib/eal/include/generic/rte_atomic.h | 1 -
 lib/eal/x86/include/rte_atomic_64.h  | 1 -
 3 files changed, 3 deletions(-)

diff --git a/lib/eal/arm/include/rte_atomic_64.h 
b/lib/eal/arm/include/rte_atomic_64.h
index 604791150765..5005a6dfed76 100644
--- a/lib/eal/arm/include/rte_atomic_64.h
+++ b/lib/eal/arm/include/rte_atomic_64.h
@@ -94,7 +94,6 @@ __ATOMIC128_CAS_OP(__cas_128_acq_rel, "caspal")
 
 #endif
 
-__rte_experimental
 static inline int
 rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp,
const rte_int128_t *src, unsigned int weak, int success,
diff --git a/lib/eal/include/generic/rte_atomic.h 
b/lib/eal/include/generic/rte_atomic.h
index aef44e245548..1a9323dc8ad8 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -1121,7 +1121,6 @@ typedef struct {
  * @return
  *   Non-zero on success; 0 on failure.
  */
-__rte_experimental
 static inline int
 rte_atomic128_cmp_exchange(rte_int128_t *dst,
   rte_int128_t *exp,
diff --git a/lib/eal/x86/include/rte_atomic_64.h 
b/lib/eal/x86/include/rte_atomic_64.h
index 0edee8627224..e968bbf0ce65 100644
--- a/lib/eal/x86/include/rte_atomic_64.h
+++ b/lib/eal/x86/include/rte_atomic_64.h
@@ -182,7 +182,6 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
 
 /* 128 bit atomic operations 
-*/
 
-__rte_experimental
 static inline int
 rte_atomic128_cmp_exchange(rte_int128_t *dst,
   rte_int128_t *exp,
-- 
2.39.2



[PATCH 12/15] eal: make most rte_thread API's stable

2023-08-09 Thread Stephen Hemminger
All rte_thread APIs added before the 23.03 release should be
marked as not experimental.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_thread.h | 57 
 lib/eal/version.map  | 41 +++---
 2 files changed, 17 insertions(+), 81 deletions(-)

diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h
index 369e2375f6a0..7da5794fc258 100644
--- a/lib/eal/include/rte_thread.h
+++ b/lib/eal/include/rte_thread.h
@@ -68,9 +68,6 @@ typedef struct {
 typedef struct eal_tls_key *rte_thread_key;
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Create a new thread that will invoke the 'thread_func' routine.
  *
  * @param thread_id
@@ -89,7 +86,6 @@ typedef struct eal_tls_key *rte_thread_key;
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_create(rte_thread_t *thread_id,
const rte_thread_attr_t *thread_attr,
rte_thread_func thread_func, void *arg);
@@ -128,9 +124,6 @@ rte_thread_create_control(rte_thread_t *thread, const char 
*name,
void *arg);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Waits for the thread identified by 'thread_id' to terminate
  *
  * @param thread_id
@@ -143,13 +136,9 @@ rte_thread_create_control(rte_thread_t *thread, const char 
*name,
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Indicate that the return value of the thread is not needed and
  * all thread resources should be release when the thread terminates.
  *
@@ -160,19 +149,14 @@ int rte_thread_join(rte_thread_t thread_id, uint32_t 
*value_ptr);
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_detach(rte_thread_t thread_id);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Get the id of the calling thread.
  *
  * @return
  *   Return the thread id of the calling thread.
  */
-__rte_experimental
 rte_thread_t rte_thread_self(void);
 
 /**
@@ -196,9 +180,6 @@ void
 rte_thread_set_name(rte_thread_t thread_id, const char *thread_name);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Check if 2 thread ids are equal.
  *
  * @param t1
@@ -211,13 +192,9 @@ rte_thread_set_name(rte_thread_t thread_id, const char 
*thread_name);
  *   If the ids are equal, return nonzero.
  *   Otherwise, return 0.
  */
-__rte_experimental
 int rte_thread_equal(rte_thread_t t1, rte_thread_t t2);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Initialize the attributes of a thread.
  * These attributes can be passed to the rte_thread_create() function
  * that will create a new thread and set its attributes according to attr.
@@ -229,13 +206,9 @@ int rte_thread_equal(rte_thread_t t1, rte_thread_t t2);
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_attr_init(rte_thread_attr_t *attr);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Set the thread priority value in the thread attributes pointed to
  * by 'thread_attr'.
  *
@@ -249,16 +222,12 @@ int rte_thread_attr_init(rte_thread_attr_t *attr);
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
enum rte_thread_priority priority);
 
 #ifdef RTE_HAS_CPUSET
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Set the CPU affinity value in the thread attributes pointed to
  * by 'thread_attr'.
  *
@@ -272,14 +241,10 @@ int rte_thread_attr_set_priority(rte_thread_attr_t 
*thread_attr,
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
rte_cpuset_t *cpuset);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Get the value of CPU affinity that is set in the thread attributes pointed
  * to by 'thread_attr'.
  *
@@ -293,14 +258,10 @@ int rte_thread_attr_set_affinity(rte_thread_attr_t 
*thread_attr,
  *   On success, return 0.
  *   On failure, return a positive errno-style error number.
  */
-__rte_experimental
 int rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
rte_cpuset_t *cpuset);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Set the affini

[PATCH 13/15] eal: mark rte_power API's stable

2023-08-09 Thread Stephen Hemminger
These were added back in 2020.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/generic/rte_power_intrinsics.h | 16 
 lib/eal/version.map| 13 -
 2 files changed, 4 insertions(+), 25 deletions(-)

diff --git a/lib/eal/include/generic/rte_power_intrinsics.h 
b/lib/eal/include/generic/rte_power_intrinsics.h
index f981df7d75a1..922c32c83fdb 100644
--- a/lib/eal/include/generic/rte_power_intrinsics.h
+++ b/lib/eal/include/generic/rte_power_intrinsics.h
@@ -54,9 +54,6 @@ struct rte_power_monitor_cond {
 };
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Monitor specific address for changes. This will cause the CPU to enter an
  * architecture-defined optimized power state until either the specified
  * memory address is written to, a certain TSC timestamp is reached, or other
@@ -84,14 +81,10 @@ struct rte_power_monitor_cond {
  *   -EINVAL on invalid parameters
  *   -ENOTSUP if unsupported
  */
-__rte_experimental
 int rte_power_monitor(const struct rte_power_monitor_cond *pmc,
const uint64_t tsc_timestamp);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Wake up a specific lcore that is in a power optimized state and is 
monitoring
  * an address.
  *
@@ -104,13 +97,9 @@ int rte_power_monitor(const struct rte_power_monitor_cond 
*pmc,
  * @param lcore_id
  *   Lcore ID of a sleeping thread.
  */
-__rte_experimental
 int rte_power_monitor_wakeup(const unsigned int lcore_id);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
  * Enter an architecture-defined optimized power state until a certain TSC
  * timestamp is reached.
  *
@@ -126,13 +115,9 @@ int rte_power_monitor_wakeup(const unsigned int lcore_id);
  *   -EINVAL on invalid parameters
  *   -ENOTSUP if unsupported
  */
-__rte_experimental
 int rte_power_pause(const uint64_t tsc_timestamp);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Monitor a set of addresses for changes. This will cause the CPU to enter an
  * architecture-defined optimized power state until either one of the specified
  * memory addresses is written to, a certain TSC timestamp is reached, or other
@@ -160,7 +145,6 @@ int rte_power_pause(const uint64_t tsc_timestamp);
  *   -EINVAL on invalid parameters
  *   -ENOTSUP if unsupported
  */
-__rte_experimental
 int rte_power_monitor_multi(const struct rte_power_monitor_cond pmc[],
const uint32_t num, const uint64_t tsc_timestamp);
 
diff --git a/lib/eal/version.map b/lib/eal/version.map
index a71147aec83f..3b42d6e0e4c1 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -250,6 +250,10 @@ DPDK_24 {
rte_mp_request_sync;
rte_mp_sendmsg;
rte_openlog_stream;
+   rte_power_monitor; # WINDOWS_NO_EXPORT
+   rte_power_monitor_wakeup; # WINDOWS_NO_EXPORT
+   rte_power_pause; # WINDOWS_NO_EXPORT
+   rte_power_monitor_multi; # WINDOWS_NO_EXPORT
rte_rand;
rte_rand_max;
rte_realloc;
@@ -401,15 +405,6 @@ EXPERIMENTAL {
rte_vect_get_max_simd_bitwidth;
rte_vect_set_max_simd_bitwidth;
 
-   # added in 21.02
-   rte_power_monitor; # WINDOWS_NO_EXPORT
-   rte_power_monitor_wakeup; # WINDOWS_NO_EXPORT
-   rte_power_pause; # WINDOWS_NO_EXPORT
-
-
-   # added in 21.08
-   rte_power_monitor_multi; # WINDOWS_NO_EXPORT
-
# added in 23.03
rte_lcore_register_usage_cb;
rte_thread_create_control;
-- 
2.39.2



[PATCH 14/15] eal: mark rte_eal_vfio_get_token stable

2023-08-09 Thread Stephen Hemminger
This API was added in 20.08 release.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/rte_eal.h | 4 
 lib/eal/version.map   | 4 +---
 2 files changed, 1 insertion(+), 7 deletions(-)

diff --git a/lib/eal/include/rte_eal.h b/lib/eal/include/rte_eal.h
index 53c4a5519e61..aac57665e6c2 100644
--- a/lib/eal/include/rte_eal.h
+++ b/lib/eal/include/rte_eal.h
@@ -412,16 +412,12 @@ int rte_eal_create_uio_dev(void);
 enum rte_intr_mode rte_eal_vfio_intr_mode(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice
- *
  * Copy the user-configured vfio VF token.
  *
  * @param vf_token
  *   vfio VF token configured with the command line is copied
  *   into this parameter, zero uuid by default.
  */
-__rte_experimental
 void rte_eal_vfio_get_vf_token(rte_uuid_t vf_token);
 
 /**
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 3b42d6e0e4c1..65435ae48696 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -89,6 +89,7 @@ DPDK_24 {
rte_eal_tailq_lookup;
rte_eal_tailq_register;
rte_eal_using_phys_addrs;
+   rte_eal_vfio_get_vf_token; # WINDOWS_NO_EXPORT
rte_eal_vfio_intr_mode; # WINDOWS_NO_EXPORT
rte_eal_wait_lcore;
rte_epoll_ctl;
@@ -396,9 +397,6 @@ EXPERIMENTAL {
rte_trace_regexp; # WINDOWS_NO_EXPORT
rte_trace_save; # WINDOWS_NO_EXPORT
 
-   # added in 20.08
-   rte_eal_vfio_get_vf_token; # WINDOWS_NO_EXPORT
-
# added in 20.11
__rte_eal_trace_generic_size_t; # WINDOWS_NO_EXPORT
rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT
-- 
2.39.2



[PATCH 15/15] eal: mark rte_vect simd bandwidth API as stable

2023-08-09 Thread Stephen Hemminger
These were added back in 20.11.

Signed-off-by: Stephen Hemminger 
---
 lib/eal/include/generic/rte_vect.h | 8 
 lib/eal/version.map| 4 ++--
 2 files changed, 2 insertions(+), 10 deletions(-)

diff --git a/lib/eal/include/generic/rte_vect.h 
b/lib/eal/include/generic/rte_vect.h
index 3fec2bf1a2ec..bf541ce9d928 100644
--- a/lib/eal/include/generic/rte_vect.h
+++ b/lib/eal/include/generic/rte_vect.h
@@ -204,21 +204,14 @@ enum rte_vect_max_simd {
 };
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Get the supported SIMD bitwidth.
  *
  * @return
  *   uint16_t bitwidth.
  */
-__rte_experimental
 uint16_t rte_vect_get_max_simd_bitwidth(void);
 
 /**
- * @warning
- * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
- *
  * Set the supported SIMD bitwidth.
  * This API should only be called once at initialization, before EAL init.
  *
@@ -229,7 +222,6 @@ uint16_t rte_vect_get_max_simd_bitwidth(void);
  *   - -EINVAL on invalid bitwidth parameter.
  *   - -EPERM if bitwidth is forced.
  */
-__rte_experimental
 int rte_vect_set_max_simd_bitwidth(uint16_t bitwidth);
 
 #endif /* _RTE_VECT_H_ */
diff --git a/lib/eal/version.map b/lib/eal/version.map
index 65435ae48696..d354b9966ce9 100644
--- a/lib/eal/version.map
+++ b/lib/eal/version.map
@@ -330,6 +330,8 @@ DPDK_24 {
rte_uuid_is_null;
rte_uuid_parse;
rte_uuid_unparse;
+   rte_vect_get_max_simd_bitwidth;
+   rte_vect_set_max_simd_bitwidth;
rte_version;
rte_version_minor;
rte_version_month;
@@ -400,8 +402,6 @@ EXPERIMENTAL {
# added in 20.11
__rte_eal_trace_generic_size_t; # WINDOWS_NO_EXPORT
rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT
-   rte_vect_get_max_simd_bitwidth;
-   rte_vect_set_max_simd_bitwidth;
 
# added in 23.03
rte_lcore_register_usage_cb;
-- 
2.39.2



Re: [PATCH 00/20] remove experimental flag from some API's

2023-08-09 Thread Dmitry Kozlyuk
2023-08-09 08:34 (UTC-0700), Stephen Hemminger:
> On Tue, 8 Aug 2023 16:23:43 -0700
> Tyler Retzlaff  wrote:
> 
> > > 
> > > bpf: not built on Windows. Needs some libelf.
> > > pdump: not built on Windows. Needs bpf for filtering  
> 
> On a different topic, is it possible to get pdump working on Windows?

Unlikely with the current state of DPDK and pdump.
The main issue is multiprocess, which is not implemented.
Windows can share hugepages between processes (MapViewOfFile3)
or map them to a fixed address in the reserved region (VirtualAlloc2),
but not both; this was the blocker, AFAIR.

> Is there a pcap and elf library?

net/pcap already uses libpcap.
Looks like there are libelf ports too.

> It might be possible to split out the libelf dependency in the bpf library.
> Libelf is used to load an external file, but some uses only need internal data.

ELF library is an optional dependency already.


Re: [PATCH] net/bonding: Fix header for C++

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 03:52:41PM +, Visa Hankala wrote:
> Apply C linkage to the whole header to allow use with C++.
> 
> Fixes: dc40f17a36b ("net/bonding: allow external state machine in mode 4")
> 
> Signed-off-by: Visa Hankala 
> 

Acked-by: Tyler Retzlaff 



Re: [PATCH 11/15] eal: mark rte_atomic128_cmp_exchange as stable

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 09:43:03AM -0700, Stephen Hemminger wrote:
> This has been around since 2021.
> 
> Signed-off-by: Stephen Hemminger 
> ---
>  lib/eal/arm/include/rte_atomic_64.h  | 1 -
>  lib/eal/include/generic/rte_atomic.h | 1 -
>  lib/eal/x86/include/rte_atomic_64.h  | 1 -
>  3 files changed, 3 deletions(-)
> 
> diff --git a/lib/eal/arm/include/rte_atomic_64.h 
> b/lib/eal/arm/include/rte_atomic_64.h
> index 604791150765..5005a6dfed76 100644
> --- a/lib/eal/arm/include/rte_atomic_64.h
> +++ b/lib/eal/arm/include/rte_atomic_64.h
> @@ -94,7 +94,6 @@ __ATOMIC128_CAS_OP(__cas_128_acq_rel, "caspal")
>  
>  #endif
>  
> -__rte_experimental
>  static inline int
>  rte_atomic128_cmp_exchange(rte_int128_t *dst, rte_int128_t *exp,
>   const rte_int128_t *src, unsigned int weak, int success,
> diff --git a/lib/eal/include/generic/rte_atomic.h 
> b/lib/eal/include/generic/rte_atomic.h
> index aef44e245548..1a9323dc8ad8 100644
> --- a/lib/eal/include/generic/rte_atomic.h
> +++ b/lib/eal/include/generic/rte_atomic.h
> @@ -1121,7 +1121,6 @@ typedef struct {
>   * @return
>   *   Non-zero on success; 0 on failure.
>   */
> -__rte_experimental
>  static inline int
>  rte_atomic128_cmp_exchange(rte_int128_t *dst,
>  rte_int128_t *exp,
> diff --git a/lib/eal/x86/include/rte_atomic_64.h 
> b/lib/eal/x86/include/rte_atomic_64.h
> index 0edee8627224..e968bbf0ce65 100644
> --- a/lib/eal/x86/include/rte_atomic_64.h
> +++ b/lib/eal/x86/include/rte_atomic_64.h
> @@ -182,7 +182,6 @@ static inline void rte_atomic64_clear(rte_atomic64_t *v)
>  
>  /* 128 bit atomic operations 
> -*/
>  
> -__rte_experimental
>  static inline int
>  rte_atomic128_cmp_exchange(rte_int128_t *dst,
>  rte_int128_t *exp,

I'm wondering if, given the fluidity of changes in atomics right now, we
should hold this for a little while / until close to the end of the merge
window, just in case we want to adjust it?

> -- 
> 2.39.2


Re: [PATCH 12/15] eal: make most rte_thread API's stable

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 09:43:04AM -0700, Stephen Hemminger wrote:
> All rte_thread API's added before 23.03 release should be
> marked as not experimental.
> 
> Signed-off-by: Stephen Hemminger 
> ---

Already acked the series but since i know about this set here's an
explicit ack.

Note: I have on my todo list (but still haven't managed to get to it)
adding __rte_deprecated to rte_ctrl_thread_create, which has a POSIX
name in the signature. rte_thread_create_control was introduced as a
replacement in 23.07.

Acked-by: Tyler Retzlaff 

>  lib/eal/include/rte_thread.h | 57 
>  lib/eal/version.map  | 41 +++---
>  2 files changed, 17 insertions(+), 81 deletions(-)
> 
> diff --git a/lib/eal/include/rte_thread.h b/lib/eal/include/rte_thread.h
> index 369e2375f6a0..7da5794fc258 100644
> --- a/lib/eal/include/rte_thread.h
> +++ b/lib/eal/include/rte_thread.h
> @@ -68,9 +68,6 @@ typedef struct {
>  typedef struct eal_tls_key *rte_thread_key;
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Create a new thread that will invoke the 'thread_func' routine.
>   *
>   * @param thread_id
> @@ -89,7 +86,6 @@ typedef struct eal_tls_key *rte_thread_key;
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_create(rte_thread_t *thread_id,
>   const rte_thread_attr_t *thread_attr,
>   rte_thread_func thread_func, void *arg);
> @@ -128,9 +124,6 @@ rte_thread_create_control(rte_thread_t *thread, const 
> char *name,
>   void *arg);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Waits for the thread identified by 'thread_id' to terminate
>   *
>   * @param thread_id
> @@ -143,13 +136,9 @@ rte_thread_create_control(rte_thread_t *thread, const 
> char *name,
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_join(rte_thread_t thread_id, uint32_t *value_ptr);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Indicate that the return value of the thread is not needed and
>   * all thread resources should be release when the thread terminates.
>   *
> @@ -160,19 +149,14 @@ int rte_thread_join(rte_thread_t thread_id, uint32_t 
> *value_ptr);
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_detach(rte_thread_t thread_id);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Get the id of the calling thread.
>   *
>   * @return
>   *   Return the thread id of the calling thread.
>   */
> -__rte_experimental
>  rte_thread_t rte_thread_self(void);
>  
>  /**
> @@ -196,9 +180,6 @@ void
>  rte_thread_set_name(rte_thread_t thread_id, const char *thread_name);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Check if 2 thread ids are equal.
>   *
>   * @param t1
> @@ -211,13 +192,9 @@ rte_thread_set_name(rte_thread_t thread_id, const char 
> *thread_name);
>   *   If the ids are equal, return nonzero.
>   *   Otherwise, return 0.
>   */
> -__rte_experimental
>  int rte_thread_equal(rte_thread_t t1, rte_thread_t t2);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Initialize the attributes of a thread.
>   * These attributes can be passed to the rte_thread_create() function
>   * that will create a new thread and set its attributes according to attr.
> @@ -229,13 +206,9 @@ int rte_thread_equal(rte_thread_t t1, rte_thread_t t2);
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_attr_init(rte_thread_attr_t *attr);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Set the thread priority value in the thread attributes pointed to
>   * by 'thread_attr'.
>   *
> @@ -249,16 +222,12 @@ int rte_thread_attr_init(rte_thread_attr_t *attr);
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
>   enum rte_thread_priority priority);
>  
>  #ifdef RTE_HAS_CPUSET
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice.
> - *
>   * Set the CPU affinity value in the thread attributes pointed to
>   * by 'thread_attr'.
>   *
> @@ -272,14 +241,10 @@ int rte_thread_attr_set_priority(rte_thread_attr_t 
> *thread_attr,
>   *   On success, return 0.
>   *   On failure, return a positive errno-style error number.
>   */
> -__rte_experimental
>  int rte_thread_attr_set_af

Re: [PATCH 14/15] eal: mark rte_eal_vfio_get_token stable

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 09:43:06AM -0700, Stephen Hemminger wrote:
> This API was added in 20.08 release.
> 
> Signed-off-by: Stephen Hemminger 
> ---

There is driver work going on for similar functionality to vfio on
Windows. It's possible that this API could be exposed for Windows, but
in its current form it may not be suitable.

*Not blocking*

Just wanted to make you aware; I don't have a firm enough schedule to
derail this process.

>  lib/eal/include/rte_eal.h | 4 
>  lib/eal/version.map   | 4 +---
>  2 files changed, 1 insertion(+), 7 deletions(-)
> 
> diff --git a/lib/eal/include/rte_eal.h b/lib/eal/include/rte_eal.h
> index 53c4a5519e61..aac57665e6c2 100644
> --- a/lib/eal/include/rte_eal.h
> +++ b/lib/eal/include/rte_eal.h
> @@ -412,16 +412,12 @@ int rte_eal_create_uio_dev(void);
>  enum rte_intr_mode rte_eal_vfio_intr_mode(void);
>  
>  /**
> - * @warning
> - * @b EXPERIMENTAL: this API may change without prior notice
> - *
>   * Copy the user-configured vfio VF token.
>   *
>   * @param vf_token
>   *   vfio VF token configured with the command line is copied
>   *   into this parameter, zero uuid by default.
>   */
> -__rte_experimental
>  void rte_eal_vfio_get_vf_token(rte_uuid_t vf_token);
>  
>  /**
> diff --git a/lib/eal/version.map b/lib/eal/version.map
> index 3b42d6e0e4c1..65435ae48696 100644
> --- a/lib/eal/version.map
> +++ b/lib/eal/version.map
> @@ -89,6 +89,7 @@ DPDK_24 {
>   rte_eal_tailq_lookup;
>   rte_eal_tailq_register;
>   rte_eal_using_phys_addrs;
> + rte_eal_vfio_get_vf_token; # WINDOWS_NO_EXPORT
>   rte_eal_vfio_intr_mode; # WINDOWS_NO_EXPORT
>   rte_eal_wait_lcore;
>   rte_epoll_ctl;
> @@ -396,9 +397,6 @@ EXPERIMENTAL {
>   rte_trace_regexp; # WINDOWS_NO_EXPORT
>   rte_trace_save; # WINDOWS_NO_EXPORT
>  
> - # added in 20.08
> - rte_eal_vfio_get_vf_token; # WINDOWS_NO_EXPORT
> -
>   # added in 20.11
>   __rte_eal_trace_generic_size_t; # WINDOWS_NO_EXPORT
>   rte_cpu_get_intrinsics_support; # WINDOWS_NO_EXPORT
> -- 
> 2.39.2


Re: [PATCH 01/15] eal: make bitops a stable API

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 09:42:53AM -0700, Stephen Hemminger wrote:
> These were added in 20.05 release.
> 
> Signed-off-by: Stephen Hemminger 
> ---

Can I ask that this patch be held until this series is merged?

https://patchwork.dpdk.org/project/dpdk/list/?series=27612

I'm already overloaded with rebasing for other changes; I'm working
with David to get the above series merged.

Adding David as an FYI.


RE: [RFC PATCH] dmadev: offload to free source buffer

2023-08-09 Thread Amit Prakash Shukla



> -Original Message-
> From: Morten Brørup 
> Sent: Wednesday, August 9, 2023 8:19 PM
> To: Amit Prakash Shukla ; Chengwen Feng
> ; Kevin Laatz ; Bruce
> Richardson 
> Cc: dev@dpdk.org; Jerin Jacob Kollanukkaran ;
> conor.wa...@intel.com; Vamsi Krishna Attunuru ;
> g.si...@nxp.com; sachin.sax...@oss.nxp.com; hemant.agra...@nxp.com;
> cheng1.ji...@intel.com; Nithin Kumar Dabilpuram
> ; Anoob Joseph 
> Subject: [EXT] RE: [RFC PATCH] dmadev: offload to free source buffer
> 
> > From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> > Sent: Wednesday, 9 August 2023 16.27
> >
> > > From: Morten Brørup 
> > > Sent: Wednesday, August 9, 2023 2:37 PM
> > >
> > > > From: Amit Prakash Shukla [mailto:amitpraka...@marvell.com]
> > > > Sent: Wednesday, 9 August 2023 08.09
> > > >
> > > > This changeset adds support in DMA library to free source DMA
> > > > buffer by hardware. On a supported hardware, application can pass
> > > > on the mempool information as part of vchan config when the DMA
> > > > transfer direction is configured as RTE_DMA_DIR_MEM_TO_DEV.
> > >
> > > Isn't the DMA source buffer a memory area, and what needs to be
> > > freed is
> > > the mbuf holding the memory area, i.e. two different pointers?
> > No, it is the same pointer. Assume an mbuf created from a mempool: the mempool
> > needs to be given via the vchan config, and the iova passed to
> > rte_dma_copy/rte_dma_copy_sg can be any address in the mbuf area of the
> > given mempool element.
> > For example, if the mempool element size is S and the dequeued buffer from the
> > mempool is at X, any address in (X, X+S) can be given as the iova to rte_dma_copy.
> 
> So the DMA library determines the pointer to the mbuf (in the given
> mempool) by looking at the iova passed to rte_dma_copy/rte_dma_copy_sg,
> and then calls rte_mempool_put with that pointer?

No. The DMA hardware would determine the pointer to the mbuf using the iova address
and the mempool. The hardware will free the buffer on completion of the data transfer.
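
To make the intended flow concrete, here is a minimal sketch from the application
side (the mempool member in the vchan config below is a hypothetical name used
only to show where the pool would be passed; dev_id, vchan, mbuf_pool, m and
dst_dev_iova are assumed to be set up elsewhere, and the MEM_TO_DEV port
parameters are omitted):

    /* Hand the source mempool to the driver at vchan setup time, so the
     * hardware can return completed source buffers to it by itself. */
    struct rte_dma_vchan_conf vconf = {
            .direction = RTE_DMA_DIR_MEM_TO_DEV,
            /* hypothetical field carrying the source buffer pool */
            .mem_to_dev_src_buf_pool = mbuf_pool,
    };
    rte_dma_vchan_setup(dev_id, vchan, &vconf);

    /* m is an mbuf from mbuf_pool that already holds the payload; any iova
     * inside its pool element is a valid source address. */
    rte_dma_copy(dev_id, vchan, rte_mbuf_data_iova(m), dst_dev_iova,
                 rte_pktmbuf_data_len(m), RTE_DMA_OP_FLAG_SUBMIT);
    /* On completion the hardware frees m back to mbuf_pool, so the
     * application does not call rte_pktmbuf_free() for it. */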

> 
> >
> > >
> > > I like the concept. Something similar might also be useful for
> > > RTE_DMA_DIR_MEM_TO_MEM, e.g. packet capture. Although such a use
> > > case might require decrementing the mbuf refcount instead of freeing
> > the
> > > mbuf directly to the mempool.
> > This operation is not supported in our hardware. It can be implemented
> > in the future if any hardware supports it.
> 
> OK, I didn't expect that - just floating the idea. :-)
> 
> >
> > >
> > > PS: It has been a while since I looked at the DMA library, so ignore
> > > my comments if I got this wrong.



RE: [RFC] ring: further performance improvements with C11

2023-08-09 Thread Konstantin Ananyev


> > > For improved performance over the current C11 based ring
> > > implementation following changes were made.
> > > (1) Replace tail store with RELEASE semantics in
> > > __rte_ring_update_tail with a RELEASE fence. Replace load of the tail
> > > with ACQUIRE semantics in __rte_ring_move_prod_head and
> > > __rte_ring_move_cons_head with ACQUIRE fences.
> > > (2) Remove ACQUIRE fences between load of the old_head and load of the
> > > cons_tail in __rte_ring_move_prod_head and __rte_ring_move_cons_head.
> > > These two fences are not required for the safety of the ring library.
> >
> > Hmm... with these changes, aren't we re-introducing the old bug fixed by 
> > this
> > commit:
> 
> The cover letter explains why this barrier does not solve what it intends to
> solve and why it should not matter.
> https://mails.dpdk.org/archives/dev/2023-June/270874.html

Ok, let's consider a case similar to yours (i), but where r->prod.head was
moved a distance greater than r->capacity.
To be more specific, let's start with the same initial state:
capacity = 32
r->cons.tail = 5
r->cons.head = 5
r->prod.head = 10
r->prod.tail = 10

time 0,  thread1: 
/* re-ordered load */
 cons_tail = r->cons.tail; // = 5

Now, thread1 was stalled for a bit; meanwhile a few enqueues/dequeues were
done by other threads, so the current state of the ring is:
r->cons.tail = 105
r->cons.head = 105
r->prod.head = 110
r->prod.tail = 110

time 1,  thread1:
old_head =  r->prod.head; // 110
*free_entries = (capacity + cons_tail - old_head); // = (uint32_t)(32 + 5 - 110) == (uint32_t)-73 == 4294967223
 
So, the free_entries value is way too big, and the comparison:

if (unlikely(n > *free_entries))

might provide a wrong result.
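
A tiny standalone check of that arithmetic, assuming uint32_t indices as in the
ring code (values taken from the scenario above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t capacity = 32, cons_tail = 5, old_head = 110;
            /* same expression as quoted from the ring code above */
            uint32_t free_entries = capacity + cons_tail - old_head;

            /* prints 4294967223: the stale cons_tail makes the subtraction
             * wrap around, so "n > free_entries" no longer rejects
             * oversized requests. */
            printf("%u\n", free_entries);
            return 0;
    }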

So I still think we do need some sort of _read_fence_ between these two loads.
As I said before, that looks exactly like the old bug, fixed a while ago:
http://git.dpdk.org/dpdk/commit/?id=9bc2cbb007c0a3335c5582357ae9f6d37ea0b654
but now re-introduced for C11 case.

> >
> > commit 9bc2cbb007c0a3335c5582357ae9f6d37ea0b654
> > Author: Jia He 
> > Date:   Fri Nov 10 03:30:42 2017 +
> >
> > ring: guarantee load/load order in enqueue and dequeue
> >
> > We watched a rte panic of mbuf_autotest in our qualcomm arm64 server
> > (Amberwing).
> >
> > Root cause:
> > In __rte_ring_move_cons_head()
> > ...
> > do {
> > /* Restore n as it may change every loop */
> > n = max;
> >
> > *old_head = r->cons.head;//1st load
> > const uint32_t prod_tail = r->prod.tail; //2nd load
> >
> > In weak memory order architectures (powerpc,arm), the 2nd load might be
> > reodered before the 1st load, that makes *entries is bigger than we 
> > wanted.
> > This nasty reording messed enque/deque up.
> > 
> > ?
> >
> > >
> > > Signed-off-by: Wathsala Vithanage 
> > > Reviewed-by: Honnappa Nagarahalli 
> > > Reviewed-by: Ruifeng Wang 
> > > ---
> > >  .mailmap|  1 +
> > >  lib/ring/rte_ring_c11_pvt.h | 35 ---
> > >  2 files changed, 21 insertions(+), 15 deletions(-)
> > >
> > > diff --git a/.mailmap b/.mailmap
> > > index 4018f0fc47..367115d134 100644
> > > --- a/.mailmap
> > > +++ b/.mailmap
> > > @@ -1430,6 +1430,7 @@ Walter Heymans
> > 
> > > Wang Sheng-Hui   Wangyu (Eric)
> > >   Waterman Cao 
> > > +Wathsala Vithanage 
> > >  Weichun Chen   Wei Dai 
> > > Weifeng Li  diff --git
> > > a/lib/ring/rte_ring_c11_pvt.h b/lib/ring/rte_ring_c11_pvt.h index
> > > f895950df4..63fe58ce9e 100644
> > > --- a/lib/ring/rte_ring_c11_pvt.h
> > > +++ b/lib/ring/rte_ring_c11_pvt.h
> > > @@ -16,6 +16,13 @@ __rte_ring_update_tail(struct rte_ring_headtail *ht,
> > uint32_t old_val,
> > >   uint32_t new_val, uint32_t single, uint32_t enqueue)  {
> > >   RTE_SET_USED(enqueue);
> > > + /*
> > > +  * Updating of ht->tail cannot happen before elements are added to or
> > > +  * removed from the ring, as it could result in data races between
> > > +  * producer and consumer threads. Therefore we need a release
> > > +  * barrier here.
> > > +  */
> > > + rte_atomic_thread_fence(__ATOMIC_RELEASE);
> > >
> > >   /*
> > >* If there are other enqueues/dequeues in progress that preceded
> > > us, @@ -24,7 +31,7 @@ __rte_ring_update_tail(struct rte_ring_headtail
> > *ht, uint32_t old_val,
> > >   if (!single)
> > >   rte_wait_until_equal_32(&ht->tail, old_val,
> > __ATOMIC_RELAXED);
> > >
> > > - __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);
> > > + __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELAXED);
> > >  }
> > >
> > >  /**
> > > @@ -66,14 +73,8 @@ __rte_ring_move_prod_head(struct rte_ring *r,
> > unsigned int is_sp,
> > >   /* Reset n to the initial burst count */
> > >   n = max;
> > >
> > > - /* Ensure the head is read before tail */
> > > - __atomic_thread_fence(__ATOMIC_ACQUIRE);
> > > -
> > > - 

Re: [RFC 0/3] Introduce event link profiles

2023-08-09 Thread Mattias Rönnblom

On 2023-08-09 16:26, pbhagavat...@marvell.com wrote:

From: Pavan Nikhilesh 

A collection of event queues linked to an event port can be associated
with a unique identifier called a profile; multiple such profiles can
be configured based on the event device capability using the function
`rte_event_port_link_with_profile`, which takes arguments similar to
`rte_event_port_link` in addition to the profile identifier.



What is the overall goal of this new API? What problems does it intend
to solve that the old one doesn't?



The maximum number of link profiles supported by an event device is
advertised through the structure member
`rte_event_dev_info::max_profiles_per_port`.

By default, event ports are configured to use link profile 0 on
initialization.

Once multiple link profiles are set up and the event device is started, the
application can use the function `rte_event_port_change_profile` to change
the currently active profile on an event port. This takes effect on the next
`rte_event_dequeue_burst` call, where the event queues associated with the
newly active link profile will participate in scheduling.

A rudimentary workflow would be something like:

Config path:

 uint8_t lowQ[4] = {4, 5, 6, 7};
 uint8_t highQ[4] = {0, 1, 2, 3};

 if (rte_event_dev_info.max_profiles_per_port < 2)
 return -ENOTSUP;

 rte_event_port_link_with_profile(0, 0, highQ, NULL, 4, 0);
 rte_event_port_link_with_profile(0, 0, lowQ, NULL, 4, 1);

Worker path:

 empty_high_deq = 0;
 empty_low_deq = 0;
 is_low_deq = 0;
 while (1) {
 deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
 if (deq == 0) {
 /**
  * Change link profile based on work activity on current
  * active profile
  */
 if (is_low_deq) {
 empty_low_deq++;
 if (empty_low_deq == MAX_LOW_RETRY) {
 rte_event_port_change_profile(0, 0, 0);
 is_low_deq = 0;
 empty_low_deq = 0;
 }
 continue;
 }

 if (empty_high_deq == MAX_HIGH_RETRY) {
 rte_event_port_change_profile(0, 0, 1);
 is_low_deq = 1;
 empty_high_deq = 0;
 }
 continue;
 }

 // Process the event received.

 if (is_low_deq++ == MAX_LOW_EVENTS) {
 rte_event_port_change_profile(0, 0, 0);
 is_low_deq = 0;
 }
 }



This thing looks like the application is asked to do work scheduling. 
That doesn't sound right. That's the job of the work scheduler (i.e., 
the event device).


If this thing is merely a matter of changing what queues are linked to 
which ports, wouldn't a new call:

rte_event_port_link_modify()
suffice?


An application could use heuristic data on the load/activity of a given event
port and change its active profile to adapt to the traffic pattern.

An unlink function `rte_event_port_unlink_with_profile` is provided to
modify the links associated with a profile, and
`rte_event_port_links_get_with_profile` can be used to retrieve the links
associated with a profile.

Pavan Nikhilesh (3):
   eventdev: introduce link profiles
   event/cnxk: implement event link profiles
   test/event: add event link profile test

  app/test/test_eventdev.c   | 110 ++
  config/rte_config.h|   1 +
  doc/guides/eventdevs/cnxk.rst  |   1 +
  doc/guides/prog_guide/eventdev.rst |  58 ++
  drivers/common/cnxk/roc_nix_inl_dev.c  |   4 +-
  drivers/common/cnxk/roc_sso.c  |  18 +-
  drivers/common/cnxk/roc_sso.h  |   8 +-
  drivers/common/cnxk/roc_sso_priv.h |   4 +-
  drivers/event/cnxk/cn10k_eventdev.c|  45 ++--
  drivers/event/cnxk/cn10k_worker.c  |  11 +
  drivers/event/cnxk/cn10k_worker.h  |   1 +
  drivers/event/cnxk/cn9k_eventdev.c |  72 ---
  drivers/event/cnxk/cn9k_worker.c   |  22 ++
  drivers/event/cnxk/cn9k_worker.h   |   2 +
  drivers/event/cnxk/cnxk_eventdev.c |  34 ++--
  drivers/event/cnxk/cnxk_eventdev.h |  10 +-
  drivers/event/dlb2/dlb2.c  |   1 +
  drivers/event/dpaa/dpaa_eventdev.c |   1 +
  drivers/event/dpaa2/dpaa2_eventdev.c   |   2 +-
  drivers/event/dsw/dsw_evdev.c  |   1 +
  drivers/event/octeontx/ssovf_evdev.c   |   2 +-
  drivers/event/opdl/opdl_evdev.c|   1 +
  drivers/event/skeleton/skeleton_eventdev.c |   1 +
  drivers/event/sw/sw_evdev.c|   1 +
  lib/eventdev/eventdev_pmd.h|  59 +-
  lib/eventdev/eventdev_private.c|   9 +
  lib/eventdev/eventdev_trace.h  |  22 ++
  lib/eventdev/eventdev_trace_points.c   |   6 +
  lib/eventdev/rte_eventdev.c| 146 ++---
  lib/eventdev/rte_eventdev.h| 226 

Re: [PATCH 09/15] eal: make seqcount and seqlock stable

2023-08-09 Thread Mattias Rönnblom

On 2023-08-09 18:43, Stephen Hemminger wrote:

These were added back in the 22.07 release.

Signed-off-by: Stephen Hemminger 
---
  lib/eal/include/rte_seqcount.h | 23 ---
  lib/eal/include/rte_seqlock.h  | 21 -
  2 files changed, 44 deletions(-)



Acked-by: Mattias Rönnblom 


diff --git a/lib/eal/include/rte_seqcount.h b/lib/eal/include/rte_seqcount.h
index ff62708e1b7b..6390a5a72f7c 100644
--- a/lib/eal/include/rte_seqcount.h
+++ b/lib/eal/include/rte_seqcount.h
@@ -40,15 +40,11 @@ typedef struct {
  #define RTE_SEQCOUNT_INITIALIZER { .sn = 0 }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Initialize the sequence counter.
   *
   * @param seqcount
   *   A pointer to the sequence counter.
   */
-__rte_experimental
  static inline void
  rte_seqcount_init(rte_seqcount_t *seqcount)
  {
@@ -56,9 +52,6 @@ rte_seqcount_init(rte_seqcount_t *seqcount)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Begin a read-side critical section.
   *
   * A call to this function marks the beginning of a read-side critical
@@ -100,8 +93,6 @@ rte_seqcount_init(rte_seqcount_t *seqcount)
   *
   * @see rte_seqcount_read_retry()
   */
-
-__rte_experimental
  static inline uint32_t
  rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
  {
@@ -113,9 +104,6 @@ rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * End a read-side critical section.
   *
   * A call to this function marks the end of a read-side critical
@@ -145,8 +133,6 @@ rte_seqcount_read_begin(const rte_seqcount_t *seqcount)
   *
   * @see rte_seqcount_read_begin()
   */
-
-__rte_experimental
  static inline bool
  rte_seqcount_read_retry(const rte_seqcount_t *seqcount, uint32_t begin_sn)
  {
@@ -171,9 +157,6 @@ rte_seqcount_read_retry(const rte_seqcount_t *seqcount, 
uint32_t begin_sn)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Begin a write-side critical section.
   *
   * A call to this function marks the beginning of a write-side
@@ -195,8 +178,6 @@ rte_seqcount_read_retry(const rte_seqcount_t *seqcount, 
uint32_t begin_sn)
   *
   * @see rte_seqcount_write_end()
   */
-
-__rte_experimental
  static inline void
  rte_seqcount_write_begin(rte_seqcount_t *seqcount)
  {
@@ -213,9 +194,6 @@ rte_seqcount_write_begin(rte_seqcount_t *seqcount)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * End a write-side critical section.
   *
   * A call to this function marks the end of the write-side critical
@@ -227,7 +205,6 @@ rte_seqcount_write_begin(rte_seqcount_t *seqcount)
   *
   * @see rte_seqcount_write_begin()
   */
-__rte_experimental
  static inline void
  rte_seqcount_write_end(rte_seqcount_t *seqcount)
  {
diff --git a/lib/eal/include/rte_seqlock.h b/lib/eal/include/rte_seqlock.h
index fcbb9c586668..589c98188529 100644
--- a/lib/eal/include/rte_seqlock.h
+++ b/lib/eal/include/rte_seqlock.h
@@ -114,9 +114,6 @@ typedef struct {
}
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Initialize the seqlock.
   *
   * This function initializes the seqlock, and leaves the writer-side
@@ -125,7 +122,6 @@ typedef struct {
   * @param seqlock
   *   A pointer to the seqlock.
   */
-__rte_experimental
  static inline void
  rte_seqlock_init(rte_seqlock_t *seqlock)
  {
@@ -134,9 +130,6 @@ rte_seqlock_init(rte_seqlock_t *seqlock)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Begin a read-side critical section.
   *
   * See rte_seqcount_read_retry() for details.
@@ -150,8 +143,6 @@ rte_seqlock_init(rte_seqlock_t *seqlock)
   * @see rte_seqlock_read_retry()
   * @see rte_seqcount_read_retry()
   */
-
-__rte_experimental
  static inline uint32_t
  rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
  {
@@ -159,9 +150,6 @@ rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * End a read-side critical section.
   *
   * See rte_seqcount_read_retry() for details.
@@ -177,7 +165,6 @@ rte_seqlock_read_begin(const rte_seqlock_t *seqlock)
   *
   * @see rte_seqlock_read_begin()
   */
-__rte_experimental
  static inline bool
  rte_seqlock_read_retry(const rte_seqlock_t *seqlock, uint32_t begin_sn)
  {
@@ -185,9 +172,6 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, 
uint32_t begin_sn)
  }
  
  /**

- * @warning
- * @b EXPERIMENTAL: this API may change without prior notice.
- *
   * Begin a write-side critical section.
   *
   * A call to this function acquires the write lock associated @p
@@ -212,7 +196,6 @@ rte_seqlock_read_retry(const rte_seqlock_t *seqlock, 
uint32_t begin_sn)
   *
   * @see rte_seqlock_write_unlock()
   */
-__rte_exp
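
For reference, a minimal usage sketch of the now-stable seqlock API, following
the read-retry and write-lock patterns documented in the header comments above
(the shared data layout and variable names are illustrative only; the write
lock pairs with rte_seqlock_write_unlock() as the header describes):

    static rte_seqlock_t lock;
    static struct { uint64_t a; uint64_t b; } shared;

    /* Init path, once before use. */
    rte_seqlock_init(&lock);

    /* Writer side: the seqlock's write lock serializes concurrent writers. */
    rte_seqlock_write_lock(&lock);
    shared.a++;
    shared.b++;
    rte_seqlock_write_unlock(&lock);

    /* Reader side: retry until a consistent snapshot is observed. */
    uint32_t sn;
    uint64_t a, b;
    do {
            sn = rte_seqlock_read_begin(&lock);
            a = shared.a;
            b = shared.b;
    } while (rte_seqlock_read_retry(&lock, sn));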

Re: [PATCH 01/15] eal: make bitops a stable API

2023-08-09 Thread Tyler Retzlaff
On Wed, Aug 09, 2023 at 01:07:10PM -0700, Stephen Hemminger wrote:
> On Wed, 9 Aug 2023 10:58:51 -0700
> Tyler Retzlaff  wrote:
> 
> > On Wed, Aug 09, 2023 at 09:42:53AM -0700, Stephen Hemminger wrote:
> > > These were added in the 20.05 release.
> > > 
> > > Signed-off-by: Stephen Hemminger 
> > > ---  
> > 
> > Can I ask that this patch be held until this series is merged?
> 
> I expect these mostly will go in after other changes are done.

thanks!


RE: [EXT] Re: [RFC 0/3] Introduce event link profiles

2023-08-09 Thread Pavan Nikhilesh Bhagavatula
> On 2023-08-09 16:26, pbhagavat...@marvell.com wrote:
> > From: Pavan Nikhilesh 
> >
> > A collection of event queues linked to an event port can be associated
> > with a unique identifier called a profile. Multiple such profiles can
> > be configured based on the event device capability using the function
> > `rte_event_port_link_with_profile` which takes arguments similar to
> > `rte_event_port_link` in addition to the profile identifier.
> >
> 
> What is the overall goal with this new API? What problems does it intend
> to solve, that the old one doesn't.

Linking and unlinking currently have a huge overhead, and when they need to be
done in the fastpath we have to wait for unlinks to complete and handle other
corner cases.

This patch set solves that by avoiding linking/unlinking in the fastpath
altogether: a set of link profiles is preconfigured, only one of them is active
at a time, and the active profile can be changed in the fastpath with a single
function call. There is no link/unlink or wait-for-unlink overhead.
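
As a rough sketch of that fastpath switch with the proposed API (device and
port ids, queue sets, and the profile numbering are illustrative; it assumes
the device reports max_profiles_per_port >= 2):

    uint8_t hi_queues[] = {0, 1, 2, 3};
    uint8_t lo_queues[] = {4, 5, 6, 7};

    /* Slow path, before rte_event_dev_start(): preconfigure both profiles. */
    rte_event_port_link_with_profile(0, 0, hi_queues, NULL, 4, 0);
    rte_event_port_link_with_profile(0, 0, lo_queues, NULL, 4, 1);

    /* Fast path: one call swaps the active set of linked queues;
     * no unlink and no waiting for unlinks to complete.
     */
    rte_event_port_change_profile(0, 0, 1);   /* serve the low-prio set */
    /* ... later ... */
    rte_event_port_change_profile(0, 0, 0);   /* restore the high-prio set */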

> 
> > The maximum number of link profiles supported by an event device is
> > advertised through the structure member
> > `rte_event_dev_info::max_profiles_per_port`.
> >
> > By default, event ports are configured to use the link profile 0 on
> > initialization.
> >
> > Once multiple link profiles are set up and the event device is started, the
> > application can use the function `rte_event_port_change_profile` to
> change
> > the currently active profile on an event port. This takes effect on the next
> > `rte_event_dequeue_burst` call, where the event queues associated with
> the
> > newly active link profile will participate in scheduling.
> >
> > A rudimentary workflow would be something like:
> >
> > Config path:
> >
> >  uint8_t lowQ[4] = {4, 5, 6, 7};
> >  uint8_t highQ[4] = {0, 1, 2, 3};
> >
> >  if (rte_event_dev_info.max_profiles_per_port < 2)
> >  return -ENOTSUP;
> >
> >  rte_event_port_link_with_profile(0, 0, highQ, NULL, 4, 0);
> >  rte_event_port_link_with_profile(0, 0, lowQ, NULL, 4, 1);
> >
> > Worker path:
> >
> >  empty_high_deq = 0;
> >  empty_low_deq = 0;
> >  is_low_deq = 0;
> >  while (1) {
> >  deq = rte_event_dequeue_burst(0, 0, &ev, 1, 0);
> >  if (deq == 0) {
> >  /**
> >   * Change link profile based on work activity on current
> >   * active profile
> >   */
> >  if (is_low_deq) {
> >  empty_low_deq++;
> >  if (empty_low_deq == MAX_LOW_RETRY) {
> >  rte_event_port_change_profile(0, 0, 0);
> >  is_low_deq = 0;
> >  empty_low_deq = 0;
> >  }
> >  continue;
> >  }
> >
> >  if (empty_high_deq == MAX_HIGH_RETRY) {
> >  rte_event_port_change_profile(0, 0, 1);
> >  is_low_deq = 1;
> >  empty_high_deq = 0;
> >  }
> >  continue;
> >  }
> >
> >  // Process the event received.
> >
> >  if (is_low_deq++ == MAX_LOW_EVENTS) {
> >  rte_event_port_change_profile(0, 0, 0);
> >  is_low_deq = 0;
> >  }
> >  }
> >
> 
> This thing looks like the application is asked to do work scheduling.
> That doesn't sound right. That's the job of the work scheduler (i.e.,
> the event device).
> 
> If this thing is merely a matter of changing what queues are linked to
> which ports, wouldn't a new call:
> rte_event_port_link_modify()
> suffice?


Some applications divide their available lcores into multiple types of workers,
each working on a unique set of event queues. Such an application might need to
modify the worker ratio at run time, based on various parameters, without a lot
of overhead.

Simply modifying links wouldn't work, because we might want to restore the
previous links when the traffic pattern changes again.

> 
> > An application could use heuristic data of load/activity of a given event
> > port and change its active profile to adapt to the traffic pattern.
> >
> > An unlink function `rte_event_port_unlink_with_profile` is provided to
> > modify the links associated to a profile, and
> > `rte_event_port_links_get_with_profile` can be used to retrieve the links
> > associated with a profile.
> >
> > Pavan Nikhilesh (3):
> >eventdev: introduce link profiles
> >event/cnxk: implement event link profiles
> >test/event: add event link profile test
> >
> >   app/test/test_eventdev.c   | 110 ++
> >   config/rte_config.h|   1 +
> >   doc/guides/eventdevs/cnxk.rst  |   1 +
> >   doc/guides/prog_guide/eventdev.rst |  58 ++
> >   drivers/common/cnxk/roc_nix_inl_dev.c  |   4 +-
> >   drivers/common/cnxk/roc_sso.c  |  18 +-
> >   drivers/common/cnxk/roc_sso.h  |   8 +-
> >   drivers/common/cnxk/roc_sso_priv.h |   4 +-
> >   drivers/event/cnxk