FW: [PATCH v3 6/9] net/cpfl: add fxp rule module

2023-09-12 Thread Liu, Mingxia



> -----Original Message-----
> From: Qiao, Wenjing 
> Sent: Wednesday, September 6, 2023 5:34 PM
> To: Zhang, Yuying ; dev@dpdk.org; Zhang, Qi Z
> ; Wu, Jingjing ; Xing, Beilei
> 
> Cc: Liu, Mingxia 
> Subject: [PATCH v3 6/9] net/cpfl: add fxp rule module
> 
> From: Yuying Zhang 
> 
> Added a low-level fxp module for rule packing / creation / destruction.
> 
> Signed-off-by: Yuying Zhang 
> ---
>  drivers/net/cpfl/cpfl_controlq.c | 424 +++
> drivers/net/cpfl/cpfl_controlq.h |  24 ++
>  drivers/net/cpfl/cpfl_ethdev.c   |  31 +++
>  drivers/net/cpfl/cpfl_ethdev.h   |   6 +
>  drivers/net/cpfl/cpfl_fxp_rule.c | 297 ++
> drivers/net/cpfl/cpfl_fxp_rule.h |  68 +
>  drivers/net/cpfl/meson.build |   1 +
>  7 files changed, 851 insertions(+)
>  create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c  create mode 100644
> drivers/net/cpfl/cpfl_fxp_rule.h
> 
> diff --git a/drivers/net/cpfl/cpfl_controlq.c 
> b/drivers/net/cpfl/cpfl_controlq.c
> index 476c78f235..ed76282b0c 100644
> --- a/drivers/net/cpfl/cpfl_controlq.c
> +++ b/drivers/net/cpfl/cpfl_controlq.c
> @@ -331,6 +331,402 @@ cpfl_ctlq_add(struct idpf_hw *hw, struct
> cpfl_ctlq_create_info *qinfo,
>   return status;
>  }
> 
> +/**
> + * cpfl_ctlq_send - send command to Control Queue (CTQ)
> + * @hw: pointer to hw struct
> + * @cq: handle to control queue struct to send on
> + * @num_q_msg: number of messages to send on control queue
> + * @q_msg: pointer to array of queue messages to be sent
> + *
> + * The caller is expected to allocate DMAable buffers and pass them to
> +the
> + * send routine via the q_msg struct / control queue specific data struct.
> + * The control queue will hold a reference to each send message until
> + * the completion for that message has been cleaned.
> + */
> +int
> +cpfl_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
> +uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]) {
> + struct idpf_ctlq_desc *desc;
> + int num_desc_avail = 0;
> + int status = 0;
> + int i = 0;
> +
> + if (!cq || !cq->ring_size)
> + return -ENOBUFS;
> +
> + idpf_acquire_lock(&cq->cq_lock);
> +
> + /* Ensure there are enough descriptors to send all messages */
> + num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
> + if (num_desc_avail == 0 || num_desc_avail < num_q_msg) {
> + status = -ENOSPC;
> + goto sq_send_command_out;
> + }
> +
> + for (i = 0; i < num_q_msg; i++) {
> + struct idpf_ctlq_msg *msg = &q_msg[i];
> + uint64_t msg_cookie;
> +
> + desc = IDPF_CTLQ_DESC(cq, cq->next_to_use);
> + desc->opcode = CPU_TO_LE16(msg->opcode);
> + desc->pfid_vfid = CPU_TO_LE16(msg->func_id);
> + msg_cookie = *(uint64_t *)&msg->cookie;
> + desc->cookie_high =
> + CPU_TO_LE32(IDPF_HI_DWORD(msg_cookie));
> + desc->cookie_low =
> + CPU_TO_LE32(IDPF_LO_DWORD(msg_cookie));
> + desc->flags = CPU_TO_LE16((msg->host_id &
> IDPF_HOST_ID_MASK) <<
> + IDPF_CTLQ_FLAG_HOST_ID_S);
> + if (msg->data_len) {
> + struct idpf_dma_mem *buff = msg-
> >ctx.indirect.payload;
> +
> + desc->datalen |= CPU_TO_LE16(msg->data_len);
> + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_BUF);
> + desc->flags |= CPU_TO_LE16(IDPF_CTLQ_FLAG_RD);
> + /* Update the address values in the desc with the pa
> +  * value for respective buffer
> +  */
> + desc->params.indirect.addr_high =
> + CPU_TO_LE32(IDPF_HI_DWORD(buff->pa));
> + desc->params.indirect.addr_low =
> + CPU_TO_LE32(IDPF_LO_DWORD(buff->pa));
> + idpf_memcpy(&desc->params, msg-
> >ctx.indirect.context,
> + IDPF_INDIRECT_CTX_SIZE,
> IDPF_NONDMA_TO_DMA);
> + } else {
> + idpf_memcpy(&desc->params, msg->ctx.direct,
> + IDPF_DIRECT_CTX_SIZE,
> IDPF_NONDMA_TO_DMA);
> + }
> +
> + /* Store buffer info */
> + cq->bi.tx_msg[cq->next_to_use] = msg;
> + (cq->next_to_use)++;
> + if (cq->next_to_use == cq->ring_size)
> + cq->next_to_use = 0;
> + }
> +
> + /* Force memory write to complete before letting hardware
> +  * know that there are new descriptors to fetch.
> +  */
> + idpf_wmb();
> + wr32(hw, cq->reg.tail, cq->next_to_use);
> +
> +sq_send_command_out:
> + idpf_release_lock(&cq->cq_lock);
> +
> + return status;
> +}
> +
> +/**
> + * __cpfl_ctlq_clean_sq - helper function to reclaim descriptors on HW
> +write
> + * back for the requested queue
> + * @cq: pointer to the sp

[PATCH v3] vhost: avoid potential null pointer access

2023-09-12 Thread Li Feng
If the user calls rte_vhost_vring_call() on a ring that has been
invalidated, we will encounter a SEGV.

We should check the pointers before accessing them.

Signed-off-by: Li Feng 
---
v2 -> v3:
- Also fix the rte_vhost_vring_call_nonblock.

v1 -> v2:
- Fix rebase error.
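
Note for API users (not part of this patch): with this change
rte_vhost_vring_call() and rte_vhost_vring_call_nonblock() may return a
negative value when the ring has been invalidated, so callers can check the
return code. A minimal illustrative sketch (the function name and log type
are arbitrary):

#include <rte_log.h>
#include <rte_vhost.h>

/* Kick the guest if possible; tolerate rings that are not currently valid. */
static void
kick_guest(int vid, uint16_t vring_idx)
{
    int ret = rte_vhost_vring_call(vid, vring_idx);

    if (ret < 0) {
        /* The ring was invalidated (e.g. guest memory was remapped);
         * nothing to do now, a later enqueue will kick again.
         */
        RTE_LOG(DEBUG, USER1, "vring %u not ready, call skipped\n",
                (unsigned int)vring_idx);
    }
}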



 lib/vhost/vhost.c | 14 --
 lib/vhost/vhost.h | 12 ++--
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index eb6309b681..46f3391167 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -1327,6 +1327,7 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
 {
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+   int ret = 0;

dev = get_device(vid);
if (!dev)
@@ -1342,13 +1343,13 @@ rte_vhost_vring_call(int vid, uint16_t vring_idx)
rte_rwlock_read_lock(&vq->access_lock);

if (vq_is_packed(dev))
-   vhost_vring_call_packed(dev, vq);
+   ret = vhost_vring_call_packed(dev, vq);
else
-   vhost_vring_call_split(dev, vq);
+   ret = vhost_vring_call_split(dev, vq);

rte_rwlock_read_unlock(&vq->access_lock);

-   return 0;
+   return ret;
 }

 int
@@ -1356,6 +1357,7 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t vring_idx)
 {
struct virtio_net *dev;
struct vhost_virtqueue *vq;
+   int ret = 0;

dev = get_device(vid);
if (!dev)
@@ -1372,13 +1374,13 @@ rte_vhost_vring_call_nonblock(int vid, uint16_t 
vring_idx)
return -EAGAIN;

if (vq_is_packed(dev))
-   vhost_vring_call_packed(dev, vq);
+   ret = vhost_vring_call_packed(dev, vq);
else
-   vhost_vring_call_split(dev, vq);
+   ret = vhost_vring_call_split(dev, vq);

rte_rwlock_read_unlock(&vq->access_lock);

-   return 0;
+   return ret;
 }

 uint16_t
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 9723429b1c..4c09c2ef0e 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -930,12 +930,15 @@ vhost_vring_inject_irq(struct virtio_net *dev, struct 
vhost_virtqueue *vq)
dev->notify_ops->guest_notified(dev->vid);
 }

-static __rte_always_inline void
+static __rte_always_inline int
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
/* Flush used->idx update before we read avail->flags. */
rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

+   if (!vq->avail || !vq->used)
+   return -1;
+
/* Don't kick guest if we don't reach index specified by guest. */
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
@@ -957,9 +960,10 @@ vhost_vring_call_split(struct virtio_net *dev, struct 
vhost_virtqueue *vq)
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
vhost_vring_inject_irq(dev, vq);
}
+   return 0;
 }

-static __rte_always_inline void
+static __rte_always_inline int
 vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
uint16_t old, new, off, off_wrap;
@@ -968,6 +972,9 @@ vhost_vring_call_packed(struct virtio_net *dev, struct 
vhost_virtqueue *vq)
/* Flush used desc update. */
rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

+   if (!vq->driver_event)
+   return -1;
+
if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
if (vq->driver_event->flags !=
VRING_EVENT_F_DISABLE)
@@ -1008,6 +1015,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct 
vhost_virtqueue *vq)
 kick:
if (kick)
vhost_vring_inject_irq(dev, vq);
+   return 0;
 }

 static __rte_always_inline void
--
2.41.0



[PATCH v5 00/10] net/cpfl: support port representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. Code refinement for representor support.
2. Support for port representor.

v5 changes:
 - refine cpfl_vport_info structure
 - refine cpfl_repr_link_update function
 - refine cpfl_repr_create function
v4 changes:
 - change the patch order
 - merge two patches
 - revert enum change
v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (10):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: support vport list/info get
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst   |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +
 drivers/net/cpfl/cpfl_ethdev.c | 621 
 drivers/net/cpfl/cpfl_ethdev.h |  91 +++-
 drivers/net/cpfl/cpfl_representor.c| 632 +
 drivers/net/cpfl/cpfl_representor.h|  26 +
 drivers/net/cpfl/cpfl_vchnl.c  |  72 +++
 drivers/net/cpfl/meson.build   |   4 +-
 9 files changed, 1719 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1



[PATCH v5 01/10] net/cpfl: refine devargs parse and process

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. Keep devargs in adapter.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++---
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void 
*args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter,
-  struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
+   struct cpfl_devargs *cpfl_args = &adapter->devargs;
struct rte_kvargs *kvlist;
-   int i, ret;
+   int ret;
 
cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
-   /* check parsed devargs */
-   if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-   adapter->max_vport_nb) {
-   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-adapter->max_vport_nb);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-   if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
-cpfl_args->req_vports[i], 
adapter->max_vport_nb - 1);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-   PMD_INIT_LOG(ERR, "Vport %d has been requested",
-cpfl_args->req_vports[i]);
-   ret = -EINVAL;
-   goto fail;
-   }
-   }
-
 fail:
rte_kvargs_free(kvlist);
return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext 
*adapter)
adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+   struct cpfl_devargs *devargs = &adapter->devargs;
+   int i;
+
+   /* refine vport number, at least 1 vport */
+   if (devargs->req_vport_nb == 0) {
+   devargs->req_vport_nb = 1;
+   devargs->req_vports[0] = 0;
+   }
+
+   /* check parsed devargs */
+   if (adapter->cur_vport_nb + devargs->req_vport_nb >
+   adapter->max_vport_nb) {
+   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+adapter->max_vport_nb);
+   return -EINVAL;
+   }
+
+   for (i = 0; i < devargs->req_vport_nb; i++) {
+   if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
+devargs->req_vports[i], 
adapter->max_vport_nb - 1);
+   return -EINVAL;
+   }
+
+   if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+   PMD_INIT_LOG(ERR, "Vport %d has been requested",
+devargs->req_vports[i]);
+   return -EINVAL;
+   }
+   }
+
+   return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   struct cpfl_vport_param vport_param;
+   char name[RTE_ETH_NAME_MAX_LEN];
+   int ret, i;
+
+   for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+   vport_param.adapter = adapter;
+   vport_param.devarg_id = adapter->devargs.req_vports[i];
+   vport_param.idx = cpfl_vport_idx_alloc(adapter);
+   if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+   PMD_INIT_LOG(ERR, "No space for vport %u", 
vport_param.devarg_id);
+   break;
+   }
+   snprintf(name, sizeof(name), "net_%s_vport_%d",
+pci_dev->device.name,
+adapter->devargs.req_vports[i]);
+   ret = rte_eth_dev_create(&pci_dev->device, name,
+   sizeof(struct cpfl_vport),
+   NULL, NULL, cpfl_dev_vport_init,
+   &vport_param);
+   if (ret != 0)
+   P

[PATCH v5 02/10] net/cpfl: introduce interface structure

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Introduce the cpfl interface structure to distinguish vport and port
representor.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 15 +++
 2 files changed, 18 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void 
*init_params)
goto err;
}
 
+   cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+   cpfl_vport->itf.adapter = adapter;
+   cpfl_vport->itf.data = dev->data;
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..feb1edc4b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,18 @@ struct p2p_queue_chunks_info {
uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+   CPFL_ITF_TYPE_VPORT,
+};
+
+struct cpfl_itf {
+   enum cpfl_itf_type type;
+   struct cpfl_adapter_ext *adapter;
+   void *data;
+};
+
 struct cpfl_vport {
+   struct cpfl_itf itf;
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +135,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev) \
+   ((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)   \
+   ((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1



[PATCH v5 03/10] net/cpfl: refine handle virtual channel message

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Refine handling of virtual channel event messages.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 48 +-
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..31a5822d2c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,52 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-   struct idpf_vport *vport = NULL;
+   struct cpfl_vport *vport = NULL;
int i;
 
for (i = 0; i < adapter->cur_vport_nb; i++) {
-   vport = &adapter->vports[i]->base;
-   if (vport->vport_id != vport_id)
+   vport = adapter->vports[i];
+   if (vport == NULL)
+   continue;
+   if (vport->base.vport_id != vport_id)
continue;
else
return vport;
}
 
-   return vport;
+   return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
 {
struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-   struct rte_eth_dev_data *data = vport->dev_data;
-   struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+   struct cpfl_vport *vport;
+   struct rte_eth_dev_data *data;
+   struct rte_eth_dev *dev;
 
if (msglen < sizeof(struct virtchnl2_event)) {
PMD_DRV_LOG(ERR, "Error event");
return;
}
 
+   vport = cpfl_find_vport(adapter, vc_event->vport_id);
+   if (!vport) {
+   PMD_DRV_LOG(ERR, "Can't find vport.");
+   return;
+   }
+
+   data = vport->itf.data;
+   dev = &rte_eth_devices[data->port_id];
+
switch (vc_event->event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-   vport->link_up = !!(vc_event->link_status);
-   vport->link_speed = vc_event->link_speed;
+   vport->base.link_up = !!(vc_event->link_status);
+   vport->base.link_speed = vc_event->link_speed;
cpfl_dev_link_update(dev, 0);
break;
default:
@@ -1498,10 +1510,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
struct idpf_adapter *base = &adapter->base;
struct idpf_dma_mem *dma_mem = NULL;
struct idpf_hw *hw = &base->hw;
-   struct virtchnl2_event *vc_event;
struct idpf_ctlq_msg ctlq_msg;
enum idpf_mbx_opc mbx_op;
-   struct idpf_vport *vport;
uint16_t pending = 1;
uint32_t vc_op;
int ret;
@@ -1523,18 +1533,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
switch (mbx_op) {
case idpf_mbq_opc_send_msg_to_peer_pf:
if (vc_op == VIRTCHNL2_OP_EVENT) {
-   if (ctlq_msg.data_len < sizeof(struct 
virtchnl2_event)) {
-   PMD_DRV_LOG(ERR, "Error event");
-   return;
-   }
-   vc_event = (struct virtchnl2_event 
*)base->mbx_resp;
-   vport = cpfl_find_vport(adapter, 
vc_event->vport_id);
-   if (!vport) {
-   PMD_DRV_LOG(ERR, "Can't find vport.");
-   return;
-   }
-   cpfl_handle_event_msg(vport, base->mbx_resp,
- ctlq_msg.data_len);
+   cpfl_handle_vchnl_event_msg(adapter, 
adapter->base.mbx_resp,
+   ctlq_msg.data_len);
} else {
if (vc_op == base->pend_cmd)
notify_cmd(base, base->cmd_retval);
-- 
2.34.1



[PATCH v5 04/10] net/cpfl: introduce CP channel API

2023-09-12 Thread beilei . xing
From: Beilei Xing 

CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver that utilizes CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 00..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief  Command Opcodes
+ *  Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+   /* vport info */
+   CPCHNL2_OP_GET_VPORT_LIST   = 0x8025,
+   CPCHNL2_OP_GET_VPORT_INFO   = 0x8026,
+
+   /* DPHMA Event notifications */
+   CPCHNL2_OP_EVENT= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT  8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS  6
+
+#define CPCHNL2_FUNC_TYPE_PF   0
+#define CPCHNL2_FUNC_TYPE_SRIOV1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED   0
+#define CPCHNL2_VPORT_STATUS_ENABLED   1
+#define CPCHNL2_VPORT_STATUS_DISABLED  2
+#define CPCHNL2_VPORT_STATUS_DESTROYED 3
+
+/* Queue Groups Extension */
+/**/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
+   { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+   u32 type;  /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum 
nsl_lan_queue_type */
+   u32 start_queue_id;
+   u32 num_queues;
+   u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+   u16 num_chunks;
+   u8 reserved[6];
+   struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+   /* User can ask to update rss_lut size originally allocated
+* by CreateVport command. New size will be returned if allocation 
succeeded,
+* otherwise original rss_size from CreateVport will be returned.
+*/
+   u16 rss_lut_size;
+   u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+   u8 tx_tc; /*TX TC queue group will be connected to*/
+   /* Each group can have its own priority, value 0-7, while each group 
with unique
+* priority is strict priority. It can be single set of queue groups 
which configured with
+* same priority, then they are assumed part of WFQ arbitration group 
and are expected to be
+* assigned with weight.
+*/
+   u8 priority;
+   /* Determines if queue group is expected to be Strict Priority 
according to its priority */
+   u8 is_sp;
+   u8 pad;
+   /* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration 
set.
+* The weights of the groups are independent of each other. Possible 
values: 1-200.
+*/
+   u16 pir_weight;
+   /* Future extension purpose for CIR only */
+   u8 cir_pad[2];
+   u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+   /* Queue group ID - depended on it's type:
+* Data & p2p - is an index which is relative to Vport.
+* Config & Mailbox - is an ID which is relative to func.
+* This ID is used in future calls, i.e. delete.
+* Requested by host and assigned by Control plane.
+*/
+   u16 queue_group_id;
+   /* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+   u16 queue_group_type;
+   u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+   /* IN */
+   struct cpchnl2_queue_group_id qg_id;
+
+   /* IN, Number of queues of different types in the group. */
+   u16 num_tx_q;
+   u16 num_tx_complq;
+   u16 num_rx_q;
+   u16 num_rx_bufq;
+
+   struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+   struct cpchnl2_rx_queue_group_info rx_q_grp_info

[PATCH v5 05/10] net/cpfl: enable vport mapping

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. Handle cpchnl events for vport create/destroy.
2. Use a hash table to store the vport_id to vport_info mapping.
3. Use a spinlock for thread safety.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
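Not part of the patch, a condensed sketch of the lookup pattern introduced
here, for readers skimming the diff (the struct and field names follow the
patch; the helper name and error handling are simplified):

#include <rte_hash.h>
#include <rte_spinlock.h>

/* Look up the cached vport info for a given vport identity.
 * The spinlock guards the hash against concurrent updates from the
 * cpchnl event handler.
 */
static struct cpfl_vport_info *
cpfl_vport_info_lookup(struct cpfl_adapter_ext *adapter,
                       struct cpfl_vport_id *vi)
{
    struct cpfl_vport_info *info = NULL;

    rte_spinlock_lock(&adapter->vport_map_lock);
    if (rte_hash_lookup_data(adapter->vport_map_hash, vi,
                             (void **)&info) < 0)
        info = NULL;
    rte_spinlock_unlock(&adapter->vport_map_lock);

    return info;
}
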
 drivers/net/cpfl/cpfl_ethdev.c | 157 +
 drivers/net/cpfl/cpfl_ethdev.h |  21 -
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 31a5822d2c..ad21f901bb 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1504,6 +1505,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext 
*adapter, uint8_t *msg, uint
}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vport_identity,
+  struct cpchnl2_vport_info *vport_info)
+{
+   struct cpfl_vport_info *info = NULL;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret >= 0) {
+   PMD_DRV_LOG(WARNING, "vport already exist, overwrite info 
anyway");
+   /* overwrite info */
+   if (info)
+   info->vport_info = *vport_info;
+   goto fini;
+   }
+
+   info = rte_zmalloc(NULL, sizeof(*info), 0);
+   if (info == NULL) {
+   PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+   ret = -ENOMEM;
+   goto err;
+   }
+
+   info->vport_info = *vport_info;
+
+   ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, 
info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+   rte_free(info);
+   goto err;
+   }
+
+fini:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return 0;
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id 
*vport_identity)
+{
+   struct cpfl_vport_info *info;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "vport id not exist");
+   goto err;
+   }
+
+   rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   rte_free(info);
+
+   return 0;
+
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
+{
+   struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info 
*)msg;
+   struct cpchnl2_vport_info *info;
+   struct cpfl_vport_id vport_identity = { 0 };
+
+   if (msglen < sizeof(struct cpchnl2_event_info)) {
+   PMD_DRV_LOG(ERR, "Error event");
+   return;
+   }
+
+   switch (cpchnl2_event->header.type) {
+   case CPCHNL2_EVENT_VPORT_CREATED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_created.vport.vport_id;
+   info = &cpchnl2_event->data.vport_created.info;
+   vport_identity.func_type = info->func_type;
+   vport_identity.pf_id = info->pf_id;
+   vport_identity.vf_id = info->vf_id;
+   if (cpfl_vport_info_create(adapter, &vport_identity, info))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_CREATED");
+   break;
+   case CPCHNL2_EVENT_VPORT_DESTROYED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_destroyed.vport.vport_id;
+   vport_identity.func_type = 
cpchnl2_event->data.vport_destroyed.func.func_type;
+   vport_identity.pf_id = 
cpchnl2_event->data.vport_destroyed.func.pf_id;
+   vport_identity.vf_id = 
cpchnl2_event->data.vport_destroyed.func.vf_id;
+   if (cpfl_vport_info_destroy(adapter, &vport_identity))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_DESTROY");
+   break;
+   default:
+   PMD_DRV_LOG(ERR, " unknown event received %u", 
cpchnl2_event->header.type);
+   break;
+   }
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
if (vc_op == VIRTCHNL2_OP_EVENT) {
cpfl_handle_vchnl_event_msg(adapter, 
adapter->base.mbx_resp,
 

[PATCH v5 06/10] net/cpfl: support vport list/info get

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Support CP channel ops CPCHNL2_OP_GET_VPORT_LIST and
CPCHNL2_OP_GET_VPORT_INFO.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index de86c49016..4975c05a55 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -148,6 +148,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)   \
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 00..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include 
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response)
+{
+   struct cpchnl2_get_vport_list_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   memset(&request, 0, sizeof(request));
+   request.func_type = vi->func_type;
+   request.pf_id = vi->pf_id;
+   request.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_LIST");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+   return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response)
+{
+   struct cpchnl2_get_vport_info_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   request.vport.vport_id = vport_id->vport_id;
+   request.vport.vport_type = vport_id->vport_type;
+   request.func.func_type = vi->func_type;
+   request.func.pf_id = vi->pf_id;
+   request.func.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_INFO");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+   return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..2f0f5d8434 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
 'cpfl_ethdev.c',
 'cpfl_rxtx.c',
+'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1



[PATCH v5 08/10] net/cpfl: support probe again

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Only the representor devargs will be parsed for probe again.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
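Not part of the patch, for context: probe-again is normally reached through
the hotplug API on a device that is already probed, passing only the
representor devargs. An illustrative sketch (the BDF is a placeholder):

#include <rte_dev.h>

/* Attach representors to an already-probed cpfl device; with this patch
 * only the "representor" key is accepted on the second probe.
 */
static int
cpfl_attach_representors(void)
{
    return rte_eal_hotplug_add("pci", "0000:af:00.0",
                               "representor=vf[0-3],representor=c1pf0");
}
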
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++---
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index eb57e355d2..47c4c5c796 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+   CPFL_REPRESENTOR,
+   NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
RTE_ETH_SPEED_NUM_NONE,
RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char 
*value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter, bool first)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (devargs == NULL)
return 0;
 
-   kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+   kvlist = rte_kvargs_parse(devargs->args,
+   first ? cpfl_valid_args_first : cpfl_valid_args_again);
if (kvlist == NULL) {
PMD_INIT_LOG(ERR, "invalid kvargs key");
return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
+   if (!first)
+   return 0;
+
ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 cpfl_args);
if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-  struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
struct cpfl_adapter_ext *adapter;
int retval;
 
-   if (!cpfl_adapter_list_init) {
-   rte_spinlock_init(&cpfl_adapter_lock);
-   TAILQ_INIT(&cpfl_adapter_list);
-   cpfl_adapter_list_init = true;
-   }
-
adapter = rte_zmalloc("cpfl_adapter_ext",
  sizeof(struct cpfl_adapter_ext), 0);
if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return -ENOMEM;
}
 
-   retval = cpfl_parse_devargs(pci_dev, adapter);
+   retval = cpfl_parse_devargs(pci_dev, adapter, true);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to parse private devargs");
return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   int ret;
+
+   ret = cpfl_parse_devargs(pci_dev, adapter, false);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+   return ret;
+   }
+
+   ret = cpfl_repr_devargs_process(adapter);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to process representor devargs");
+   return ret;
+   }
+
+   return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+  struct rte_pci_device *pci_dev)
+{
+   struct cpfl_adapter_ext *adapter;
+
+   if (!cpfl_adapter_list_init) {
+   rte_spinlock_init(&cpfl_adapter_lock);
+   TAILQ_INIT(&cpfl_adapter_list);
+   cpfl_adapter_list_init = true;
+   }
+
+   adapter = cpfl_find_adapter_ext(pci_dev);
+
+   if (adapter == NULL)
+   return cpfl_pci_probe_first(pci_dev);
+   else
+   return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
.id_table   = pci_id_cpfl_map,
-   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING,
+   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_PROBE_AGAIN,
.probe  = cpfl_pci_probe,
  

[PATCH v5 07/10] net/cpfl: parse representor devargs

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Format:

[[c<controller_id>]pf<pf_id>]vf<vf_id>

  controller_id:

  0 : host (default)
  1 : acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
     same as pf0vf[0-3] and vf[0-3] if default values are omitted.

representor=c0pf0
  -- host > apf
     same as pf0 if default values are omitted.

representor=c1pf0
  -- accelerator core > apf

Multiple representor devargs are supported,
e.g. create 4 representors for 4 VFs on the host APF and one
representor for the APF on the accelerator core.

  -- representor=vf[0-3],representor=c1pf0

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 doc/guides/nics/cpfl.rst   |  36 +
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c | 179 +
 drivers/net/cpfl/cpfl_ethdev.h |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor`` the user can specify
+  which functions to create port representors.
+
+  Format is::
+
+    [[c<controller_id>]pf<pf_id>]vf<vf_id>
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+-a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+-a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+-a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+-a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+-a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 --
diff --git a/doc/guides/rel_notes/release_23_11.rst 
b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index ad21f901bb..eb57e355d2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include 
 
 #include "cpfl_ethdev.h"
+#include 
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR   "representor"
 #define CPFL_TX_SINGLE_Q   "tx_single"
 #define CPFL_RX_SINGLE_Q   "rx_single"
 #define CPFL_VPORT "vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+   CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void 
*args)
return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t 
val)
+{
+   uint16_t i;
+
+   for (i = 0; i < *len_list; i++) {
+   if (list[i] == val)
+   return 0;
+   }
+   if (*len_list >= max_list)
+   return -1;
+   list[(*len_list)++] = val;
+   return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+   const uint16_t max_list)
+{
+   uint16_t lo, hi, val;
+   int result, n = 0;
+   const char *pos = str;
+
+   result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+   if (result == 1) {
+   if (enlist(list, len_list, max_list, lo) != 0)
+   return NULL;
+   } else if (result == 2) {
+   if (lo > hi)
+   return NULL;
+   for (val = lo; val <= hi; val++) {
+   if (enlist(list, len_list, max_list, val) != 0)
+   return NULL;
+   }
+   } else {
+   return NULL;
+   }
+   return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const 
uint16_t max_list)
+{
+   const char *pos = str;
+
+   if (*pos == '[')
+   pos++;
+   while (1) {
+   pos = process_range(pos, list, len_list, max_list);
+   if (pos == 

[PATCH v5 09/10] net/cpfl: create port representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Track representor requests in the allowlist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu 
Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  | 117 +++---
 drivers/net/cpfl/cpfl_ethdev.h  |  39 +-
 drivers/net/cpfl/cpfl_representor.c | 581 
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build|   1 +
 5 files changed, 715 insertions(+), 49 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 47c4c5c796..375bc8098c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1645,10 +1645,10 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext 
*adapter, uint8_t *msg, uint
}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
   struct cpfl_vport_id *vport_identity,
-  struct cpchnl2_vport_info *vport_info)
+  struct cpchnl2_event_vport_created *vport_created)
 {
struct cpfl_vport_info *info = NULL;
int ret;
@@ -1659,7 +1659,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
PMD_DRV_LOG(WARNING, "vport already exist, overwrite info 
anyway");
/* overwrite info */
if (info)
-   info->vport_info = *vport_info;
+   info->vport = *vport_created;
goto fini;
}
 
@@ -1670,7 +1670,7 @@ cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
goto err;
}
 
-   info->vport_info = *vport_info;
+   info->vport = *vport_created;
 
ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, 
info);
if (ret < 0) {
@@ -1696,7 +1696,7 @@ cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, 
struct cpfl_vport_id *
rte_spinlock_lock(&adapter->vport_map_lock);
ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
if (ret < 0) {
-   PMD_DRV_LOG(ERR, "vport id not exist");
+   PMD_DRV_LOG(ERR, "vport id does not exist");
goto err;
}
 
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+   char hname[32];
+
+   snprintf(hname, 32, "%s-repr_al", adapter->name);
+
+   rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+   struct rte_hash_parameters params = {
+   .name = hname,
+   .entries = CPFL_REPR_HASH_ENTRY_NUM,
+   .key_len = sizeof(struct cpfl_repr_id),
+   .hash_func = rte_hash_crc,
+   .socket_id = SOCKET_ID_ANY,
+   };
+
+   adapter->repr_allowlist_hash = rte_hash_create(¶ms);
+
+   if (adapter->repr_allowlist_hash == NULL) {
+   PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+   rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
goto err_vport_map_init;
}
 
+   ret = cpfl_repr_allowlist_init(adapter);
+   if (ret) {
+   PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+   goto err_repr_allowlist_init;
+   }
+
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
adapter->max_vport_nb = adapter->base.caps.max_vports > 
CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
 
 err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+   cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext 
*adapter)
return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-   struct cpfl_devargs *devargs = &adapter->devargs;
-   int i, j;
-
-   /* check and refine repr args */
-   for (i = 0; i < devargs->repr_args_num; i++) {
-   struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-   /* set default host_id to xeon host */
-   if (eth_da->nb_mh_controllers == 0) {
-  

[PATCH v5 10/10] net/cpfl: support link update for representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Add link update ops for representor.

Signed-off-by: Jingjing Wu 
Signed-off-by: Beilei Xing 
---
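Not part of the patch, for context: applications reach this ops through the
standard ethdev link API; a non-zero wait_to_complete makes the PMD query the
represented function's status from the control plane, while the nowait
variant returns the cached state. A minimal sketch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Print a representor's link state, refreshing it from the control plane. */
static void
print_repr_link(uint16_t port_id)
{
    struct rte_eth_link link;

    if (rte_eth_link_get(port_id, &link) == 0)
        printf("port %u link %s\n", (unsigned int)port_id,
               link.link_status == RTE_ETH_LINK_UP ? "up" : "down");
}
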
 drivers/net/cpfl/cpfl_ethdev.h  |  1 +
 drivers/net/cpfl/cpfl_representor.c | 89 +++--
 2 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a4ffd51fb3..d0dcc0cc05 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -162,6 +162,7 @@ struct cpfl_repr {
struct cpfl_repr_id repr_id;
struct rte_ether_addr mac_addr;
struct cpfl_vport_info *vport_info;
+   bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index d2558c39a8..4d15a26c80 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,72 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev 
*dev,
return 0;
 }
 
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+   if ((host_id != CPFL_HOST_ID_HOST &&
+host_id != CPFL_HOST_ID_ACC) ||
+   (pf_id != CPFL_PF_TYPE_APF &&
+pf_id != CPFL_PF_TYPE_CPF))
+   return -EINVAL;
+
+   static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = 
{
+   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+   [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+   [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+   };
+
+   return func_id_map[host_id][pf_id];
+}
+
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+   struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+   struct rte_eth_link *dev_link = ðdev->data->dev_link;
+   struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+   struct cpchnl2_get_vport_info_response response;
+   struct cpfl_vport_id vi;
+   int ret;
+
+   if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+   PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+   return -EINVAL;
+   }
+
+   if (wait_to_complete) {
+   if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
+   /* PF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+   vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, 
repr->repr_id.pf_id);
+   vi.vf_id = 0;
+   } else {
+   /* VF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+   vi.pf_id = CPFL_HOST0_APF;
+   vi.vf_id = repr->repr_id.vf_id;
+   }
+   ret = cpfl_cc_vport_info_get(adapter, 
&repr->vport_info->vport.vport,
+&vi, &response);
+   if (ret < 0) {
+   PMD_INIT_LOG(ERR, "Fail to get vport info.");
+   return ret;
+   }
+
+   if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+   repr->func_up = true;
+   else
+   repr->func_up = false;
+   }
+
+   dev_link->link_status = repr->func_up ?
+   RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+   return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start  = cpfl_repr_dev_start,
.dev_stop   = cpfl_repr_dev_stop,
@@ -317,6 +383,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
.rx_queue_setup = cpfl_repr_rx_queue_setup,
.tx_queue_setup = cpfl_repr_tx_queue_setup,
+
+   .link_update= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +399,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
repr->itf.adapter = adapter;
repr->itf.data = eth_dev->data;
+   if (repr->vport_info->vport.info.vport_status == 
CPCHNL2_VPORT_STATUS_ENABLED)
+   repr->func_up = true;
 
eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
@@ -349,25 +419,6 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
 }
 
-static int
-cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
-{
-   if ((host_id != CPFL_HOST_ID_HOST &&
-host_id != CPFL_HOST_ID_ACC) ||
-   (pf_id != CPFL_PF_TYPE_APF &&
-pf_id != CPFL_PF_TYPE_CPF))
-   return -EINVAL;
-
-   static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = 
{
-   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
-   [CPFL_HOST_I

RE: [PATCH v2 0/2] ethdev: add random item support

2023-09-12 Thread Michael Baum

On Mon, 11 Sep 2023 18:55:45 +0200
Morten Brørup  wrote: 
> 
> > From: Michael Baum [mailto:michae...@nvidia.com]
> > Sent: Monday, 11 September 2023 08.42
> >
> > Add support for matching random value using new "rte_flow_item_random"
> > structure.
> 
> I get it. It can be used for things like stochastic sampling.
> 
> However, it doesn't provide support for e.g. 1/100 or 1/500.

It supports those values using the "last" field in addition to "spec" and
"mask": it matches a range of random values for any requested percentage.

> 
> So here's a suggestion:
> 
> Instead of "value" (which is irrelevant) & "mask" (which is what really 
> controls the
> probability), wouldn't it be better if "value" was an inverse probability (and
> "mask" was irrelevant)? E.g. value=500 means that there is a 1 of 500 
> probability
> of a match.
> 
> Would this kind of random item better serve the purpose?
> 
> Or is the random item, in its current form, also required for other purposes?

The random item is more generic than stochastic sampling; it can also implement
distribution.
When an application wants to distribute traffic between ports/queues, it can
cover the whole random value range with rules whose actions send to the
different ports/queues.
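
For readers of the thread, a rough sketch of the 1/100 case with a spec/last
range. The item type name, the "value" field width (16 bits here) and the
byte-order handling are assumptions based on this discussion, not a final
API; rule validation/creation and error handling are omitted:

#include <rte_flow.h>

/* Hypothetical: accept random values 0..654, i.e. 655 of the 65536 possible
 * 16-bit values, which is roughly 1/100 of the traffic.
 */
static const struct rte_flow_item_random random_spec = { .value = 0 };
static const struct rte_flow_item_random random_last = { .value = 654 };
static const struct rte_flow_item_random random_mask = { .value = 0xffff };

static const struct rte_flow_item sample_pattern[] = {
    { .type = RTE_FLOW_ITEM_TYPE_ETH },
    {
        .type = RTE_FLOW_ITEM_TYPE_RANDOM, /* assumed enum name */
        .spec = &random_spec,
        .last = &random_last,
        .mask = &random_mask,
    },
    { .type = RTE_FLOW_ITEM_TYPE_END },
};

Rules covering the remaining sub-ranges, each with its own queue/port action,
would implement the distribution case described above.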


[PATCH] eal: fix modify data area after memset

2023-09-12 Thread Fengnan Chang
Let's look at this path:
  malloc_elem_free
    -> malloc_elem_join_adjacent_free
      -> join_elem(elem, elem->next)

0. cur elem's pad > 0
1. The data area is memset in malloc_elem_free first.
2. The next elem is free, so try to join the cur elem and the next one.
3. In join_elem, inner->size is modified; that address was already
memset in step 1, so the write makes its content non-zero again.

If a user later calls rte_zmalloc and picks this elem, it does not get
fully zeroed memory.

Fixes: 2808a12cc053 ("malloc: fix memory element size in case of padding")
Signed-off-by: Fengnan Chang 
---
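A small check illustrating the user-visible invariant this fix protects
(illustrative only, not part of the patch; the size and names are arbitrary):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <rte_malloc.h>

/* rte_zmalloc() must hand back fully zeroed memory, even when the chosen
 * element was previously freed and joined with a padded neighbour.
 */
static void
check_zmalloc_is_zeroed(size_t len)
{
    uint8_t *p = rte_zmalloc(NULL, len, 0);
    size_t i;

    assert(p != NULL);
    for (i = 0; i < len; i++)
        assert(p[i] == 0);
    rte_free(p);
}
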
 lib/eal/common/malloc_elem.c | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/lib/eal/common/malloc_elem.c b/lib/eal/common/malloc_elem.c
index 619c040aa3..93a23fa8d4 100644
--- a/lib/eal/common/malloc_elem.c
+++ b/lib/eal/common/malloc_elem.c
@@ -492,7 +492,7 @@ malloc_elem_alloc(struct malloc_elem *elem, size_t size, 
unsigned align,
  * be contiguous in memory.
  */
 static inline void
-join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
+join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2, bool 
update_inner)
 {
struct malloc_elem *next = elem2->next;
elem1->size += elem2->size;
@@ -502,7 +502,7 @@ join_elem(struct malloc_elem *elem1, struct malloc_elem 
*elem2)
elem1->heap->last = elem1;
elem1->next = next;
elem1->dirty |= elem2->dirty;
-   if (elem1->pad) {
+   if (elem1->pad && update_inner) {
struct malloc_elem *inner = RTE_PTR_ADD(elem1, elem1->pad);
inner->size = elem1->size - elem1->pad;
}
@@ -526,7 +526,7 @@ malloc_elem_join_adjacent_free(struct malloc_elem *elem)
 
/* remove from free list, join to this one */
malloc_elem_free_list_remove(elem->next);
-   join_elem(elem, elem->next);
+   join_elem(elem, elem->next, false);
 
/* erase header, trailer and pad */
memset(erase, MALLOC_POISON, erase_len);
@@ -550,7 +550,7 @@ malloc_elem_join_adjacent_free(struct malloc_elem *elem)
malloc_elem_free_list_remove(elem->prev);
 
new_elem = elem->prev;
-   join_elem(new_elem, elem);
+   join_elem(new_elem, elem, false);
 
/* erase header, trailer and pad */
memset(erase, MALLOC_POISON, erase_len);
@@ -683,7 +683,7 @@ malloc_elem_resize(struct malloc_elem *elem, size_t size)
 * join the two
 */
malloc_elem_free_list_remove(elem->next);
-   join_elem(elem, elem->next);
+   join_elem(elem, elem->next, true);
 
if (elem->size - new_size >= MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD) {
/* now we have a big block together. Lets cut it down a bit, by 
splitting */
-- 
2.20.1



RE: [PATCH] bus/vdev: fix devargs memory leak

2023-09-12 Thread Ling, WeiX
> -----Original Message-----
> From: Mingjin Ye 
> Sent: Friday, September 1, 2023 3:24 PM
> To: dev@dpdk.org
> Cc: Yang, Qiming ; Zhou, YidingX
> ; Ye, MingjinX ;
> sta...@dpdk.org; Burakov, Anatoly 
> Subject: [PATCH] bus/vdev: fix devargs memory leak
> 
> When a device is created by a secondary process, an empty devargs is
> temporarily generated and bound to it. This causes the device to not be
> associated with the correct devargs, and the empty devargs are not released
> when the resource is freed.
> 
> This patch fixes the issue by matching the devargs when inserting a device in
> secondary process.
> 
> Fixes: dda987315ca2 ("vdev: make virtual bus use its device struct")
> Fixes: a16040453968 ("eal: extract vdev infra")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Mingjin Ye 
> ---

Tested-by: Wei Ling 


[PATCH v6 01/10] net/cpfl: refine devargs parse and process

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. Keep devargs in adapter.
2. Refine handling of the case where no vport is specified in devargs.
3. Separate devargs parsing from devargs processing.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 154 ++---
 drivers/net/cpfl/cpfl_ethdev.h |   1 +
 2 files changed, 84 insertions(+), 71 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c4ca9343c3..46b3a52e49 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1407,12 +1407,12 @@ parse_bool(const char *key, const char *value, void 
*args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter,
-  struct cpfl_devargs *cpfl_args)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
+   struct cpfl_devargs *cpfl_args = &adapter->devargs;
struct rte_kvargs *kvlist;
-   int i, ret;
+   int ret;
 
cpfl_args->req_vport_nb = 0;
 
@@ -1445,31 +1445,6 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
-   /* check parsed devargs */
-   if (adapter->cur_vport_nb + cpfl_args->req_vport_nb >
-   adapter->max_vport_nb) {
-   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
-adapter->max_vport_nb);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   for (i = 0; i < cpfl_args->req_vport_nb; i++) {
-   if (cpfl_args->req_vports[i] > adapter->max_vport_nb - 1) {
-   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
-cpfl_args->req_vports[i], 
adapter->max_vport_nb - 1);
-   ret = -EINVAL;
-   goto fail;
-   }
-
-   if (adapter->cur_vports & RTE_BIT32(cpfl_args->req_vports[i])) {
-   PMD_INIT_LOG(ERR, "Vport %d has been requested",
-cpfl_args->req_vports[i]);
-   ret = -EINVAL;
-   goto fail;
-   }
-   }
-
 fail:
rte_kvargs_free(kvlist);
return ret;
@@ -1915,15 +1890,79 @@ cpfl_adapter_ext_deinit(struct cpfl_adapter_ext 
*adapter)
adapter->vports = NULL;
 }
 
+static int
+cpfl_vport_devargs_process(struct cpfl_adapter_ext *adapter)
+{
+   struct cpfl_devargs *devargs = &adapter->devargs;
+   int i;
+
+   /* refine vport number, at least 1 vport */
+   if (devargs->req_vport_nb == 0) {
+   devargs->req_vport_nb = 1;
+   devargs->req_vports[0] = 0;
+   }
+
+   /* check parsed devargs */
+   if (adapter->cur_vport_nb + devargs->req_vport_nb >
+   adapter->max_vport_nb) {
+   PMD_INIT_LOG(ERR, "Total vport number can't be > %d",
+adapter->max_vport_nb);
+   return -EINVAL;
+   }
+
+   for (i = 0; i < devargs->req_vport_nb; i++) {
+   if (devargs->req_vports[i] > adapter->max_vport_nb - 1) {
+   PMD_INIT_LOG(ERR, "Invalid vport id %d, it should be 0 
~ %d",
+devargs->req_vports[i], 
adapter->max_vport_nb - 1);
+   return -EINVAL;
+   }
+
+   if (adapter->cur_vports & RTE_BIT32(devargs->req_vports[i])) {
+   PMD_INIT_LOG(ERR, "Vport %d has been requested",
+devargs->req_vports[i]);
+   return -EINVAL;
+   }
+   }
+
+   return 0;
+}
+
+static int
+cpfl_vport_create(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   struct cpfl_vport_param vport_param;
+   char name[RTE_ETH_NAME_MAX_LEN];
+   int ret, i;
+
+   for (i = 0; i < adapter->devargs.req_vport_nb; i++) {
+   vport_param.adapter = adapter;
+   vport_param.devarg_id = adapter->devargs.req_vports[i];
+   vport_param.idx = cpfl_vport_idx_alloc(adapter);
+   if (vport_param.idx == CPFL_INVALID_VPORT_IDX) {
+   PMD_INIT_LOG(ERR, "No space for vport %u", 
vport_param.devarg_id);
+   break;
+   }
+   snprintf(name, sizeof(name), "net_%s_vport_%d",
+pci_dev->device.name,
+adapter->devargs.req_vports[i]);
+   ret = rte_eth_dev_create(&pci_dev->device, name,
+   sizeof(struct cpfl_vport),
+   NULL, NULL, cpfl_dev_vport_init,
+   &vport_param);
+   if (ret != 0)
+   P

[PATCH v6 00/10] net/cpfl: support port representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. code refine for representor support
2. support port representor

v6 changes:
 - move some change from 08/10 to 06/10 patch
v5 changes:
 - refine cpfl_vport_info structure
 - refine cpfl_repr_link_update function
 - refine cpfl_repr_create function
v4 changes:
 - change the patch order
 - merge two patches
 - revert enum change
v3 changes:
 - Refine commit log.
 - Add macro and enum.
 - Refine doc.
 - Refine error handling.
v2 changes:
 - Remove representor data path.
 - Fix coding style.

Beilei Xing (10):
  net/cpfl: refine devargs parse and process
  net/cpfl: introduce interface structure
  net/cpfl: refine handle virtual channel message
  net/cpfl: introduce CP channel API
  net/cpfl: enable vport mapping
  net/cpfl: support vport list/info get
  net/cpfl: parse representor devargs
  net/cpfl: support probe again
  net/cpfl: create port representor
  net/cpfl: support link update for representor

 doc/guides/nics/cpfl.rst   |  36 ++
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +
 drivers/net/cpfl/cpfl_ethdev.c | 621 
 drivers/net/cpfl/cpfl_ethdev.h |  91 +++-
 drivers/net/cpfl/cpfl_representor.c| 632 +
 drivers/net/cpfl/cpfl_representor.h|  26 +
 drivers/net/cpfl/cpfl_vchnl.c  |  72 +++
 drivers/net/cpfl/meson.build   |   4 +-
 9 files changed, 1719 insertions(+), 106 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

-- 
2.34.1



[PATCH v6 02/10] net/cpfl: introduce interface structure

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Introduce the cpfl interface structure to distinguish between vport and
port representor.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c |  3 +++
 drivers/net/cpfl/cpfl_ethdev.h | 15 +++
 2 files changed, 18 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 46b3a52e49..92fe92c00f 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1803,6 +1803,9 @@ cpfl_dev_vport_init(struct rte_eth_dev *dev, void 
*init_params)
goto err;
}
 
+   cpfl_vport->itf.type = CPFL_ITF_TYPE_VPORT;
+   cpfl_vport->itf.adapter = adapter;
+   cpfl_vport->itf.data = dev->data;
adapter->vports[param->idx] = cpfl_vport;
adapter->cur_vports |= RTE_BIT32(param->devarg_id);
adapter->cur_vport_nb++;
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index b637bf2e45..feb1edc4b8 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -86,7 +86,18 @@ struct p2p_queue_chunks_info {
uint32_t rx_buf_qtail_spacing;
 };
 
+enum cpfl_itf_type {
+   CPFL_ITF_TYPE_VPORT,
+};
+
+struct cpfl_itf {
+   enum cpfl_itf_type type;
+   struct cpfl_adapter_ext *adapter;
+   void *data;
+};
+
 struct cpfl_vport {
+   struct cpfl_itf itf;
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
 
@@ -124,5 +135,9 @@ TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
container_of((p), struct cpfl_adapter_ext, base)
+#define CPFL_DEV_TO_VPORT(dev) \
+   ((struct cpfl_vport *)((dev)->data->dev_private))
+#define CPFL_DEV_TO_ITF(dev)   \
+   ((struct cpfl_itf *)((dev)->data->dev_private))
 
 #endif /* _CPFL_ETHDEV_H_ */
-- 
2.34.1



[PATCH v6 03/10] net/cpfl: refine handle virtual channel message

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Refine handling of the virtual channel event message.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 48 +-
 1 file changed, 24 insertions(+), 24 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 92fe92c00f..31a5822d2c 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1450,40 +1450,52 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adap
return ret;
 }
 
-static struct idpf_vport *
+static struct cpfl_vport *
 cpfl_find_vport(struct cpfl_adapter_ext *adapter, uint32_t vport_id)
 {
-   struct idpf_vport *vport = NULL;
+   struct cpfl_vport *vport = NULL;
int i;
 
for (i = 0; i < adapter->cur_vport_nb; i++) {
-   vport = &adapter->vports[i]->base;
-   if (vport->vport_id != vport_id)
+   vport = adapter->vports[i];
+   if (vport == NULL)
+   continue;
+   if (vport->base.vport_id != vport_id)
continue;
else
return vport;
}
 
-   return vport;
+   return NULL;
 }
 
 static void
-cpfl_handle_event_msg(struct idpf_vport *vport, uint8_t *msg, uint16_t msglen)
+cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
 {
struct virtchnl2_event *vc_event = (struct virtchnl2_event *)msg;
-   struct rte_eth_dev_data *data = vport->dev_data;
-   struct rte_eth_dev *dev = &rte_eth_devices[data->port_id];
+   struct cpfl_vport *vport;
+   struct rte_eth_dev_data *data;
+   struct rte_eth_dev *dev;
 
if (msglen < sizeof(struct virtchnl2_event)) {
PMD_DRV_LOG(ERR, "Error event");
return;
}
 
+   vport = cpfl_find_vport(adapter, vc_event->vport_id);
+   if (!vport) {
+   PMD_DRV_LOG(ERR, "Can't find vport.");
+   return;
+   }
+
+   data = vport->itf.data;
+   dev = &rte_eth_devices[data->port_id];
+
switch (vc_event->event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
PMD_DRV_LOG(DEBUG, "VIRTCHNL2_EVENT_LINK_CHANGE");
-   vport->link_up = !!(vc_event->link_status);
-   vport->link_speed = vc_event->link_speed;
+   vport->base.link_up = !!(vc_event->link_status);
+   vport->base.link_speed = vc_event->link_speed;
cpfl_dev_link_update(dev, 0);
break;
default:
@@ -1498,10 +1510,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
struct idpf_adapter *base = &adapter->base;
struct idpf_dma_mem *dma_mem = NULL;
struct idpf_hw *hw = &base->hw;
-   struct virtchnl2_event *vc_event;
struct idpf_ctlq_msg ctlq_msg;
enum idpf_mbx_opc mbx_op;
-   struct idpf_vport *vport;
uint16_t pending = 1;
uint32_t vc_op;
int ret;
@@ -1523,18 +1533,8 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext 
*adapter)
switch (mbx_op) {
case idpf_mbq_opc_send_msg_to_peer_pf:
if (vc_op == VIRTCHNL2_OP_EVENT) {
-   if (ctlq_msg.data_len < sizeof(struct 
virtchnl2_event)) {
-   PMD_DRV_LOG(ERR, "Error event");
-   return;
-   }
-   vc_event = (struct virtchnl2_event 
*)base->mbx_resp;
-   vport = cpfl_find_vport(adapter, 
vc_event->vport_id);
-   if (!vport) {
-   PMD_DRV_LOG(ERR, "Can't find vport.");
-   return;
-   }
-   cpfl_handle_event_msg(vport, base->mbx_resp,
- ctlq_msg.data_len);
+   cpfl_handle_vchnl_event_msg(adapter, 
adapter->base.mbx_resp,
+   ctlq_msg.data_len);
} else {
if (vc_op == base->pend_cmd)
notify_cmd(base, base->cmd_retval);
-- 
2.34.1



[PATCH v6 05/10] net/cpfl: enable vport mapping

2023-09-12 Thread beilei . xing
From: Beilei Xing 

1. Handle cpchnl event for vport create/destroy
2. Use hash table to store vport_id to vport_info mapping
3. Use a spinlock for thread safety.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 157 +
 drivers/net/cpfl/cpfl_ethdev.h |  21 -
 drivers/net/cpfl/meson.build   |   2 +-
 3 files changed, 177 insertions(+), 3 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 31a5822d2c..a7a045ace4 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -10,6 +10,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "cpfl_ethdev.h"
 #include "cpfl_rxtx.h"
@@ -1504,6 +1505,108 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext 
*adapter, uint8_t *msg, uint
}
 }
 
+static int
+cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vport_identity,
+  struct cpchnl2_event_vport_created *vport_created)
+{
+   struct cpfl_vport_info *info = NULL;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret >= 0) {
+   PMD_DRV_LOG(WARNING, "vport already exist, overwrite info 
anyway");
+   /* overwrite info */
+   if (info)
+   info->vport = *vport_created;
+   goto fini;
+   }
+
+   info = rte_zmalloc(NULL, sizeof(*info), 0);
+   if (info == NULL) {
+   PMD_DRV_LOG(ERR, "Failed to alloc memory for vport map info");
+   ret = -ENOMEM;
+   goto err;
+   }
+
+   info->vport = *vport_created;
+
+   ret = rte_hash_add_key_data(adapter->vport_map_hash, vport_identity, 
info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "Failed to add vport map into hash");
+   rte_free(info);
+   goto err;
+   }
+
+fini:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return 0;
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static int
+cpfl_vport_info_destroy(struct cpfl_adapter_ext *adapter, struct cpfl_vport_id 
*vport_identity)
+{
+   struct cpfl_vport_info *info;
+   int ret;
+
+   rte_spinlock_lock(&adapter->vport_map_lock);
+   ret = rte_hash_lookup_data(adapter->vport_map_hash, vport_identity, 
(void **)&info);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "vport id doesn't exist");
+   goto err;
+   }
+
+   rte_hash_del_key(adapter->vport_map_hash, vport_identity);
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   rte_free(info);
+
+   return 0;
+
+err:
+   rte_spinlock_unlock(&adapter->vport_map_lock);
+   return ret;
+}
+
+static void
+cpfl_handle_cpchnl_event_msg(struct cpfl_adapter_ext *adapter, uint8_t *msg, 
uint16_t msglen)
+{
+   struct cpchnl2_event_info *cpchnl2_event = (struct cpchnl2_event_info 
*)msg;
+   struct cpchnl2_event_vport_created *vport_created;
+   struct cpfl_vport_id vport_identity = { 0 };
+
+   if (msglen < sizeof(struct cpchnl2_event_info)) {
+   PMD_DRV_LOG(ERR, "Error event");
+   return;
+   }
+
+   switch (cpchnl2_event->header.type) {
+   case CPCHNL2_EVENT_VPORT_CREATED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_created.vport.vport_id;
+   vport_created = &cpchnl2_event->data.vport_created;
+   vport_identity.func_type = vport_created->info.func_type;
+   vport_identity.pf_id = vport_created->info.pf_id;
+   vport_identity.vf_id = vport_created->info.vf_id;
+   if (cpfl_vport_info_create(adapter, &vport_identity, 
vport_created))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_CREATED");
+   break;
+   case CPCHNL2_EVENT_VPORT_DESTROYED:
+   vport_identity.vport_id = 
cpchnl2_event->data.vport_destroyed.vport.vport_id;
+   vport_identity.func_type = 
cpchnl2_event->data.vport_destroyed.func.func_type;
+   vport_identity.pf_id = 
cpchnl2_event->data.vport_destroyed.func.pf_id;
+   vport_identity.vf_id = 
cpchnl2_event->data.vport_destroyed.func.vf_id;
+   if (cpfl_vport_info_destroy(adapter, &vport_identity))
+   PMD_DRV_LOG(WARNING, "Failed to handle 
CPCHNL2_EVENT_VPORT_DESTROY");
+   break;
+   default:
+   PMD_DRV_LOG(ERR, " unknown event received %u", 
cpchnl2_event->header.type);
+   break;
+   }
+}
+
 static void
 cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
 {
@@ -1535,6 +1638,9 @@ cpfl_handle_virtchnl_msg(struct cpfl_adapter_ext *adapter)
if (vc_op == VIRTCHNL2_OP_EVENT) {
 

[PATCH v6 04/10] net/cpfl: introduce CP channel API

2023-09-12 Thread beilei . xing
From: Beilei Xing 

The CPCHNL2 defines the API (v2) used for communication between the
CPF driver and its on-chip management software. The CPFL PMD is a
specific CPF driver to utilize CPCHNL2 for device configuration and
event probing.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_cpchnl.h | 340 +
 1 file changed, 340 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_cpchnl.h

diff --git a/drivers/net/cpfl/cpfl_cpchnl.h b/drivers/net/cpfl/cpfl_cpchnl.h
new file mode 100644
index 00..2eefcbcc10
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_cpchnl.h
@@ -0,0 +1,340 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#ifndef _CPFL_CPCHNL_H_
+#define _CPFL_CPCHNL_H_
+
+/** @brief  Command Opcodes
+ *  Values are to be different from virtchnl.h opcodes
+ */
+enum cpchnl2_ops {
+   /* vport info */
+   CPCHNL2_OP_GET_VPORT_LIST   = 0x8025,
+   CPCHNL2_OP_GET_VPORT_INFO   = 0x8026,
+
+   /* DPHMA Event notifications */
+   CPCHNL2_OP_EVENT= 0x8050,
+};
+
+/* Note! This affects the size of structs below */
+#define CPCHNL2_MAX_TC_AMOUNT  8
+
+#define CPCHNL2_ETH_LENGTH_OF_ADDRESS  6
+
+#define CPCHNL2_FUNC_TYPE_PF   0
+#define CPCHNL2_FUNC_TYPE_SRIOV1
+
+/* vport statuses - must match the DB ones - see enum cp_vport_status*/
+#define CPCHNL2_VPORT_STATUS_CREATED   0
+#define CPCHNL2_VPORT_STATUS_ENABLED   1
+#define CPCHNL2_VPORT_STATUS_DISABLED  2
+#define CPCHNL2_VPORT_STATUS_DESTROYED 3
+
+/* Queue Groups Extension */
+/**/
+
+#define MAX_Q_REGIONS 16
+/* TBD - with current structure sizes, in order not to exceed 4KB ICQH buffer
+ * no more than 11 queue groups are allowed per a single vport..
+ * More will be possible only with future msg fragmentation.
+ */
+#define MAX_Q_VPORT_GROUPS 11
+
+#define CPCHNL2_CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
+   { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+struct cpchnl2_queue_chunk {
+   u32 type;  /* 0:QUEUE_TYPE_TX, 1:QUEUE_TYPE_RX */ /* enum 
nsl_lan_queue_type */
+   u32 start_queue_id;
+   u32 num_queues;
+   u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_queue_chunk);
+
+/* structure to specify several chunks of contiguous queues */
+struct cpchnl2_queue_grp_chunks {
+   u16 num_chunks;
+   u8 reserved[6];
+   struct cpchnl2_queue_chunk chunks[MAX_Q_REGIONS];
+};
+CPCHNL2_CHECK_STRUCT_LEN(264, cpchnl2_queue_grp_chunks);
+
+struct cpchnl2_rx_queue_group_info {
+   /* User can ask to update rss_lut size originally allocated
+* by CreateVport command. New size will be returned if allocation 
succeeded,
+* otherwise original rss_size from CreateVport will be returned.
+*/
+   u16 rss_lut_size;
+   u8 pad[6]; /*Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_rx_queue_group_info);
+
+struct cpchnl2_tx_queue_group_info {
+   u8 tx_tc; /*TX TC queue group will be connected to*/
+   /* Each group can have its own priority, value 0-7, while each group 
with unique
+* priority is strict priority. It can be single set of queue groups 
which configured with
+* same priority, then they are assumed part of WFQ arbitration group 
and are expected to be
+* assigned with weight.
+*/
+   u8 priority;
+   /* Determines if queue group is expected to be Strict Priority 
according to its priority */
+   u8 is_sp;
+   u8 pad;
+   /* Peak Info Rate Weight in case Queue Group is part of WFQ arbitration 
set.
+* The weights of the groups are independent of each other. Possible 
values: 1-200.
+*/
+   u16 pir_weight;
+   /* Future extension purpose for CIR only */
+   u8 cir_pad[2];
+   u8 pad2[8]; /* Future extension purpose*/
+};
+CPCHNL2_CHECK_STRUCT_LEN(16, cpchnl2_tx_queue_group_info);
+
+struct cpchnl2_queue_group_id {
+   /* Queue group ID - depended on it's type:
+* Data & p2p - is an index which is relative to Vport.
+* Config & Mailbox - is an ID which is relative to func.
+* This ID is used in future calls, i.e. delete.
+* Requested by host and assigned by Control plane.
+*/
+   u16 queue_group_id;
+   /* Functional type: see CPCHNL2_QUEUE_GROUP_TYPE definitions */
+   u16 queue_group_type;
+   u8 pad[4];
+};
+CPCHNL2_CHECK_STRUCT_LEN(8, cpchnl2_queue_group_id);
+
+struct cpchnl2_queue_group_info {
+   /* IN */
+   struct cpchnl2_queue_group_id qg_id;
+
+   /* IN, Number of queues of different types in the group. */
+   u16 num_tx_q;
+   u16 num_tx_complq;
+   u16 num_rx_q;
+   u16 num_rx_bufq;
+
+   struct cpchnl2_tx_queue_group_info tx_q_grp_info;
+   struct cpchnl2_rx_queue_group_info rx_q_grp_info

[PATCH v6 06/10] net/cpfl: support vport list/info get

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Support cp channel ops CPCHNL2_OP_CPF_GET_VPORT_LIST and
CPCHNL2_OP_CPF_GET_VPORT_INFO.

Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h |  8 
 drivers/net/cpfl/cpfl_vchnl.c  | 72 ++
 drivers/net/cpfl/meson.build   |  1 +
 3 files changed, 81 insertions(+)
 create mode 100644 drivers/net/cpfl/cpfl_vchnl.c

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 7d70ee13f2..eb51a12fac 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -148,6 +148,14 @@ struct cpfl_adapter_ext {
 
 TAILQ_HEAD(cpfl_adapter_list, cpfl_adapter_ext);
 
+int cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response);
+int cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response);
+
 #define CPFL_DEV_TO_PCI(eth_dev)   \
RTE_DEV_TO_PCI((eth_dev)->device)
 #define CPFL_ADAPTER_TO_EXT(p) \
diff --git a/drivers/net/cpfl/cpfl_vchnl.c b/drivers/net/cpfl/cpfl_vchnl.c
new file mode 100644
index 00..a21a4a451f
--- /dev/null
+++ b/drivers/net/cpfl/cpfl_vchnl.c
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2023 Intel Corporation
+ */
+
+#include "cpfl_ethdev.h"
+#include 
+
+int
+cpfl_cc_vport_list_get(struct cpfl_adapter_ext *adapter,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_list_response *response)
+{
+   struct cpchnl2_get_vport_list_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   memset(&request, 0, sizeof(request));
+   request.func_type = vi->func_type;
+   request.pf_id = vi->pf_id;
+   request.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_LIST;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_list_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_LIST");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+
+   return 0;
+}
+
+int
+cpfl_cc_vport_info_get(struct cpfl_adapter_ext *adapter,
+  struct cpchnl2_vport_id *vport_id,
+  struct cpfl_vport_id *vi,
+  struct cpchnl2_get_vport_info_response *response)
+{
+   struct cpchnl2_get_vport_info_request request;
+   struct idpf_cmd_info args;
+   int err;
+
+   request.vport.vport_id = vport_id->vport_id;
+   request.vport.vport_type = vport_id->vport_type;
+   request.func.func_type = vi->func_type;
+   request.func.pf_id = vi->pf_id;
+   request.func.vf_id = vi->vf_id;
+
+   memset(&args, 0, sizeof(args));
+   args.ops = CPCHNL2_OP_GET_VPORT_INFO;
+   args.in_args = (uint8_t *)&request;
+   args.in_args_size = sizeof(struct cpchnl2_get_vport_info_request);
+   args.out_buffer = adapter->base.mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(&adapter->base, &args);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to execute command of 
CPCHNL2_OP_GET_VPORT_INFO");
+   return err;
+   }
+
+   rte_memcpy(response, args.out_buffer, sizeof(*response));
+
+   return 0;
+}
diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
index 28167bb81d..2f0f5d8434 100644
--- a/drivers/net/cpfl/meson.build
+++ b/drivers/net/cpfl/meson.build
@@ -16,6 +16,7 @@ deps += ['hash', 'common_idpf']
 sources = files(
 'cpfl_ethdev.c',
 'cpfl_rxtx.c',
+'cpfl_vchnl.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.34.1



[PATCH v6 08/10] net/cpfl: support probe again

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Only the representor devargs will be parsed when probing again.

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 69 +++---
 1 file changed, 56 insertions(+), 13 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b6fcfe4275..428d87b960 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -26,7 +26,7 @@ rte_spinlock_t cpfl_adapter_lock;
 struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
-static const char * const cpfl_valid_args[] = {
+static const char * const cpfl_valid_args_first[] = {
CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
@@ -34,6 +34,11 @@ static const char * const cpfl_valid_args[] = {
NULL
 };
 
+static const char * const cpfl_valid_args_again[] = {
+   CPFL_REPRESENTOR,
+   NULL
+};
+
 uint32_t cpfl_supported_speeds[] = {
RTE_ETH_SPEED_NUM_NONE,
RTE_ETH_SPEED_NUM_10M,
@@ -1533,7 +1538,7 @@ parse_repr(const char *key __rte_unused, const char 
*value, void *args)
 }
 
 static int
-cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter, bool first)
 {
struct rte_devargs *devargs = pci_dev->device.devargs;
struct cpfl_devargs *cpfl_args = &adapter->devargs;
@@ -1545,7 +1550,8 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (devargs == NULL)
return 0;
 
-   kvlist = rte_kvargs_parse(devargs->args, cpfl_valid_args);
+   kvlist = rte_kvargs_parse(devargs->args,
+   first ? cpfl_valid_args_first : cpfl_valid_args_again);
if (kvlist == NULL) {
PMD_INIT_LOG(ERR, "invalid kvargs key");
return -EINVAL;
@@ -1562,6 +1568,9 @@ cpfl_parse_devargs(struct rte_pci_device *pci_dev, struct 
cpfl_adapter_ext *adap
if (ret != 0)
goto fail;
 
+   if (!first)
+   return 0;
+
ret = rte_kvargs_process(kvlist, CPFL_VPORT, &parse_vport,
 cpfl_args);
if (ret != 0)
@@ -2291,18 +2300,11 @@ cpfl_vport_create(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *adapt
 }
 
 static int
-cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
-  struct rte_pci_device *pci_dev)
+cpfl_pci_probe_first(struct rte_pci_device *pci_dev)
 {
struct cpfl_adapter_ext *adapter;
int retval;
 
-   if (!cpfl_adapter_list_init) {
-   rte_spinlock_init(&cpfl_adapter_lock);
-   TAILQ_INIT(&cpfl_adapter_list);
-   cpfl_adapter_list_init = true;
-   }
-
adapter = rte_zmalloc("cpfl_adapter_ext",
  sizeof(struct cpfl_adapter_ext), 0);
if (adapter == NULL) {
@@ -2310,7 +2312,7 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return -ENOMEM;
}
 
-   retval = cpfl_parse_devargs(pci_dev, adapter);
+   retval = cpfl_parse_devargs(pci_dev, adapter, true);
if (retval != 0) {
PMD_INIT_LOG(ERR, "Failed to parse private devargs");
return retval;
@@ -2355,6 +2357,46 @@ cpfl_pci_probe(struct rte_pci_driver *pci_drv 
__rte_unused,
return retval;
 }
 
+static int
+cpfl_pci_probe_again(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
+{
+   int ret;
+
+   ret = cpfl_parse_devargs(pci_dev, adapter, false);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to parse private devargs");
+   return ret;
+   }
+
+   ret = cpfl_repr_devargs_process(adapter);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to process reprenstor devargs");
+   return ret;
+   }
+
+   return 0;
+}
+
+static int
+cpfl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+  struct rte_pci_device *pci_dev)
+{
+   struct cpfl_adapter_ext *adapter;
+
+   if (!cpfl_adapter_list_init) {
+   rte_spinlock_init(&cpfl_adapter_lock);
+   TAILQ_INIT(&cpfl_adapter_list);
+   cpfl_adapter_list_init = true;
+   }
+
+   adapter = cpfl_find_adapter_ext(pci_dev);
+
+   if (adapter == NULL)
+   return cpfl_pci_probe_first(pci_dev);
+   else
+   return cpfl_pci_probe_again(pci_dev, adapter);
+}
+
 static int
 cpfl_pci_remove(struct rte_pci_device *pci_dev)
 {
@@ -2377,7 +2419,8 @@ cpfl_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_cpfl_pmd = {
.id_table   = pci_id_cpfl_map,
-   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING,
+   .drv_flags  = RTE_PCI_DRV_NEED_MAPPING |
+ RTE_PCI_DRV_PROBE_AGAIN,
.probe  = cpfl_pci_probe,
  

[PATCH v6 07/10] net/cpfl: parse representor devargs

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Format:

[[c]pf]vf

  controller_id:

  0 : host (default)
  1:  acc

  pf_id:

  0 : apf (default)
  1 : cpf

Example:

representor=c0pf0vf[0-3]
  -- host > apf > vf 0,1,2,3
 same as pf0vf[0-3] and vf[0-3] if omit default value.

representor=c0pf0
  -- host > apf
 same as pf0 if omit default value.

representor=c1pf0
  -- accelerator core > apf

multiple representor devargs are supported.
e.g.: create 4 representors for 4 vfs on host APF and one
representor for APF on accelerator core.

  -- representor=vf[0-3],representor=c1pf0
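
As a full application command line this could look like the following
(illustrative only; <BDF> stands for the device's PCI address):

  dpdk-testpmd -a <BDF>,representor=vf[0-3],representor=c1pf0 -- -i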

Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 doc/guides/nics/cpfl.rst   |  36 +
 doc/guides/rel_notes/release_23_11.rst |   3 +
 drivers/net/cpfl/cpfl_ethdev.c | 179 +
 drivers/net/cpfl/cpfl_ethdev.h |   8 ++
 4 files changed, 226 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index 39a2b603f3..83a18c3f2e 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -92,6 +92,42 @@ Runtime Configuration
   Then the PMD will configure Tx queue with single queue mode.
   Otherwise, split queue mode is chosen by default.
 
+- ``representor`` (default ``not enabled``)
+
+  The cpfl PMD supports the creation of APF/CPF/VF port representors.
+  Each port representor corresponds to a single function of that device.
+  Using the ``devargs`` option ``representor`` the user can specify
+  which functions to create port representors.
+
+  Format is::
+
+[[c]pf]vf
+
+  Controller_id 0 is host (default), while 1 is accelerator core.
+  Pf_id 0 is APF (default), while 1 is CPF.
+  Default value can be omitted.
+
+  Create 4 representors for 4 vfs on host APF::
+
+-a BDF,representor=c0pf0vf[0-3]
+
+  Or::
+
+-a BDF,representor=pf0vf[0-3]
+
+  Or::
+
+-a BDF,representor=vf[0-3]
+
+  Create a representor for CPF on accelerator core::
+
+-a BDF,representor=c1pf1
+
+  Multiple representor devargs are supported. Create 4 representors for 4
+  vfs on host APF and one representor for CPF on accelerator core::
+
+-a BDF,representor=vf[0-3],representor=c1pf1
+
 
 Driver compilation and testing
 --
diff --git a/doc/guides/rel_notes/release_23_11.rst 
b/doc/guides/rel_notes/release_23_11.rst
index 333e1d95a2..3d9be208d0 100644
--- a/doc/guides/rel_notes/release_23_11.rst
+++ b/doc/guides/rel_notes/release_23_11.rst
@@ -78,6 +78,9 @@ New Features
 * build: Optional libraries can now be selected with the new ``enable_libs``
   build option similarly to the existing ``enable_drivers`` build option.
 
+* **Updated Intel cpfl driver.**
+
+  * Added support for port representor.
 
 Removed Items
 -
diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a7a045ace4..b6fcfe4275 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -13,8 +13,10 @@
 #include 
 
 #include "cpfl_ethdev.h"
+#include 
 #include "cpfl_rxtx.h"
 
+#define CPFL_REPRESENTOR   "representor"
 #define CPFL_TX_SINGLE_Q   "tx_single"
 #define CPFL_RX_SINGLE_Q   "rx_single"
 #define CPFL_VPORT "vport"
@@ -25,6 +27,7 @@ struct cpfl_adapter_list cpfl_adapter_list;
 bool cpfl_adapter_list_init;
 
 static const char * const cpfl_valid_args[] = {
+   CPFL_REPRESENTOR,
CPFL_TX_SINGLE_Q,
CPFL_RX_SINGLE_Q,
CPFL_VPORT,
@@ -1407,6 +1410,128 @@ parse_bool(const char *key, const char *value, void 
*args)
return 0;
 }
 
+static int
+enlist(uint16_t *list, uint16_t *len_list, const uint16_t max_list, uint16_t 
val)
+{
+   uint16_t i;
+
+   for (i = 0; i < *len_list; i++) {
+   if (list[i] == val)
+   return 0;
+   }
+   if (*len_list >= max_list)
+   return -1;
+   list[(*len_list)++] = val;
+   return 0;
+}
+
+static const char *
+process_range(const char *str, uint16_t *list, uint16_t *len_list,
+   const uint16_t max_list)
+{
+   uint16_t lo, hi, val;
+   int result, n = 0;
+   const char *pos = str;
+
+   result = sscanf(str, "%hu%n-%hu%n", &lo, &n, &hi, &n);
+   if (result == 1) {
+   if (enlist(list, len_list, max_list, lo) != 0)
+   return NULL;
+   } else if (result == 2) {
+   if (lo > hi)
+   return NULL;
+   for (val = lo; val <= hi; val++) {
+   if (enlist(list, len_list, max_list, val) != 0)
+   return NULL;
+   }
+   } else {
+   return NULL;
+   }
+   return pos + n;
+}
+
+static const char *
+process_list(const char *str, uint16_t *list, uint16_t *len_list, const 
uint16_t max_list)
+{
+   const char *pos = str;
+
+   if (*pos == '[')
+   pos++;
+   while (1) {
+   pos = process_range(pos, list, len_list, max_list);
+   if (pos == 

[PATCH v6 09/10] net/cpfl: create port representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Track representor requests in the allowlist.
A representor will only be created for an active vport.

Signed-off-by: Jingjing Wu 
Signed-off-by: Qi Zhang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  | 109 +++---
 drivers/net/cpfl/cpfl_ethdev.h  |  37 ++
 drivers/net/cpfl/cpfl_representor.c | 581 
 drivers/net/cpfl/cpfl_representor.h |  26 ++
 drivers/net/cpfl/meson.build|   1 +
 5 files changed, 710 insertions(+), 44 deletions(-)
 create mode 100644 drivers/net/cpfl/cpfl_representor.c
 create mode 100644 drivers/net/cpfl/cpfl_representor.h

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 428d87b960..189072ab33 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1645,7 +1645,7 @@ cpfl_handle_vchnl_event_msg(struct cpfl_adapter_ext 
*adapter, uint8_t *msg, uint
}
 }
 
-static int
+int
 cpfl_vport_info_create(struct cpfl_adapter_ext *adapter,
   struct cpfl_vport_id *vport_identity,
   struct cpchnl2_event_vport_created *vport_created)
@@ -1898,6 +1898,42 @@ cpfl_vport_map_uninit(struct cpfl_adapter_ext *adapter)
rte_hash_free(adapter->vport_map_hash);
 }
 
+static int
+cpfl_repr_allowlist_init(struct cpfl_adapter_ext *adapter)
+{
+   char hname[32];
+
+   snprintf(hname, 32, "%s-repr_al", adapter->name);
+
+   rte_spinlock_init(&adapter->repr_lock);
+
+#define CPFL_REPR_HASH_ENTRY_NUM 2048
+
+   struct rte_hash_parameters params = {
+   .name = hname,
+   .entries = CPFL_REPR_HASH_ENTRY_NUM,
+   .key_len = sizeof(struct cpfl_repr_id),
+   .hash_func = rte_hash_crc,
+   .socket_id = SOCKET_ID_ANY,
+   };
+
+   adapter->repr_allowlist_hash = rte_hash_create(¶ms);
+
+   if (adapter->repr_allowlist_hash == NULL) {
+   PMD_INIT_LOG(ERR, "Failed to create repr allowlist hash");
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+static void
+cpfl_repr_allowlist_uninit(struct cpfl_adapter_ext *adapter)
+{
+   rte_hash_free(adapter->repr_allowlist_hash);
+}
+
+
 static int
 cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, struct cpfl_adapter_ext 
*adapter)
 {
@@ -1928,6 +1964,12 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
goto err_vport_map_init;
}
 
+   ret = cpfl_repr_allowlist_init(adapter);
+   if (ret) {
+   PMD_INIT_LOG(ERR, "Failed to init representor allowlist");
+   goto err_repr_allowlist_init;
+   }
+
rte_eal_alarm_set(CPFL_ALARM_INTERVAL, cpfl_dev_alarm_handler, adapter);
 
adapter->max_vport_nb = adapter->base.caps.max_vports > 
CPFL_MAX_VPORT_NUM ?
@@ -1952,6 +1994,8 @@ cpfl_adapter_ext_init(struct rte_pci_device *pci_dev, 
struct cpfl_adapter_ext *a
 
 err_vports_alloc:
rte_eal_alarm_cancel(cpfl_dev_alarm_handler, adapter);
+   cpfl_repr_allowlist_uninit(adapter);
+err_repr_allowlist_init:
cpfl_vport_map_uninit(adapter);
 err_vport_map_init:
idpf_adapter_deinit(base);
@@ -2227,48 +2271,6 @@ cpfl_vport_devargs_process(struct cpfl_adapter_ext 
*adapter)
return 0;
 }
 
-static int
-cpfl_repr_devargs_process(struct cpfl_adapter_ext *adapter)
-{
-   struct cpfl_devargs *devargs = &adapter->devargs;
-   int i, j;
-
-   /* check and refine repr args */
-   for (i = 0; i < devargs->repr_args_num; i++) {
-   struct rte_eth_devargs *eth_da = &devargs->repr_args[i];
-
-   /* set default host_id to xeon host */
-   if (eth_da->nb_mh_controllers == 0) {
-   eth_da->nb_mh_controllers = 1;
-   eth_da->mh_controllers[0] = CPFL_HOST_ID_HOST;
-   } else {
-   for (j = 0; j < eth_da->nb_mh_controllers; j++) {
-   if (eth_da->mh_controllers[j] > 
CPFL_HOST_ID_ACC) {
-   PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-eth_da->mh_controllers[j]);
-   return -EINVAL;
-   }
-   }
-   }
-
-   /* set default pf to APF */
-   if (eth_da->nb_ports == 0) {
-   eth_da->nb_ports = 1;
-   eth_da->ports[0] = CPFL_PF_TYPE_APF;
-   } else {
-   for (j = 0; j < eth_da->nb_ports; j++) {
-   if (eth_da->ports[j] > CPFL_PF_TYPE_CPF) {
-   PMD_INIT_LOG(ERR, "Invalid Host ID %d",
-eth_da->ports[j]);
-   return -EINVAL;
-   }
-   }
-   }
-   

[PATCH v6 10/10] net/cpfl: support link update for representor

2023-09-12 Thread beilei . xing
From: Beilei Xing 

Add link update ops for representor.

Signed-off-by: Jingjing Wu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.h  |  1 +
 drivers/net/cpfl/cpfl_representor.c | 89 +++--
 2 files changed, 71 insertions(+), 19 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index a4ffd51fb3..d0dcc0cc05 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -162,6 +162,7 @@ struct cpfl_repr {
struct cpfl_repr_id repr_id;
struct rte_ether_addr mac_addr;
struct cpfl_vport_info *vport_info;
+   bool func_up; /* If the represented function is up */
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_representor.c 
b/drivers/net/cpfl/cpfl_representor.c
index d2558c39a8..4d15a26c80 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -308,6 +308,72 @@ cpfl_repr_tx_queue_setup(__rte_unused struct rte_eth_dev 
*dev,
return 0;
 }
 
+static int
+cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
+{
+   if ((host_id != CPFL_HOST_ID_HOST &&
+host_id != CPFL_HOST_ID_ACC) ||
+   (pf_id != CPFL_PF_TYPE_APF &&
+pf_id != CPFL_PF_TYPE_CPF))
+   return -EINVAL;
+
+   static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = 
{
+   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
+   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_CPF] = CPFL_HOST0_CPF_ID,
+   [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_APF] = CPFL_ACC_APF_ID,
+   [CPFL_HOST_ID_ACC][CPFL_PF_TYPE_CPF] = CPFL_ACC_CPF_ID,
+   };
+
+   return func_id_map[host_id][pf_id];
+}
+
+static int
+cpfl_repr_link_update(struct rte_eth_dev *ethdev,
+ int wait_to_complete)
+{
+   struct cpfl_repr *repr = CPFL_DEV_TO_REPR(ethdev);
+   struct rte_eth_link *dev_link = ðdev->data->dev_link;
+   struct cpfl_adapter_ext *adapter = repr->itf.adapter;
+   struct cpchnl2_get_vport_info_response response;
+   struct cpfl_vport_id vi;
+   int ret;
+
+   if (!(ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)) {
+   PMD_INIT_LOG(ERR, "This ethdev is not representor.");
+   return -EINVAL;
+   }
+
+   if (wait_to_complete) {
+   if (repr->repr_id.type == RTE_ETH_REPRESENTOR_PF) {
+   /* PF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_PF;
+   vi.pf_id = cpfl_func_id_get(repr->repr_id.host_id, 
repr->repr_id.pf_id);
+   vi.vf_id = 0;
+   } else {
+   /* VF */
+   vi.func_type = CPCHNL2_FUNC_TYPE_SRIOV;
+   vi.pf_id = CPFL_HOST0_APF;
+   vi.vf_id = repr->repr_id.vf_id;
+   }
+   ret = cpfl_cc_vport_info_get(adapter, 
&repr->vport_info->vport.vport,
+&vi, &response);
+   if (ret < 0) {
+   PMD_INIT_LOG(ERR, "Fail to get vport info.");
+   return ret;
+   }
+
+   if (response.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
+   repr->func_up = true;
+   else
+   repr->func_up = false;
+   }
+
+   dev_link->link_status = repr->func_up ?
+   RTE_ETH_LINK_UP : RTE_ETH_LINK_DOWN;
+
+   return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
.dev_start  = cpfl_repr_dev_start,
.dev_stop   = cpfl_repr_dev_stop,
@@ -317,6 +383,8 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
 
.rx_queue_setup = cpfl_repr_rx_queue_setup,
.tx_queue_setup = cpfl_repr_tx_queue_setup,
+
+   .link_update= cpfl_repr_link_update,
 };
 
 static int
@@ -331,6 +399,8 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
repr->itf.type = CPFL_ITF_TYPE_REPRESENTOR;
repr->itf.adapter = adapter;
repr->itf.data = eth_dev->data;
+   if (repr->vport_info->vport.info.vport_status == 
CPCHNL2_VPORT_STATUS_ENABLED)
+   repr->func_up = true;
 
eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
@@ -349,25 +419,6 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void 
*init_param)
return cpfl_repr_allowlist_update(adapter, &repr->repr_id, eth_dev);
 }
 
-static int
-cpfl_func_id_get(uint8_t host_id, uint8_t pf_id)
-{
-   if ((host_id != CPFL_HOST_ID_HOST &&
-host_id != CPFL_HOST_ID_ACC) ||
-   (pf_id != CPFL_PF_TYPE_APF &&
-pf_id != CPFL_PF_TYPE_CPF))
-   return -EINVAL;
-
-   static const uint32_t func_id_map[CPFL_HOST_ID_NUM][CPFL_PF_TYPE_NUM] = 
{
-   [CPFL_HOST_ID_HOST][CPFL_PF_TYPE_APF] = CPFL_HOST0_APF,
-   [CPFL_HOST_I

Re: [External] Re: [RFC PATCH] move memset out of hold lock when rte_free

2023-09-12 Thread Fengnan Chang
This problem has been fixed in this patch:
http://patches.dpdk.org/project/dpdk/patch/20230912090415.48709-1-changfeng...@bytedance.com/
I'm doing long-term testing, especially of rte_zmalloc.

Fengnan Chang  于2023年9月12日周二 10:44写道:
>
> This patch still have problem, I'll fix next version.
>
> Stephen Hemminger  于2023年9月6日周三 23:08写道:
> >
> > On Thu, 31 Aug 2023 19:19:37 +0800
> > Fengnan Chang  wrote:
> >
> > > +#ifndef RTE_MALLOC_DEBUG
> > > + if (internal_conf->legacy_mem) {
> > > + /* If orig_elem is dirty, the joint element is clean, we 
> > > need do memset now */
> > > + if (elem->orig_elem->dirty && !elem->dirty)
> > > + memset(ptr, 0, data_len);
> > > + } else if (!elem->dirty) {
> > > + memset(ptr, 0, data_len);
> > > + }
> > > +#else
> > > + /* Always poison the memory in debug mode. */
> > > + memset(ptr, MALLOC_POISON, data_len);
> > > +#endif
> >
> > The code reads better if positive clause was first.
>
> Got it,  I'll do as you suggest in next version.
> > I.e.
> >
> > #ifdef RTE_MALLOC_DEBUG
> > /* Always poison the memory in debug mode. */
> > memset(ptr, MALLOC_POISON, data_len);
> > #else
> > ...


[PATCH v2] cryptodev: add missing doc

2023-09-12 Thread Anoob Joseph
Description for rte_cryptodev_get_sec_ctx is missing. Add the same.

Fixes: eadb4fa1e1fe ("cryptodev: support security APIs")

Signed-off-by: Anoob Joseph 
---
 lib/cryptodev/rte_cryptodev.h | 9 +
 1 file changed, 9 insertions(+)

diff --git a/lib/cryptodev/rte_cryptodev.h b/lib/cryptodev/rte_cryptodev.h
index 9246df90ef..9017a56cd0 100644
--- a/lib/cryptodev/rte_cryptodev.h
+++ b/lib/cryptodev/rte_cryptodev.h
@@ -971,6 +971,15 @@ struct rte_cryptodev_cb_rcu {
/**< RCU QSBR variable per queue pair */
 };
 
+/**
+ * Get the security context for the cryptodev.
+ *
+ * @param dev_id
+ *   The device identifier.
+ * @return
+ *   - NULL on error.
+ *   - Pointer to security context on success.
+ */
 void *
 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
 
-- 
2.25.1
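
For context, a minimal usage sketch of the documented function (illustrative
only, not part of the patch; it assumes the crypto device advertises
RTE_CRYPTODEV_FF_SECURITY, and error handling is reduced to a NULL check):

#include <rte_cryptodev.h>
#include <rte_security.h>

static struct rte_security_ctx *
get_sec_ctx_or_null(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);
	if (!(info.feature_flags & RTE_CRYPTODEV_FF_SECURITY))
		return NULL;

	/* NULL on error, pointer to the device's security context otherwise. */
	return rte_cryptodev_get_sec_ctx(dev_id);
}

The returned context is what the rte_security API expects as its instance
argument.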



Re: [PATCH 2/2] lib/power:fix comparision to bool warning

2023-09-12 Thread Stephen Hemminger
On Mon, 13 Jun 2022 23:56:43 +0800
835703...@qq.com wrote:

> From: newsky647 <835703...@qq.com>
> 
> expr "if ([expr] == true)" can be simplified to "if ([expr])".
> Therefore, simplify it, no functional change.
> 
> Fixes: 450f0791312 ("lib/power: power: add traffic pattern aware power control")
> Signed-off-by: newsky647 <835703...@qq.com>

This patch does not meet Developer Certificate of Origin legal
requirements. You need to use your legal name because Signed Off by
has legal meaning.


Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:

The contribution was created in whole or in part by me and I have the
right to submit it under the open source license indicated in the file;
or

The contribution is based upon previous work that, to the best of my
knowledge, is covered under an appropriate open source license and I
have the right under that license to submit that work with
modifications, whether created in whole or in part by me, under the
same open source license (unless I am permitted to submit under a
different license), as indicated in the file; or

The contribution was provided directly to me by some other person who
certified (a), (b) or (c) and I have not modified it.

I understand and agree that this project and the contribution are
public and that a record of the contribution (including all personal
information I submit with it, including my sign-off) is maintained
indefinitely and may be redistributed consistent with this project or
the open source license(s) involved.

then you just add a line saying:

Signed-off-by: Random J Developer 
using a known identity (sorry, no anonymous contributions.) This will
be done for you automatically if you use git commit -s. Reverts should
also include "Signed-off-by". git revert -s does that for you.

Some people also put extra tags at the end. They'll just be ignored for
now, but you can do this to mark internal company procedures or just
point out some special detail about the sign-off.

Any further SoBs (Signed-off-by:'s) following the author's SoB are from
people handling and transporting the patch, but were not involved in
its development. SoB chains should reflect the real route a patch took
as it was propagated to the maintainers and ultimately to Linus, with
the first SoB entry signalling primary authorship of a single author.


RE: [PATCH v2] windows/virt2phys: fix block MDL not updated

2023-09-12 Thread Li, Ming3


Hi Dmitry,

Thanks for the review, I'll send the next version of the patch.
Please see my comments below.

> -Original Message-
> From: Dmitry Kozlyuk 
> Sent: Tuesday, September 12, 2023 5:51 AM
> To: Li, Ming3 
> Cc: dev@dpdk.org; Tyler Retzlaff 
> Subject: Re: [PATCH v2] windows/virt2phys: fix block MDL not updated
> 
> Hi Ric,
> 
> 2023-09-11 21:09 (UTC+0800), Ric Li:
> > The virt2phys_translate function previously scanned existing blocks,
> > returning the physical address from the stored MDL info if present.
> > This method was problematic when a virtual address pointed to a freed
> > and reallocated memory segment, potentially changing the physical
> > address mapping. Yet, virt2phys_translate would consistently return
> > the originally stored physical address, which could be invalid.
> 
> I missed this case completely :(
> 
> Is any if these bugs are related?
> If so, please mention "Bugzilla ID: " in the commit message.
> 
> https://bugs.dpdk.org/show_bug.cgi?id=1201
> https://bugs.dpdk.org/show_bug.cgi?id=1213
> 

Sure, will do.

I cannot reproduce them in my environment, but from the messages,
they both mention that some pages are not unlocked after exit. So they can be
related.

For example, Bug 1201 only exists on Windows 2019; could it be caused by an
OS limitation, such that some memory segment got freed and then allocated
again at the same virtual address?
Maybe someone can use this patch to check whether the 'refresh' behavior
shows up in the TraceView logs.

> >
> > This issue surfaced when allocating a memory region larger than 2MB
> > using rte_malloc. This action would allocate a new memory segment and
> > use virt2phy to set the iova. The driver would store the MDL
> 
> Typo: "iova" -> "IOVA" here and below.
> 

Noted, will fix in v3.

> > and lock the pages initially. When this region was freed, the memory
> > segment used as a whole page could be freed, invalidating the virtual
> > to physical mapping. It then needed to be deleted from the driver's
> > block MDL info. Before this fix, the driver would only return the
> > initial physical address, leading to illegal iova for some pages when
> > allocating a new memory region of the same size.
> >
> > To address this, a refresh function has been added. If a block with
> > the same base address is detected in the driver's context, the MDL's
> > physical address is compared with the real physical address.
> > If they don't match, the MDL within the block is released and rebuilt
> > to store the correct mapping.
> 
> What if the size is different?
> Should it be updated for the refreshed block along with the MDL?
> 

The size of a single MDL is always 2MB since it describes a hugepage here
(at least from my observation :)). A buffer allocated larger than 2MB has
several mem segs (related to several MDLs); most of the buggy mem segs are
those that occupy a whole hugepage. These segments are freed along with the
buffer, so their MDLs become invalid.

Since the block is just a wrapper around the MDL and a list entry,
the refresh action should be applied to the whole block.
> [...]
> > +static NTSTATUS
> > +virt2phys_block_refresh(struct virt2phys_block *block, PVOID base,
> > +size_t size) {
> > +   NTSTATUS status;
> > +   PMDL mdl = block->mdl;
> > +
> > +   /*
> > +* Check if we need to refresh MDL in block.
> > +* The virtual to physical memory mapping may be changed when the
> > +* virtual memory region is freed by the user process and malloc again,
> > +* then we need to unlock the physical memory and lock again to
> > +* refresh the MDL information stored in block.
> > +*/
> > +   PHYSICAL_ADDRESS block_phys, real_phys;
> > +
> > +   block_phys = virt2phys_block_translate(block, base);
> > +   real_phys = MmGetPhysicalAddress(base);
> > +
> > +   if (block_phys.QuadPart == real_phys.QuadPart)
> > +   /* No need to refresh block. */
> > +   return STATUS_SUCCESS;
> > +
> > +   virt2phys_unlock_memory(mdl);
> 
> After this call the block's MDL is a dangling pointer.
> If an error occurs below, the block with a dangling pointer will remain in 
> the list
> and will probably cause a crash later.
> If a block can't be refreshed, it must be freed (it's invalid anyway).
> 

I will change the refresh logic here to just check the PA, and if it doesn't 
match,
the block will be removed from process's blocks list(after the check function).
To make it easy for block removal, the single linked list will be replaced with
a double linked list.

> > +   mdl = NULL;
> > +
> > +   status = virt2phys_lock_memory(base, size, &mdl);
> > +   if (!NT_SUCCESS(status))
> > +   return status;
> > +
> > +   status = virt2phys_check_memory(mdl);
> > +   if (!NT_SUCCESS(status)) {
> > +   virt2phys_unlock_memory(mdl);
> > +   return status;
> > +   }
> > +   block->mdl = mdl;
> > +
> > +   TraceInfo("Block refreshed from %llx to %llx", block_phys.QuadPart,
> > +real_phys.QuadPart);
> 
> Please add process ID, block VA, and bloc

[PATCH v3] windows/virt2phys: fix block MDL not updated

2023-09-12 Thread Ric Li
The virt2phys_translate function previously scanned existing blocks,
returning the physical address from the stored MDL info if present.
This method was problematic when a virtual address pointed to a freed
and reallocated memory segment, potentially changing the physical
address mapping. Yet, virt2phys_translate would consistently return
the originally stored physical address, which could be invalid.

This issue surfaced when allocating a memory region larger than 2MB
using rte_malloc. This action would allocate a new memory segment
and use virt2phys to set the IOVA. The driver would store the MDL
and lock the pages initially. When this region was freed, the memory
segment used as a whole page could be freed, invalidating the virtual
to physical mapping. Before this fix, the driver would only return the
initial physical address, leading to illegal IOVA for some pages when
allocating a new memory region larger than the hugepage size (2MB).
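
A rough reproduction sketch of the scenario described above (illustrative
only, not part of the patch; it assumes 2MB hugepages, IOVA as PA on Windows,
an initialized EAL, and an arbitrary buffer size):

#include <rte_malloc.h>

/* Illustrative reproduction sketch; call after rte_eal_init(). */
static void
repro_stale_iova(void)
{
	/* Allocate more than one hugepage so whole-page memsegs get involved. */
	void *buf = rte_malloc(NULL, 4 * 1024 * 1024, 0);

	rte_free(buf);          /* whole-page memsegs may be released here */
	buf = rte_malloc(NULL, 4 * 1024 * 1024, 0);
	/* The same VA range may now map to different physical pages; before
	 * this fix the driver kept returning the stale physical address.
	 */
	rte_free(buf);
}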

To address this, a function to check block physical address has been
added. If a block with the same base address is detected in the
driver's context, the MDL's physical address is compared with the real
physical address. If they don't match, the block is removed and a new
one is created to store the correct mapping. To make the removal action
clear, the list used to store MDL blocks is changed to a doubly linked list.

Also fix the printing of PVOID type.

Bugzilla ID: 1201
Bugzilla ID: 1213

Signed-off-by: Ric Li 
---
v3:
* Change refresh action to block removal
* Change block list to double linked list

v2:
* Revert wrong usage of MmGetMdlStartVa

 windows/virt2phys/virt2phys.c   |  7 +--
 windows/virt2phys/virt2phys_logic.c | 70 ++---
 2 files changed, 57 insertions(+), 20 deletions(-)

diff --git a/windows/virt2phys/virt2phys.c b/windows/virt2phys/virt2phys.c
index f4d5298..b64a13d 100644
--- a/windows/virt2phys/virt2phys.c
+++ b/windows/virt2phys/virt2phys.c
@@ -182,7 +182,7 @@ virt2phys_device_EvtIoInCallerContext(WDFDEVICE device, 
WDFREQUEST request)
 {
WDF_REQUEST_PARAMETERS params;
ULONG code;
-   PVOID *virt;
+   PVOID *pvirt, virt;
PHYSICAL_ADDRESS *phys;
size_t size;
NTSTATUS status;
@@ -207,12 +207,13 @@ virt2phys_device_EvtIoInCallerContext(WDFDEVICE device, 
WDFREQUEST request)
}
 
status = WdfRequestRetrieveInputBuffer(
-   request, sizeof(*virt), (PVOID *)&virt, &size);
+   request, sizeof(*pvirt), (PVOID *)&pvirt, &size);
if (!NT_SUCCESS(status)) {
TraceWarning("Retrieving input buffer: %!STATUS!", status);
WdfRequestComplete(request, status);
return;
}
+   virt = *pvirt;
 
status = WdfRequestRetrieveOutputBuffer(
request, sizeof(*phys), (PVOID *)&phys, &size);
@@ -222,7 +223,7 @@ virt2phys_device_EvtIoInCallerContext(WDFDEVICE device, 
WDFREQUEST request)
return;
}
 
-   status = virt2phys_translate(*virt, phys);
+   status = virt2phys_translate(virt, phys);
if (NT_SUCCESS(status))
WdfRequestSetInformation(request, sizeof(*phys));
 
diff --git a/windows/virt2phys/virt2phys_logic.c 
b/windows/virt2phys/virt2phys_logic.c
index e3ff293..531f08c 100644
--- a/windows/virt2phys/virt2phys_logic.c
+++ b/windows/virt2phys/virt2phys_logic.c
@@ -12,13 +12,13 @@
 struct virt2phys_process {
HANDLE id;
LIST_ENTRY next;
-   SINGLE_LIST_ENTRY blocks;
+   LIST_ENTRY blocks;
ULONG64 memory;
 };
 
 struct virt2phys_block {
PMDL mdl;
-   SINGLE_LIST_ENTRY next;
+   LIST_ENTRY next;
 };
 
 static struct virt2phys_params g_params;
@@ -69,24 +69,28 @@ virt2phys_process_create(HANDLE process_id)
struct virt2phys_process *process;
 
process = ExAllocatePoolZero(NonPagedPool, sizeof(*process), 'pp2v');
-   if (process != NULL)
+   if (process != NULL) {
process->id = process_id;
+   InitializeListHead(&process->blocks);
+   }
+
return process;
 }
 
 static void
 virt2phys_process_free(struct virt2phys_process *process, BOOLEAN unmap)
 {
-   PSINGLE_LIST_ENTRY node;
+   PLIST_ENTRY node, next;
struct virt2phys_block *block;
 
TraceInfo("ID = %p, unmap = %!bool!", process->id, unmap);
 
-   node = process->blocks.Next;
-   while (node != NULL) {
+   for (node = process->blocks.Flink; node != &process->blocks; node = 
next) {
+   next = node->Flink;
block = CONTAINING_RECORD(node, struct virt2phys_block, next);
-   node = node->Next;
-   virt2phys_block_free(block, unmap);
+   RemoveEntryList(&block->next);
+
+   virt2phys_block_free(block, TRUE);
}
 
ExFreePool(process);
@@ -109,10 +113,10 @@ virt2phys_process_find(HANDLE process_id)
 static struct virt2phys_block *
 virt2

[PATCH 1/1] test/hash: fix error log output

2023-09-12 Thread Min Zhou
Caught while running meson test:
hash creation failedTest Failed

Add missing \n.

Fixes: 0eb3726ebcf ("test/hash: add test for read/write concurrency")

Signed-off-by: Min Zhou 
---
 app/test/test_hash_readwrite.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/app/test/test_hash_readwrite.c b/app/test/test_hash_readwrite.c
index 74ca13912f..4997a01249 100644
--- a/app/test/test_hash_readwrite.c
+++ b/app/test/test_hash_readwrite.c
@@ -162,7 +162,7 @@ init_params(int use_ext, int use_htm, int rw_lf, int 
use_jhash)
 
handle = rte_hash_create(&hash_params);
if (handle == NULL) {
-   printf("hash creation failed");
+   printf("hash creation failed\n");
return -1;
}
 
-- 
2.39.1



Re: [PATCH v2] windows/virt2phys: fix block MDL not updated

2023-09-12 Thread Dmitry Kozlyuk
2023-09-12 11:13 (UTC+), Li, Ming3:
> > Is any if these bugs are related?
> > If so, please mention "Bugzilla ID: " in the commit message.
> > 
> > https://bugs.dpdk.org/show_bug.cgi?id=1201
> > https://bugs.dpdk.org/show_bug.cgi?id=1213
> >   
> 
> Sure, will do.
> 
> I cannot reproduce them in my environment, but from the message,
> they both mentioned that some pages not unlocked after exit. So they can be 
> related.
> 
> For example, in Bug 1201, it only exists on Windows 2019, may it be caused by 
> the
> OS limitation so that some memory segment got freed and allocated same 
> virtual address again?
> Maybe someone can use this patch to check if there is 'refresh' behavior from 
> TraceView logs.

I've posted a comment in BZ 1201 (the bugs are from the same user)
inviting to test your patch, let's see.

[...]
> > > To address this, a refresh function has been added. If a block with
> > > the same base address is detected in the driver's context, the MDL's
> > > physical address is compared with the real physical address.
> > > If they don't match, the MDL within the block is released and rebuilt
> > > to store the correct mapping.  
> > 
> > What if the size is different?
> > Should it be updated for the refreshed block along with the MDL?
> >   
> 
> The size of single MDL is always 2MB since it describes a hugepage here. 
> (at least from my observation :))

Your observation is correct, DPDK memalloc layer currently works this way.

> For allocated buffer larger than 2MB, it has
> serval mem segs (related to serval MDLs), most buggy mem segs are those
> possess a whole hugepage, these segments are freed along with the buffer,
> so their MDLs become invalid.
> 
> Since the block is just wrapper for MDL and list entry,
> the refresh action should be applied to the whole block.

There is always a single MDL per block, but it can describe multiple pages
(generally, if used beyond DPDK). Suppose there was a block for one page.
Then this page has been deallocated and allocated again but this time
in the middle of a multi-page region.
With your patch this will work, but that one-page block will be just lost
(never found because its MDL base VA does not match the region start VA).
The downside is that the memory remains locked.

The solution could be to check, when inserting a new block,
if there are existing blocks covered by the new one,
and if so, to free those blocks as they correspond to deallocated regions.
I think this can be done with another patch to limit the scope of this one.
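
A rough sketch of what such a sweep could look like, reusing names from the
v3 patch (purely illustrative and untested; it assumes the doubly linked
block list and is not a proposed hunk):

static void
virt2phys_process_free_covered(struct virt2phys_process *process,
	PVOID base, size_t size)
{
	PLIST_ENTRY node, next;
	struct virt2phys_block *block;
	PUCHAR start;
	size_t length;

	for (node = process->blocks.Flink; node != &process->blocks;
			node = next) {
		next = node->Flink;
		block = CONTAINING_RECORD(node, struct virt2phys_block, next);
		start = MmGetMdlVirtualAddress(block->mdl);
		length = MmGetMdlByteCount(block->mdl);
		/* Block fully covered by the new region => its region was freed. */
		if (start >= (PUCHAR)base &&
				start + length <= (PUCHAR)base + size) {
			RemoveEntryList(&block->next);
			virt2phys_block_free(block, TRUE);
		}
	}
}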

Ideally virt2phys should not be doing this guesswork at all.
DPDK can just tell it when pages are allocated and freed,
but this requires some rework of the userspace part.
Just thinking out loud.

[...]
> > >   /* Don't lock the same memory twice. */
> > >   if (block != NULL) {
> > > + KeAcquireSpinLock(g_lock, &irql);
> > > + status = virt2phys_block_refresh(block, base, size);
> > > + KeReleaseSpinLock(g_lock, irql);  
> > 
> > Is it safe to do all the external calls holding this spinlock?
> > I can't confirm from the doc that ZwQueryVirtualMemory(), for example, does
> > not access pageable data.
> > And virt2phys_lock_memory() raises exceptions, although it handles them.
> > Other stuff seems safe.
> > 
> > The rest of the code only takes the lock to access block and process lists, 
> > which
> > are allocated from the non-paged pool.
> > Now that I think of it, this may be insufficient because the code and the 
> > static
> > variables are not marked as non-paged.
> > 
> > The translation IOCTL performance is not critical, so maybe it is worth 
> > replacing
> > the spinlock with just a global mutex, WDYT?  
> 
> In the upcoming v3 patch, the lock will be used for block removal which won't 
> fail.
> 
> I'm relatively new to Windows driver development. From my perspective, the use
> of a spinlock seems appropriate in this driver. Maybe a read-write lock can be
> more effective here?

It is correctness that I am concerned with, not efficiency.
Translating VA to IOVA is not performance-critical,
the spinlock is used just because it seemed sufficient.

Relating the code to the docs [1]:

* The code within a critical region guarded by an spin lock
  must neither be pageable nor make any references to pageable data.

  - Process and block structures are allocated from the non-paged pool - OK.
  - The code is not marked as non-pageable - FAIL, though never fired.

* The code within a critical region guarded by a spin lock can neither
  call any external function that might access pageable data...

  - MDL manipulation and page locking can run at "dispatch" IRQL - OK.
  - ZwQueryVirtualMemory() - unsure

  ... or raise an exception, nor can it generate any exceptions.

  - MmProbeAndLockPages() does generate an exception on failure,
but it is handled - unsure

* The caller should release the spin lock with KeReleaseSpinLock as
  quickly as possible.

  - Before the patch, there was a fi
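
(For reference, a minimal sketch of the global-mutex alternative mentioned
above. virt2phys_block_refresh() and its arguments are taken from the patch
under discussion; g_mutex, the wrapper and the argument types are
assumptions, not existing driver code.)

#include <ntddk.h>

struct virt2phys_block;                          /* from the driver */
NTSTATUS virt2phys_block_refresh(struct virt2phys_block *block,
        void *base, SIZE_T size);                /* helper added by the patch */

static KMUTEX g_mutex;                           /* would replace the spinlock */

/* Call once, e.g. from DriverEntry. */
static void virt2phys_lock_init(void)
{
        KeInitializeMutex(&g_mutex, 0);
}

/*
 * Waiting on a kernel mutex keeps the thread at PASSIVE_LEVEL (it only
 * enters a critical region), so the guarded code may be pageable, touch
 * pageable data and call ZwQueryVirtualMemory(), unlike code running
 * under a spinlock at DISPATCH_LEVEL.
 */
static NTSTATUS
virt2phys_refresh_locked(struct virt2phys_block *block, void *base, SIZE_T size)
{
        NTSTATUS status;

        KeWaitForSingleObject(&g_mutex, Executive, KernelMode, FALSE, NULL);
        status = virt2phys_block_refresh(block, base, size);
        KeReleaseMutex(&g_mutex, FALSE);
        return status;
}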

RE: [PATCH 1/1] net/mana: enable 32 bit build for mana driver

2023-09-12 Thread Wei Hu
> From: Stephen Hemminger 
> Sent: Saturday, September 9, 2023 10:52 PM
> > diff --git a/drivers/net/mana/mana.c b/drivers/net/mana/mana.c index
> > 7630118d4f..a20ca1a988 100644
> > --- a/drivers/net/mana/mana.c
> > +++ b/drivers/net/mana/mana.c
> > @@ -1260,7 +1260,7 @@ mana_probe_port(struct ibv_device *ibdev,
> struct ibv_device_attr_ex *dev_attr,
> >   /* Create a parent domain with the port number */
> >   attr.pd = priv->ib_pd;
> >   attr.comp_mask = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT;
> > - attr.pd_context = (void *)(uint64_t)port;
> > + attr.pd_context = (void *)(size_t)port;
> 
> Since port is uint8_t, the better cast would be to uintptr_t, which is
> always an unsigned value of the same size as a pointer.
> 
> Also, not sure why using PRIxPTR is necessary; on all arches and platforms
> %p should work for printing a pointer and is the more common usage.

Thanks Stephen. I will send out a v2 with port casting to uintptr_t.

Regarding the use of PRIxPTR, I have seen it in a couple of other drivers.
I think it is probably because the original variable is defined as uintptr_t,
which was typedef'ed to unsigned long, and we want to differentiate it from
an actual pointer. I was also trying to stick to the original code.
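
A small standalone illustration of both points (generic code, not the mana
driver itself): the uintptr_t round trip is safe on 32-bit and 64-bit builds,
and the value can be printed either as a pointer with %p or as an integer
with PRIxPTR.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t port = 3;

        /* Widen through uintptr_t: unsigned and pointer-sized on every
         * ABI, so no truncation warning on a 32-bit build. */
        void *pd_context = (void *)(uintptr_t)port;

        printf("as pointer: %p\n", pd_context);
        printf("as integer: 0x%" PRIxPTR "\n", (uintptr_t)pd_context);

        /* Recover the port number on the callback side. */
        printf("port: %u\n", (unsigned int)(uintptr_t)pd_context);
        return 0;
}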

Wei


Re: [PATCH v6 0/2] Add l2reflect measurement application

2023-09-12 Thread Stephen Hemminger
On Fri,  2 Sep 2022 10:45:31 +0200
Felix Moessbauer  wrote:

>  app/l2reflect/colors.c|   34 ++
>  app/l2reflect/colors.h|   19 +
>  app/l2reflect/l2reflect.h |   53 ++
>  app/l2reflect/main.c  | 1007 +
>  app/l2reflect/meson.build |   21 +
>  app/l2reflect/payload.h   |   26 +
>  app/l2reflect/stats.c |  225 +
>  app/l2reflect/stats.h |   67 +++
>  app/l2reflect/utils.c |   67 +++
>  app/l2reflect/utils.h |   20 +
>  app/meson.build   |3 +-

This belongs in the examples subdirectory, not app.
Please move it when you resubmit the patch.


If feeling bored

2023-09-12 Thread Morten Brørup
For the script sharks...

RTE_LIBRTE_xxx seems redundant. There are still a bunch of those in DPDK, which 
could be replaced by RTE_xxx.

Cleaning this up will probably break the API.
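
A hypothetical illustration of that concern, using RTE_LIBRTE_MEMPOOL_DEBUG
as the example: applications may test the old macro name in their own code,
so a temporary compatibility shim (or at least a deprecation note) would be
needed. The shim below is only a sketch, not an existing DPDK header.

/* Keep the old spelling working while the tree moves to the short form. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
#ifndef RTE_MEMPOOL_DEBUG
#define RTE_MEMPOOL_DEBUG 1
#endif
#endif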


Re: If feeling bored

2023-09-12 Thread David Marchand
On Tue, Sep 12, 2023 at 5:46 PM Morten Brørup  
wrote:
> RTE_LIBRTE_xxx seems redundant. There are still a bunch of those in DPDK, 
> which could be replaced by RTE_xxx.
>
> Cleaning this up will probably break the API.

If we exclude config/ doc/ and any rte_.*.h header, we should be good?

$ for pattern in $(git grep -o 'RTE_LIBRTE_[A-Z0-9_]*' ':^**/rte**.h'
':^config/' ':^doc/'); do file=${pattern%%:*}; symbol=${pattern##*:};
! git grep -wq $symbol '**/rte**.h' config/ doc/ || continue; sed -i
-e "s/$symbol/${symbol##RTE_LIB}/g" $file; done
$ git diff --stat
...
 135 files changed, 437 insertions(+), 437 deletions(-)

$ ninja -C build
...


There are still some exposed defines, but it is a first step forward.
$ git grep RTE_LIBRTE_ | wc
    306    1010   22337



-- 
David Marchand



Re: [PATCH 1/3] vhost: fix build for powerpc

2023-09-12 Thread David Christensen




On 9/1/23 7:59 AM, Bruce Richardson wrote:

+PPC maintainer

On Thu, Aug 31, 2023 at 01:10:56PM +0100, Bruce Richardson wrote:

When building on Ubuntu using the packaged powerpc compiler[1], a
warning is issued about the print format of the __u64 values.

../../lib/vhost/vduse.c: In function ‘vduse_vring_setup’:
../../lib/vhost/vhost.h:676:17: error: format ‘%llx’ expects argument of
type ‘long long unsigned int’, but argument 5 has type ‘__u64’ {aka
‘long unsigned int’} [-Werror=format=]
   676 | "VHOST_CONFIG: (%s) " fmt, prefix, ##args)
   | ^

Changing the format specifier to %lx, or using PRIx64, breaks other
builds, so the safest solution is to explicitly cast the printed
values to match the format string.

[1] powerpc64le-linux-gnu-gcc (Ubuntu 12.3.0-1ubuntu1~23.04) 12.3.0
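
An illustrative standalone snippet of that approach (not the actual vduse.c
change; my_u64 stands in for the kernel __u64 typedef):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for __u64, which is 'unsigned long' on some targets and
 * 'unsigned long long' on others. */
typedef uint64_t my_u64;

static void log_desc_addr(my_u64 desc_addr)
{
        /* The explicit cast keeps "%llx" correct whatever the typedef is. */
        printf("desc addr: 0x%llx\n", (unsigned long long)desc_addr);
}

int main(void)
{
        log_desc_addr(0xdeadbeefULL);
        return 0;
}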

Fixes: a9120db8b98b ("vhost: add VDUSE device startup")
Cc: maxime.coque...@redhat.com
Cc: sta...@dpdk.org

Signed-off-by: Bruce Richardson 
---


Tested-by: David Christensen 


[PATCH] gpu/cuda: Add missing stdlib include

2023-09-12 Thread John Romein

getenv needs stdlib.h to be included.

Bugzilla ID: 1133

Fixes: 24c77594e08f ("gpu/cuda: map GPU memory with GDRCopy")
Signed-off-by: John Romein 
---
 drivers/gpu/cuda/gdrcopy.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/gpu/cuda/gdrcopy.c b/drivers/gpu/cuda/gdrcopy.c
index 322a5dbeb2..f19ad396f1 100644
--- a/drivers/gpu/cuda/gdrcopy.c
+++ b/drivers/gpu/cuda/gdrcopy.c
@@ -2,6 +2,8 @@
  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
  */

+#include <stdlib.h>
+
 #include "common.h"

 #ifdef DRIVERS_GPU_CUDA_GDRCOPY_H
--
2.39.3
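
A generic illustration of why the include matters (not tied to the gdrcopy
code): without <stdlib.h>, getenv() has no prototype and strict builds fail
with an implicit-declaration error.

#include <stdio.h>
#include <stdlib.h> /* declares getenv(); omitting it breaks -Werror builds */

int main(void)
{
        /* "SOME_VAR" is just a placeholder environment variable name. */
        const char *value = getenv("SOME_VAR");

        printf("SOME_VAR=%s\n", value != NULL ? value : "(unset)");
        return 0;
}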




RE: [PATCH v2 5/5] devtools: ignore changes into bbdev experimental API

2023-09-12 Thread Vargas, Hernan
Hi Hemant,

Your previous ack was under the [PATCH v2 0/5] email.
Could you please place your ack for the patch under this email?

Thanks,
Hernan

Acked-by: Hernan Vargas 

> -Original Message-
> From: Hemant Agrawal 
> Sent: Wednesday, September 6, 2023 1:17 AM
> To: Chautru, Nicolas ; dev@dpdk.org;
> maxime.coque...@redhat.com
> Cc: Rix, Tom ; hemant.agra...@nxp.com;
> david.march...@redhat.com; Vargas, Hernan 
> Subject: Re: [PATCH v2 5/5] devtools: ignore changes into bbdev experimental
> API
> 
> 
> On 15-Jun-23 10:19 PM, Nicolas Chautru wrote:
> >
> > Developpers are warned that the related fft experimental functions do
> > not preserve ABI, hence these can be waived.
> %s/Developpers/Developers
> >
> > Signed-off-by: Nicolas Chautru 
> > ---
> >   devtools/libabigail.abignore | 4 +++-
> >   1 file changed, 3 insertions(+), 1 deletion(-)
> >
> > diff --git a/devtools/libabigail.abignore
> > b/devtools/libabigail.abignore index 7a93de3ba1..09b8f156b5 100644
> > --- a/devtools/libabigail.abignore
> > +++ b/devtools/libabigail.abignore
> > @@ -30,7 +30,9 @@
> >   [suppress_type]
> >   type_kind = enum
> >   changed_enumerators = RTE_CRYPTO_ASYM_XFORM_ECPM,
> > RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
> > -
> > +; Ignore changes to bbdev FFT API which is experimental
> > +[suppress_type]
> > +name = rte_bbdev_fft_op
> >   
> >   ; Temporary exceptions till next major ABI version ;
> >   
> > --
> > 2.34.1
> >


RE: If feeling bored

2023-09-12 Thread Morten Brørup
> From: David Marchand [mailto:david.march...@redhat.com]
> Sent: Tuesday, 12 September 2023 18.50
> 
> On Tue, Sep 12, 2023 at 5:46 PM Morten Brørup 
> wrote:
> > RTE_LIBRTE_xxx seems redundant. There are still a bunch of those in
> > DPDK, which could be replaced by RTE_xxx.
> >
> > Cleaning this up will probably break the API.
> 
> If we exclude config/ doc/ and any rte_.*.h header, we should be good?

Along with rte_*.h, we would also need to exclude all meson.build files.

But I'm not sure it's that simple...

E.g. RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF is in config/rte_config.h but also in 
drivers/net/i40e/i40e_ethdev.c and doc/guides/nics/i40e.rst.

I think we need to replace them all.


grep -r "RTE_LIBRTE_" lib/ app/ drivers/ buildtools/ config/ devtools/ doc/ 
dts/ examples/ usertools/

And some simple manual processing of the output gives this list of 48 unique 
names with this prefix:

RTE_LIBRTE_@0@_COMMON
RTE_LIBRTE_ARK_MIN_TX_PKTLEN
RTE_LIBRTE_AVP_DEBUG_BUFFERS
RTE_LIBRTE_BBDEV_SKIP_VALIDATE
RTE_LIBRTE_BNX2X_DEBUG_PERIODIC
RTE_LIBRTE_BNX2X_DEBUG_RX
RTE_LIBRTE_BNX2X_DEBUG_TX
RTE_LIBRTE_BPF_ELF
RTE_LIBRTE_DPAA_MAX_CRYPTODEV
RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
RTE_LIBRTE_EAL_VMWARE_TSC_MAP_SUPPORT
RTE_LIBRTE_ETHDEV_DEBUG
RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
RTE_LIBRTE_GRAPH_STATS
RTE_LIBRTE_HNS3_MAX_TQP_NUM_PER_PF
RTE_LIBRTE_I40E_16BYTE_RX_DESC
RTE_LIBRTE_I40E_QUEUE_NUM_PER_PF
RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF
RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM
RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
RTE_LIBRTE_IP_FRAG_MAX
RTE_LIBRTE_IP_FRAG_MAX_FRAG
RTE_LIBRTE_IP_FRAG_MAX_FRAGS
RTE_LIBRTE_IP_FRAG_TBL_STAT
RTE_LIBRTE_IXGBE_BYPASS
RTE_LIBRTE_MBUF_DEBUG
RTE_LIBRTE_MEMPOOL_DEBUG
RTE_LIBRTE_MEMPOOL_STATS
RTE_LIBRTE_ML_DEV_DEBUG
RTE_LIBRTE_MLX4_DEBUG
RTE_LIBRTE_MLX5_DEBUG
RTE_LIBRTE_NGBE_TM
RTE_LIBRTE_PMD_BBDEV_LA12XX
RTE_LIBRTE_PMD_DLB2_QUELL_STATS
RTE_LIBRTE_QEDE_DEBUG_RX
RTE_LIBRTE_QEDE_DEBUG_TX
RTE_LIBRTE_QEDE_FW
RTE_LIBRTE_RCU_DEBUG
RTE_LIBRTE_RING_DEBUG
RTE_LIBRTE_SFC_EFX_DEBUG
RTE_LIBRTE_TIMER_DEBUG
RTE_LIBRTE_TXGBE_DEBUG_RX
RTE_LIBRTE_TXGBE_DEBUG_TX
RTE_LIBRTE_TXGBE_DEBUG_TX_FREE
RTE_LIBRTE_TXGBE_PF_DISABLE_STRIP_CRC
RTE_LIBRTE_TXGBE_TM
RTE_LIBRTE_VHOST_NUMA
RTE_LIBRTE_VHOST_POSTCOPY

> 
> $ for pattern in $(git grep -o 'RTE_LIBRTE_[A-Z0-9_]*' ':^**/rte**.h'
> ':^config/' ':^doc/'); do file=${pattern%%:*}; symbol=${pattern##*:};
> ! git grep -wq $symbol '**/rte**.h' config/ doc/ || continue; sed -i
> -e "s/$symbol/${symbol##RTE_LIB}/g" $file; done
> $ git diff --stat
> ...
>  135 files changed, 437 insertions(+), 437 deletions(-)
> 
> $ ninja -C build
> ...
> 
> 
> There are still some exposed defines, but it is a first step forward.
> $ git grep RTE_LIBRTE_ | wc
>     306    1010   22337
> 
> 
> 
> --
> David Marchand



RE: [PATCH v6 00/10] net/cpfl: support port representor

2023-09-12 Thread Wu, Jingjing



> -Original Message-
> From: Xing, Beilei 
> Sent: Wednesday, September 13, 2023 1:30 AM
> To: Wu, Jingjing 
> Cc: dev@dpdk.org; Liu, Mingxia ; Xing, Beilei
> 
> Subject: [PATCH v6 00/10] net/cpfl: support port representor
> 
> From: Beilei Xing 

Acked-by: Jingjing Wu 


RE: [PATCH v6 00/10] net/cpfl: support port representor

2023-09-12 Thread Zhang, Qi Z



> -Original Message-
> From: Wu, Jingjing 
> Sent: Wednesday, September 13, 2023 9:01 AM
> To: Xing, Beilei 
> Cc: dev@dpdk.org; Liu, Mingxia 
> Subject: RE: [PATCH v6 00/10] net/cpfl: support port representor
> 
> 
> 
> > -Original Message-
> > From: Xing, Beilei 
> > Sent: Wednesday, September 13, 2023 1:30 AM
> > To: Wu, Jingjing 
> > Cc: dev@dpdk.org; Liu, Mingxia ; Xing, Beilei
> > 
> > Subject: [PATCH v6 00/10] net/cpfl: support port representor
> >
> > From: Beilei Xing 
> 
> Acked-by: Jingjing Wu 

Applied to dpdk-next-net-intel.

Thanks
Qi


RE: [PATCH] net/iavf: fix ESN session update

2023-09-12 Thread Zhang, Qi Z



> -Original Message-
> From: Radu Nicolau 
> Sent: Monday, September 11, 2023 5:21 PM
> To: Wu, Jingjing ; Xing, Beilei 
> Cc: dev@dpdk.org; Nicolau, Radu ;
> sta...@dpdk.org
> Subject: [PATCH] net/iavf: fix ESN session update
> 
> Update both high and low section of the ESN.
> 
> Fixes: 6bc987ecb860 ("net/iavf: support IPsec inline crypto")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Radu Nicolau 

Reviewed-by: Qi Zhang 

Applied to dpdk-next-net-intel.

Thanks
Qi



RE: [PATCH v3] common/idpf: refactor single queue Tx function

2023-09-12 Thread Wu, Wenjun1



> -Original Message-
> From: Su, Simei 
> Sent: Friday, September 8, 2023 6:28 PM
> To: Wu, Jingjing ; Xing, Beilei 
> ;
> Zhang, Qi Z 
> Cc: dev@dpdk.org; Wu, Wenjun1 ; Su, Simei
> 
> Subject: [PATCH v3] common/idpf: refactor single queue Tx function
> 
> This patch replaces flex Tx descriptor with base Tx descriptor to align with
> kernel driver practice.
> 
> Signed-off-by: Simei Su 
> ---
> v3:
> * Change context TSO descriptor from base mode to flex mode.
> 
> v2:
> * Refine commit title and commit log.
> * Remove redundant definition.
> * Modify base mode context TSO descriptor.
> 
>  drivers/common/idpf/idpf_common_rxtx.c| 39 +--
>  drivers/common/idpf/idpf_common_rxtx.h|  2 +-
>  drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
>  drivers/net/idpf/idpf_rxtx.c  |  2 +-
>  4 files changed, 39 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/common/idpf/idpf_common_rxtx.c
> b/drivers/common/idpf/idpf_common_rxtx.c
> index fc87e3e243..e6d2486272 100644
> --- a/drivers/common/idpf/idpf_common_rxtx.c
> +++ b/drivers/common/idpf/idpf_common_rxtx.c
> @@ -276,14 +276,14 @@ idpf_qc_single_tx_queue_reset(struct
> idpf_tx_queue *txq)
>   }
> 
>   txe = txq->sw_ring;
> - size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
> + size = sizeof(struct idpf_base_tx_desc) * txq->nb_tx_desc;
>   for (i = 0; i < size; i++)
>   ((volatile char *)txq->tx_ring)[i] = 0;
> 
>   prev = (uint16_t)(txq->nb_tx_desc - 1);
>   for (i = 0; i < txq->nb_tx_desc; i++) {
> - txq->tx_ring[i].qw1.cmd_dtype =
> -
>   rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
> + txq->tx_ring[i].qw1 =
> +
>   rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE);
>   txe[i].mbuf =  NULL;
>   txe[i].last_id = i;
>   txe[prev].next_id = i;
> @@ -1307,17 +1307,16 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
>   uint16_t nb_tx_to_clean;
>   uint16_t i;
> 
> - volatile struct idpf_flex_tx_desc *txd = txq->tx_ring;
> + volatile struct idpf_base_tx_desc *txd = txq->tx_ring;
> 
>   desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->rs_thresh);
>   if (desc_to_clean_to >= nb_tx_desc)
>   desc_to_clean_to = (uint16_t)(desc_to_clean_to -
> nb_tx_desc);
> 
>   desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
> - /* In the writeback Tx desccriptor, the only significant fields are the 
> 4-
> bit DTYPE */
> - if ((txd[desc_to_clean_to].qw1.cmd_dtype &
> -  rte_cpu_to_le_16(IDPF_TXD_QW1_DTYPE_M)) !=
> - rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
> + if ((txd[desc_to_clean_to].qw1 &
> +  rte_cpu_to_le_64(IDPF_TXD_QW1_DTYPE_M)) !=
> + rte_cpu_to_le_64(IDPF_TX_DESC_DTYPE_DESC_DONE)) {
>   TX_LOG(DEBUG, "TX descriptor %4u is not done "
>  "(port=%d queue=%d)", desc_to_clean_to,
>  txq->port_id, txq->queue_id);
> @@ -1331,10 +1330,7 @@ idpf_xmit_cleanup(struct idpf_tx_queue *txq)
>   nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
>   last_desc_cleaned);
> 
> - txd[desc_to_clean_to].qw1.cmd_dtype = 0;
> - txd[desc_to_clean_to].qw1.buf_size = 0;
> - for (i = 0; i < RTE_DIM(txd[desc_to_clean_to].qw1.flex.raw); i++)
> - txd[desc_to_clean_to].qw1.flex.raw[i] = 0;
> + txd[desc_to_clean_to].qw1 = 0;
> 
>   txq->last_desc_cleaned = desc_to_clean_to;
>   txq->nb_free = (uint16_t)(txq->nb_free + nb_tx_to_clean); @@ -
> 1347,8 +1343,8 @@ uint16_t  idpf_dp_singleq_xmit_pkts(void *tx_queue,
> struct rte_mbuf **tx_pkts,
> uint16_t nb_pkts)
>  {
> - volatile struct idpf_flex_tx_desc *txd;
> - volatile struct idpf_flex_tx_desc *txr;
> + volatile struct idpf_base_tx_desc *txd;
> + volatile struct idpf_base_tx_desc *txr;
>   union idpf_tx_offload tx_offload = {0};
>   struct idpf_tx_entry *txe, *txn;
>   struct idpf_tx_entry *sw_ring;
> @@ -1356,6 +1352,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
>   struct rte_mbuf *tx_pkt;
>   struct rte_mbuf *m_seg;
>   uint64_t buf_dma_addr;
> + uint32_t td_offset;
>   uint64_t ol_flags;
>   uint16_t tx_last;
>   uint16_t nb_used;
> @@ -1382,6 +1379,7 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
> 
>   for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
>   td_cmd = 0;
> + td_offset = 0;
> 
>   tx_pkt = *tx_pkts++;
>   RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
> @@ -1462,9 +1460,10 @@ idpf_dp_singleq_xmit_pkts(void *tx_queue, struct
> rte_mbuf **tx_pkts,
>   slen = m_seg->data_len;
>   buf_dma_addr = rte_mbuf_data_iova(m_seg);
>   txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);
> -  

RE: [PATCH v3] common/idpf: refactor single queue Tx function

2023-09-12 Thread Xing, Beilei



> -Original Message-
> From: Su, Simei 
> Sent: Friday, September 8, 2023 6:28 PM
> To: Wu, Jingjing ; Xing, Beilei 
> ;
> Zhang, Qi Z 
> Cc: dev@dpdk.org; Wu, Wenjun1 ; Su, Simei
> 
> Subject: [PATCH v3] common/idpf: refactor single queue Tx function
> 
> This patch replaces flex Tx descriptor with base Tx descriptor to align with 
> kernel
> driver practice.
> 
> Signed-off-by: Simei Su 
> ---
> v3:
> * Change context TSO descriptor from base mode to flex mode.
> 
> v2:
> * Refine commit title and commit log.
> * Remove redundant definition.
> * Modify base mode context TSO descriptor.
> 
>  drivers/common/idpf/idpf_common_rxtx.c| 39 +--
>  drivers/common/idpf/idpf_common_rxtx.h|  2 +-
>  drivers/common/idpf/idpf_common_rxtx_avx512.c | 37 +-
>  drivers/net/idpf/idpf_rxtx.c  |  2 +-
>  4 files changed, 39 insertions(+), 41 deletions(-)
> 


> diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c index
> 3e3d81ca6d..64f2235580 100644
> --- a/drivers/net/idpf/idpf_rxtx.c
> +++ b/drivers/net/idpf/idpf_rxtx.c
> @@ -74,7 +74,7 @@ idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t
> queue_idx,
>   ring_size = RTE_ALIGN(len * sizeof(struct
> idpf_flex_tx_sched_desc),
> IDPF_DMA_MEM_ALIGN);
>   else
> - ring_size = RTE_ALIGN(len * sizeof(struct
> idpf_flex_tx_desc),
> + ring_size = RTE_ALIGN(len * sizeof(struct
> idpf_base_tx_desc),

Please check whether idpf_flex_tx_desc is still used in the cpfl PMD.

> IDPF_DMA_MEM_ALIGN);
>   rte_memcpy(ring_name, "idpf Tx ring", sizeof("idpf Tx ring"));
>   break;
> --
> 2.25.1