RE: [EXT] [PATCH 2/3] crypto/scheduler: use unified session

2022-09-18 Thread Akhil Goyal
> diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
> index c975c38001..e181b0aa3e 100644
> --- a/app/test/test_cryptodev.c
> +++ b/app/test/test_cryptodev.c
> @@ -14986,8 +14986,8 @@ test_scheduler_attach_worker_op(void)
>   ts_params->session_mpool =
>   rte_cryptodev_sym_session_pool_create(
>   "test_sess_mp",
> - MAX_NB_SESSIONS, 0, 0, 0,
> - SOCKET_ID_ANY);
> + MAX_NB_SESSIONS, session_size,
> + 0, 0, SOCKET_ID_ANY);
>   TEST_ASSERT_NOT_NULL(ts_params->session_mpool,
>   "session mempool allocation failed");
>   }
This change should not be part of this patch. Please move it to the other patch.
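
For context only, a sketch of where session_size for this mempool would typically
come from (not taken from this series): the largest per-device symmetric session
size among the crypto devices, so the pool fits a session for any attached worker.

#include <rte_cryptodev.h>

/* Sketch: size the pool elements for the largest session any device needs. */
static unsigned int
max_sym_session_size(void)
{
	unsigned int session_size = 0;
	uint8_t dev_id;

	for (dev_id = 0; dev_id < rte_cryptodev_count(); dev_id++) {
		unsigned int sz =
			rte_cryptodev_sym_get_private_session_size(dev_id);
		if (sz > session_size)
			session_size = sz;
	}
	return session_size;
}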


RE: [EXT] [PATCH 3/3] cryptodev: hide sym session structure

2022-09-18 Thread Akhil Goyal
> +#define CRYPTO_SESS_OPAQUE_DATA_OFF 0

CRYPTO_SESS_OPAQUE_DATA_OFF cannot be 0, as you have added a driver_id at the
start of the struct.


> +/**
> + * Get opaque data from session handle
> + */
> +static inline uint64_t
> +rte_cryptodev_sym_session_opaque_data_get(void *sess)
> +{
> + return *((uint64_t *)sess - CRYPTO_SESS_OPAQUE_DATA_OFF);
> +}
> +
> +/**
> + * Set opaque data in session handle
> + */
> +static inline void
> +rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
> +{
> + uint64_t *data;
> + data = (((uint64_t *)sess) - CRYPTO_SESS_OPAQUE_DATA_OFF);
> + *data = opaque;
> +}
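
To illustrate the concern, a hypothetical layout (field names and ordering
assumed, not the actual struct definition):

struct hypothetical_sym_session {
	uint8_t  driver_id;    /* new field at the start of the struct    */
	/* compiler padding */
	uint64_t opaque_data;  /* no longer reachable with an offset of 0 */
	/* driver private session data follows */
};

With such a layout, an offset of 0 makes the getter read driver_id (plus
padding) instead of the opaque value, so the macro has to account for the
fields placed before opaque_data.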




Re: Cannot set affinity - pthread_setaffinity_np - DPDK21

2022-09-18 Thread Kamaraj P
Hi Stephen,
Yes. The core mask we have is aligned to a separate cgroup. Probably this
might be the issue here. We will change the alignment in our DPDK
application. BTW, is there a way to check the core affinity set for DPDK
after rte_eal_init? Do we have to use the taskset command?
Please advise.

Thanks,
Kamaraj

On Fri, Sep 16, 2022 at 8:43 PM Stephen Hemminger <
step...@networkplumber.org> wrote:

> On Fri, 16 Sep 2022 09:37:32 +0530
> Kamaraj P  wrote:
>
> > Hi Team,
> > I have sent a message to the DPDK alias. Can you please have a look and
> > share your thoughts on this?
>
> Top posting and nag messages just make developers angry
>
> >
> > On Thu, Sep 15, 2022 at 10:27 PM Kamaraj P  wrote:
> >
> > > Hi Team,
> > >
> > > There is a lot of discussion about the code below:
> > >  rte_panic("Cannot set affinity %d i %d threadid %d\n", ret, i,
> > > lcore_config[i].thread_id);
> > >
> > > Is there any guidance (or patch) from the DPDK community about avoiding
> > > these panic messages in our DPDK application? Even if we remove the panic
> > > and replace it with a warning, will it lead to any impact?
>
> Did you check that the core mask passed into the application is valid?
> Or that your application is not running in a container which blocks
> access to other CPUs.
>
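
For the affinity question above: a minimal sketch of printing what each lcore
worker is actually pinned to after rte_eal_init() (assumes Linux/glibc and
pthread_getaffinity_np(); from the shell, taskset -p on the thread IDs gives
the same information):

#include <sched.h>
#include <stdio.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_launch.h>
#include <rte_lcore.h>

/* Runs on each worker lcore and prints the CPU set it is pinned to. */
static int
print_affinity(__rte_unused void *arg)
{
	cpu_set_t set;
	unsigned int cpu;

	if (pthread_getaffinity_np(pthread_self(), sizeof(set), &set) != 0)
		return -1;

	printf("lcore %u affinity:", rte_lcore_id());
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, &set))
			printf(" %u", cpu);
	printf("\n");
	return 0;
}

/* After rte_eal_init():
 *	rte_eal_mp_remote_launch(print_affinity, NULL, CALL_MAIN);
 *	rte_eal_mp_wait_lcore();
 */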


[PATCH] net/iavf: fix error of virtchnl command

2022-09-18 Thread Yiding Zhou
When the device is bonded, the bond PMD registers a callback for the LSC
event. This callback executes some virtchnl commands in the eal-intr-thread
to reinitialize the device with interrupts disabled. In this case, responses
to the commands cannot be received.

This commit starts a thread to handle all events to fix this issue.

Fixes: 48de41ca11f0 ("net/avf: enable link status update")
CC: sta...@dpdk.org

Signed-off-by: Yiding Zhou 
---
 drivers/net/iavf/iavf.h|   2 +
 drivers/net/iavf/iavf_ethdev.c |   5 ++
 drivers/net/iavf/iavf_vchnl.c  | 130 -
 3 files changed, 136 insertions(+), 1 deletion(-)

diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 025ab3ff60..7c215e1797 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -417,6 +417,8 @@ _atomic_set_async_response_cmd(struct iavf_info *vf, enum virtchnl_ops ops)
 }
 int iavf_check_api_version(struct iavf_adapter *adapter);
 int iavf_get_vf_resource(struct iavf_adapter *adapter);
+void iavf_dev_event_handler_fini(void);
+int iavf_dev_event_handler_init(void);
 void iavf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int iavf_enable_vlan_strip(struct iavf_adapter *adapter);
 int iavf_disable_vlan_strip(struct iavf_adapter *adapter);
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index 506fcff6e3..a584b8918d 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -2629,6 +2629,9 @@ iavf_dev_init(struct rte_eth_dev *eth_dev)
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
&eth_dev->data->mac_addrs[0]);
 
+   if (iavf_dev_event_handler_init())
+   goto init_vf_err;
+
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) {
/* register callback func to eal lib */
rte_intr_callback_register(pci_dev->intr_handle,
@@ -2783,6 +2786,8 @@ iavf_dev_uninit(struct rte_eth_dev *dev)
 
iavf_dev_close(dev);
 
+   iavf_dev_event_handler_fini();
+
return 0;
 }
 
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 21bd1e2193..5f3dd8779e 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -2,6 +2,8 @@
  * Copyright(c) 2017 Intel Corporation
  */
 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -27,6 +29,132 @@
 #define MAX_TRY_TIMES 2000
 #define ASQ_DELAY_MS  1
 
+struct iavf_arq_event_element {
+   TAILQ_ENTRY(iavf_arq_event_element) next;
+   struct rte_eth_dev *dev;
+   enum rte_eth_event_type event;
+   void *param;
+};
+
+struct iavf_event_handler {
+   rte_atomic32_t ndev;
+   pthread_t tid;
+   int fd[2];
+   pthread_mutex_t lock;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+};
+
+static struct iavf_event_handler event_handler = {
+   .fd = {-1, -1},
+};
+
+#ifndef TAILQ_FOREACH_SAFE
+#define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
+   for ((var) = TAILQ_FIRST((head)); \
+   (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
+   (var) = (tvar))
+#endif
+
+static void *
+iavf_dev_event_handle(void *param __rte_unused)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   TAILQ_HEAD(event_list, iavf_arq_event_element) pending;
+
+   while (true) {
+   char unused;
+   int nr = read(handler->fd[0], &unused, 1);
+   if (nr <= 0)
+   break;
+
+   TAILQ_INIT(&pending);
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_CONCAT(&pending, &handler->pending, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   struct iavf_arq_event_element *pos, *save_next;
+   TAILQ_FOREACH_SAFE(pos, &pending, next, save_next) {
+   TAILQ_REMOVE(&pending, pos, next);
+   rte_eth_dev_callback_process(pos->dev, pos->event, pos->param);
+   rte_free(pos);
+   }
+   }
+   return NULL;
+}
+
+static void
+iavf_dev_event_post(struct rte_eth_dev *dev,
+   enum rte_eth_event_type event,
+   void *param)
+{
+   struct iavf_event_handler *handler = &event_handler;
+   char notify_byte;
+   struct iavf_arq_event_element *elem = rte_malloc(NULL, sizeof(*elem), 0);
+   if (!elem)
+   return;
+
+   elem->dev = dev;
+   elem->event = event;
+   elem->param = param;
+
+   pthread_mutex_lock(&handler->lock);
+   TAILQ_INSERT_TAIL(&handler->pending, elem, next);
+   pthread_mutex_unlock(&handler->lock);
+
+   write(handler->fd[1], &notify_byte, 1);
+}
+
+int
+iavf_dev_event_handler_init(void)
+{
+   struct iavf_event_handler *handler = &event_handler;
+
+   if (rte_atomic32_add_return(&handler->ndev, 1) != 1)
+   return 0;
+
+   if (pipe(handler->fd)) {
+   rte_atomic32_dec(&han