> -----Original Message-----
> From: Zhang, Yuying <yuying.zh...@intel.com>
> Sent: Saturday, August 12, 2023 3:55 PM
> To: dev@dpdk.org; Xing, Beilei <beilei.x...@intel.com>; Zhang, Qi Z
> <qi.z.zh...@intel.com>; Wu, Jingjing <jingjing...@intel.com>
> Cc: Zhang, Yuying <yuying.zh...@intel.com>
> Subject: [PATCH v1 4/5] net/cpfl: add fxp rule module
>
> Added low level fxp module for rule packing / creation / destroying.
>
> Signed-off-by: Yuying Zhang <yuying.zh...@intel.com>
> ---
> drivers/net/cpfl/cpfl_ethdev.h | 4 +
> drivers/net/cpfl/cpfl_fxp_rule.c | 288 +++++++++++++++++++++++++++++++
> drivers/net/cpfl/cpfl_fxp_rule.h | 87 ++++++++++
> drivers/net/cpfl/meson.build | 1 +
> 4 files changed, 380 insertions(+)
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.c
> create mode 100644 drivers/net/cpfl/cpfl_fxp_rule.h
>
> diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
> index c71f16ac60..63bcc5551f 100644
> --- a/drivers/net/cpfl/cpfl_ethdev.h
> +++ b/drivers/net/cpfl/cpfl_ethdev.h
> @@ -145,10 +145,14 @@ enum cpfl_itf_type {
>
> TAILQ_HEAD(cpfl_flow_list, rte_flow);
>
> +#define CPFL_FLOW_BATCH_SIZE 490
> struct cpfl_itf {
> enum cpfl_itf_type type;
> struct cpfl_adapter_ext *adapter;
> struct cpfl_flow_list flow_list;
> + struct idpf_dma_mem flow_dma;
> + struct idpf_dma_mem dma[CPFL_FLOW_BATCH_SIZE];
> + struct idpf_ctlq_msg msg[CPFL_FLOW_BATCH_SIZE];
> void *data;
> };
>
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.c b/drivers/net/cpfl/cpfl_fxp_rule.c
> new file mode 100644
> index 0000000000..936f57e4fa
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.c
> @@ -0,0 +1,288 @@
<...>
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
> + struct idpf_ctlq_msg q_msg[])
> +{
> + int retries = 0;
> + struct idpf_dma_mem *dma;
> + uint16_t i;
> + uint16_t buff_cnt;
> + int ret = 0;
> +
> + retries = 0;
> + while (retries <= CTLQ_RECEIVE_RETRIES) {
> + rte_delay_us_sleep(10);
> + ret = cpfl_vport_ctlq_recv(cq, &num_q_msg, &q_msg[0]);
> +
> + if (ret && ret != CPFL_ERR_CTLQ_NO_WORK &&
> + ret != CPFL_ERR_CTLQ_ERROR) {
> + PMD_INIT_LOG(ERR, "failed to recv ctrlq msg. err: 0x%4x\n", ret);
> + retries++;
> + continue;
> + }
> +
> + if (ret == CPFL_ERR_CTLQ_NO_WORK) {
> + retries++;
> + continue;
> + }
> +
> + if (ret == CPFL_ERR_CTLQ_EMPTY)
> + break;
> +
> + ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
> + if (ret) {
> + PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");
> + break;
Don't break here; the receive buffers still need to be posted back to the recv ring even when processing fails.
Please check the internal fix patch.
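Roughly something like this, reusing the code already in this patch (untested sketch; the internal fix is authoritative):

		ret = cpfl_process_rx_ctlq_msg(num_q_msg, q_msg);
		if (ret)
			PMD_INIT_LOG(WARNING, "failed to process rx_ctrlq msg");

		/* post the receive buffers back to the ring even on failure */
		for (i = 0; i < num_q_msg; i++) {
			if (q_msg[i].data_len > 0)
				dma = q_msg[i].ctx.indirect.payload;
			else
				dma = NULL;

			buff_cnt = dma ? 1 : 0;
			if (cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma))
				PMD_INIT_LOG(WARNING, "could not post recv bufs");
		}
		break;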
> + }
> +
> + for (i = 0; i < num_q_msg; i++) {
> + if (q_msg[i].data_len > 0)
> + dma = q_msg[i].ctx.indirect.payload;
> + else
> + dma = NULL;
> +
> + buff_cnt = dma ? 1 : 0;
> + ret = cpfl_vport_ctlq_post_rx_buffs(hw, cq, &buff_cnt, &dma);
> + if (ret)
> + PMD_INIT_LOG(WARNING, "could not posted
> recv bufs\n");
> + }
> + break;
> + }
> +
> + if (retries > CTLQ_RECEIVE_RETRIES) {
> + PMD_INIT_LOG(ERR, "timed out while polling for receive
> response");
> + ret = -1;
> + }
> +
> + return ret;
> +}
> +
> +static int
> +pack_mod_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
Please follow the driver's function naming style; how about cpfl_mod_rule_pack?
> + struct idpf_ctlq_msg *msg)
<...>
> +
> +static int pack_default_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
Same style comment here: static int on its own line, and rename to cpfl_default_rule_pack.
> + struct idpf_ctlq_msg *msg, bool add) {
<...>
> +
> +static int pack_rule(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
Same here: static int on its own line, and rename to cpfl_rule_pack.
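Putting the three renames together, the definitions would then start like this (signatures taken from this patch; only the names and the line split are suggestions):

static int
cpfl_mod_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		   struct idpf_ctlq_msg *msg)

static int
cpfl_default_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
		       struct idpf_ctlq_msg *msg, bool add)

static int
cpfl_rule_pack(struct cpfl_rule_info *rinfo, struct idpf_dma_mem *dma,
	       struct idpf_ctlq_msg *msg, bool add)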
> + struct idpf_ctlq_msg *msg, bool add) {
> + int ret = 0;
> +
> + if (rinfo->type == CPFL_RULE_TYPE_SEM) {
> + if (pack_default_rule(rinfo, dma, msg, add) < 0)
> + ret = -1;
> + } else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
> + if (pack_mod_rule(rinfo, dma, msg) < 0)
> + ret = -1;
> + }
Need to check for an invalid rinfo->type here, e.g. CPFL_RULE_TYPE_LEM? Right now any other type silently returns 0.
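For example, with an explicit else branch (just a sketch; pick whatever error message/value fits):

	if (rinfo->type == CPFL_RULE_TYPE_SEM) {
		if (pack_default_rule(rinfo, dma, msg, add) < 0)
			ret = -1;
	} else if (rinfo->type == CPFL_RULE_TYPE_MOD) {
		if (pack_mod_rule(rinfo, dma, msg) < 0)
			ret = -1;
	} else {
		PMD_INIT_LOG(ERR, "invalid rule type %d", rinfo->type);
		ret = -1;
	}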
> +
> + return ret;
> +}
> +
> +int
> +cpfl_rule_update(struct cpfl_itf *itf,
> + struct idpf_ctlq_info *tx_cq,
> + struct idpf_ctlq_info *rx_cq,
> + struct cpfl_rule_info *rinfo,
> + int rule_num,
> + bool add)
> +{
> + struct idpf_hw *hw = &itf->adapter->base.hw;
> + int i;
> + int ret = 0;
> +
> + if (rule_num == 0)
> + return 0;
> +
> + for (i = 0; i < rule_num; i++) {
> + ret = pack_rule(&rinfo[i], &itf->dma[i], &itf->msg[i], add);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Could not create rule");
Should this be "Could not pack rule"? No rule has been created at this point.
> + return ret;
> + }
> + }
> + ret = cpfl_send_ctlq_msg(hw, tx_cq, rule_num, itf->msg);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to send rule");
> + return ret;
> + }
> + ret = cpfl_receive_ctlq_msg(hw, rx_cq, rule_num, itf->msg);
> + if (ret) {
> + PMD_INIT_LOG(ERR, "Failed to create rule");
Is this function for updating a rule or creating one?
The function name is cpfl_rule_update, but it seems to create the rule.
> + return ret;
> + }
> +
> + return 0;
> +}
> diff --git a/drivers/net/cpfl/cpfl_fxp_rule.h b/drivers/net/cpfl/cpfl_fxp_rule.h
> new file mode 100644
> index 0000000000..68efa8e3f8
> --- /dev/null
> +++ b/drivers/net/cpfl/cpfl_fxp_rule.h
> @@ -0,0 +1,87 @@
<...>
> +
> +int cpfl_rule_update(struct cpfl_itf *itf,
> + struct idpf_ctlq_info *tx_cq,
> + struct idpf_ctlq_info *rx_cq,
> + struct cpfl_rule_info *rinfo,
> + int rule_num,
> + bool add);
> +int
> +cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
> + struct idpf_ctlq_msg q_msg[]);
> +int
> +cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq, uint16_t num_q_msg,
No need for the new line between the return type and the function name in these prototypes; declarations can keep them on one line.
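Something like:

int cpfl_send_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
		       uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);
int cpfl_receive_ctlq_msg(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
			  uint16_t num_q_msg, struct idpf_ctlq_msg q_msg[]);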
> + struct idpf_ctlq_msg q_msg[]);
> +#endif /*CPFL_FXP_RULE_H*/
> diff --git a/drivers/net/cpfl/meson.build b/drivers/net/cpfl/meson.build
> index 222497f7c2..4061123034 100644
> --- a/drivers/net/cpfl/meson.build
> +++ b/drivers/net/cpfl/meson.build
> @@ -46,6 +46,7 @@ if js_dep.found()
> 'cpfl_flow_parser.c',
> 'cpfl_rules.c',
> 'cpfl_controlq.c',
> + 'cpfl_fxp_rule.c',
> )
> dpdk_conf.set('CPFL_FLOW_JSON_SUPPORT', true)
> ext_deps += js_dep
> --
> 2.25.1