From: Yuying Zhang <yuying.zh...@intel.com>

Add flow ops support for the representor, so that the representor can
create, destroy, validate and flush rules.
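
For reference, the new ops are exercised through the generic rte_flow API
on the representor's port id. A minimal usage sketch follows; the port ids,
attributes, match pattern and forwarding target are illustrative only and
not part of this patch (adjust them to what the FXP JSON parser accepts):

#include <rte_flow.h>

/* Hypothetical helper: validate, create, destroy and flush one rule on a
 * representor port; error handling is kept to the bare minimum.
 */
static int
repr_flow_example(uint16_t repr_port_id, uint16_t target_port_id)
{
        struct rte_flow_error err;
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_ethdev port = { .port_id = target_port_id };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT, .conf = &port },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow *flow;

        /* validate then create the rule on the representor port */
        if (rte_flow_validate(repr_port_id, &attr, pattern, actions, &err) != 0)
                return -1;
        flow = rte_flow_create(repr_port_id, &attr, pattern, actions, &err);
        if (flow == NULL)
                return -1;

        /* destroy a single rule, or flush every rule on the port */
        rte_flow_destroy(repr_port_id, flow, &err);
        return rte_flow_flush(repr_port_id, &err);
}

The same calls that already work on a vport now also work on a representor
port: the FXP engine derives the representor's control queue pair from its
PF/VF id instead of the vport devarg id.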

Signed-off-by: Yuying Zhang <yuying.zh...@intel.com>
Acked-by: Qi Zhang <qi.z.zh...@intel.com>
---
 drivers/net/cpfl/cpfl_flow_engine_fxp.c | 74 +++++++++++++++++++++++++
 drivers/net/cpfl/cpfl_representor.c     | 29 ++++++++++
 2 files changed, 103 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_flow_engine_fxp.c b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
index 4c7b4deb7a..7a3376f9f6 100644
--- a/drivers/net/cpfl/cpfl_flow_engine_fxp.c
+++ b/drivers/net/cpfl/cpfl_flow_engine_fxp.c
@@ -72,6 +72,7 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
        struct cpfl_adapter_ext *ad = itf->adapter;
        struct cpfl_rule_info_meta *rim = meta;
        struct cpfl_vport *vport;
+       struct cpfl_repr *repr;
 
        if (!rim)
                return ret;
@@ -82,6 +83,10 @@ cpfl_fxp_create(struct rte_eth_dev *dev,
                 * Even index is tx queue and odd index is rx queue.
                 */
                cpq_id = vport->base.devarg_id * 2;
+       } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+               repr = (struct cpfl_repr *)itf;
+               cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) &
+                         (CPFL_TX_CFGQ_NUM - 1)) * 2;
        } else {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "fail to find correct control queue");
@@ -121,6 +126,7 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
        struct cpfl_rule_info_meta *rim;
        uint32_t i;
        struct cpfl_vport *vport;
+       struct cpfl_repr *repr;
 
        rim = flow->rule;
        if (!rim) {
@@ -134,6 +140,10 @@ cpfl_fxp_destroy(struct rte_eth_dev *dev,
        if (itf->type == CPFL_ITF_TYPE_VPORT) {
                vport = (struct cpfl_vport *)itf;
                cpq_id = vport->base.devarg_id * 2;
+       } else if (itf->type == CPFL_ITF_TYPE_REPRESENTOR) {
+               repr = (struct cpfl_repr *)itf;
+               cpq_id = ((repr->repr_id.pf_id + repr->repr_id.vf_id) &
+                         (CPFL_TX_CFGQ_NUM - 1)) * 2;
        } else {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "fail to find correct control queue");
@@ -413,6 +423,64 @@ cpfl_is_mod_action(const struct rte_flow_action actions[])
        return false;
 }
 
+static bool
+cpfl_fxp_get_metadata_port(struct cpfl_itf *itf,
+                          const struct rte_flow_action actions[])
+{
+       const struct rte_flow_action *action;
+       enum rte_flow_action_type action_type;
+       const struct rte_flow_action_ethdev *ethdev;
+       struct cpfl_itf *target_itf;
+       bool ret;
+
+       if (itf->type == CPFL_ITF_TYPE_VPORT) {
+               ret = cpfl_metadata_write_port_id(itf);
+               if (!ret) {
+                       PMD_DRV_LOG(ERR, "fail to write port id");
+                       return false;
+               }
+       }
+
+       ret = cpfl_metadata_write_sourcevsi(itf);
+       if (!ret) {
+               PMD_DRV_LOG(ERR, "fail to write source vsi id");
+               return false;
+       }
+
+       ret = cpfl_metadata_write_vsi(itf);
+       if (!ret) {
+               PMD_DRV_LOG(ERR, "fail to write vsi id");
+               return false;
+       }
+
+       if (!actions || actions->type == RTE_FLOW_ACTION_TYPE_END)
+               return false;
+
+       for (action = actions; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+               action_type = action->type;
+               switch (action_type) {
+               case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT:
+               case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR:
+                       ethdev = (const struct rte_flow_action_ethdev *)action->conf;
+                       target_itf = cpfl_get_itf_by_port_id(ethdev->port_id);
+                       if (!target_itf) {
+                               PMD_DRV_LOG(ERR, "fail to get target_itf by 
port id");
+                               return false;
+                       }
+                       ret = cpfl_metadata_write_targetvsi(target_itf);
+                       if (!ret) {
+                               PMD_DRV_LOG(ERR, "fail to write target vsi id");
+                               return false;
+                       }
+                       break;
+               default:
+                       continue;
+               }
+       }
+
+       return true;
+}
+
 static int
 cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
@@ -429,6 +497,12 @@ cpfl_fxp_parse_pattern_action(struct rte_eth_dev *dev,
        struct cpfl_rule_info_meta *rim;
        int ret;
 
+       ret = cpfl_fxp_get_metadata_port(itf, actions);
+       if (!ret) {
+               PMD_DRV_LOG(ERR, "Fail to save metadata.");
+               return -EINVAL;
+       }
+
        ret = cpfl_flow_parse_items(itf, adapter->flow_parser, pattern, attr, &pr_action);
        if (ret) {
                PMD_DRV_LOG(ERR, "No Match pattern support.");
diff --git a/drivers/net/cpfl/cpfl_representor.c b/drivers/net/cpfl/cpfl_representor.c
index 4d15a26c80..de3b426727 100644
--- a/drivers/net/cpfl/cpfl_representor.c
+++ b/drivers/net/cpfl/cpfl_representor.c
@@ -4,6 +4,8 @@
 
 #include "cpfl_representor.h"
 #include "cpfl_rxtx.h"
+#include "cpfl_flow.h"
+#include "cpfl_rules.h"
 
 static int
 cpfl_repr_allowlist_update(struct cpfl_adapter_ext *adapter,
@@ -374,6 +376,22 @@ cpfl_repr_link_update(struct rte_eth_dev *ethdev,
        return 0;
 }
 
+static int
+cpfl_dev_repr_flow_ops_get(struct rte_eth_dev *dev,
+                          const struct rte_flow_ops **ops)
+{
+       if (!dev)
+               return -EINVAL;
+
+#ifdef RTE_HAS_JANSSON
+       *ops = &cpfl_flow_ops;
+#else
+       *ops = NULL;
+       PMD_DRV_LOG(NOTICE, "not support rte_flow, please install json-c 
library.");
+#endif
+       return 0;
+}
+
 static const struct eth_dev_ops cpfl_repr_dev_ops = {
        .dev_start              = cpfl_repr_dev_start,
        .dev_stop               = cpfl_repr_dev_stop,
@@ -385,6 +403,7 @@ static const struct eth_dev_ops cpfl_repr_dev_ops = {
        .tx_queue_setup         = cpfl_repr_tx_queue_setup,
 
        .link_update            = cpfl_repr_link_update,
+       .flow_ops_get           = cpfl_dev_repr_flow_ops_get,
 };
 
 static int
@@ -393,6 +412,7 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
        struct cpfl_repr *repr = CPFL_DEV_TO_REPR(eth_dev);
        struct cpfl_repr_param *param = init_param;
        struct cpfl_adapter_ext *adapter = param->adapter;
+       int ret;
 
        repr->repr_id = param->repr_id;
        repr->vport_info = param->vport_info;
@@ -402,6 +422,15 @@ cpfl_repr_init(struct rte_eth_dev *eth_dev, void *init_param)
        if (repr->vport_info->vport.info.vport_status == CPCHNL2_VPORT_STATUS_ENABLED)
                repr->func_up = true;
 
+       TAILQ_INIT(&repr->itf.flow_list);
+       memset(repr->itf.dma, 0, sizeof(repr->itf.dma));
+       memset(repr->itf.msg, 0, sizeof(repr->itf.msg));
+       ret = cpfl_alloc_dma_mem_batch(&repr->itf.flow_dma, repr->itf.dma,
+                                      sizeof(union cpfl_rule_cfg_pkt_record),
+                                      CPFL_FLOW_BATCH_SIZE);
+       if (ret < 0)
+               return ret;
+
        eth_dev->dev_ops = &cpfl_repr_dev_ops;
 
        eth_dev->data->dev_flags |= RTE_ETH_DEV_REPRESENTOR;
-- 
2.34.1
