Add a function to create the flow director filter.

Signed-off-by: Wei Zhao <wei.zh...@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
---

v2:
--add new error set function
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 240 ++++++++++++++++++++++++++++++++++++++-
 drivers/net/ixgbe/ixgbe_ethdev.h |   5 +
 2 files changed, 244 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c98aa0d..1c857fc 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -468,6 +468,11 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
+static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error);
 static int ixgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);
 /*
@@ -850,11 +855,65 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
                sizeof(rte_ixgbevf_stats_strings[0]))
 static const struct rte_flow_ops ixgbe_flow_ops = {
        ixgbe_flow_validate,
-       NULL,
+       ixgbe_flow_create,
        NULL,
        ixgbe_flow_flush,
        NULL,
 };
+
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+       TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+       struct rte_eth_ntuple_filter filter_info;
+};
+
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+       TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+       struct rte_eth_ethertype_filter filter_info;
+};
+
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+       TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+       struct rte_eth_syn_filter filter_info;
+};
+
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+       TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+       struct ixgbe_fdir_rule filter_info;
+};
+
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+       TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+       struct rte_eth_l2_tunnel_conf filter_info;
+};
+
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+       TAILQ_ENTRY(ixgbe_flow_mem) entries;
+       struct ixgbe_flow *flow;
+};
+
+/*
+ * NOTE(review): these filter lists are process-global and shared by every
+ * ixgbe port; consider moving them into per-port private data.
+ */
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+struct ixgbe_flow_mem_list ixgbe_flow_list;
+
@@ -1380,6 +1429,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 
        /* initialize l2 tunnel filter list & hash */
        ixgbe_l2_tn_filter_init(eth_dev);
+
+       TAILQ_INIT(&filter_ntuple_list);
+       TAILQ_INIT(&filter_ethertype_list);
+       TAILQ_INIT(&filter_syn_list);
+       TAILQ_INIT(&filter_fdir_list);
+       TAILQ_INIT(&filter_l2_tunnel_list);
+       TAILQ_INIT(&ixgbe_flow_list);
+
        return 0;
 }
 
@@ -10334,6 +10391,209 @@ ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
        return ret;
 }
 
+/**
+ * Create a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+ixgbe_flow_create(struct rte_eth_dev *dev,
+                 const struct rte_flow_attr *attr,
+                 const struct rte_flow_item pattern[],
+                 const struct rte_flow_action actions[],
+                 struct rte_flow_error *error)
+{
+       int ret;
+       struct rte_eth_ntuple_filter ntuple_filter;
+       struct rte_eth_ethertype_filter ethertype_filter;
+       struct rte_eth_syn_filter syn_filter;
+       struct ixgbe_fdir_rule fdir_rule;
+       struct rte_eth_l2_tunnel_conf l2_tn_filter;
+       struct ixgbe_hw_fdir_info *fdir_info =
+               IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+       struct ixgbe_flow *flow = NULL;
+       struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
+       struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
+       struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
+       struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+       struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+       struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+
+       flow = rte_zmalloc("ixgbe_flow", sizeof(struct ixgbe_flow), 0);
+       if (!flow) {
+               PMD_DRV_LOG(ERR, "failed to allocate memory");
+               return NULL;
+       }
+       ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
+                       sizeof(struct ixgbe_flow_mem), 0);
+       if (!ixgbe_flow_mem_ptr) {
+               PMD_DRV_LOG(ERR, "failed to allocate memory");
+               /* Do not leak the flow handle allocated just above. */
+               rte_free(flow);
+               return NULL;
+       }
+       ixgbe_flow_mem_ptr->flow = flow;
+       TAILQ_INSERT_TAIL(&ixgbe_flow_list,
+                               ixgbe_flow_mem_ptr, entries);
+
+       /* ntuple filter */
+       memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+       ret = ixgbe_parse_ntuple_filter(attr, pattern,
+                       actions, &ntuple_filter, error);
+       if (!ret) {
+               ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+               if (ret)
+                       goto out;
+               ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
+                       sizeof(struct ixgbe_ntuple_filter_ele), 0);
+               if (!ntuple_filter_ptr) {
+                       PMD_DRV_LOG(ERR, "failed to allocate memory");
+                       goto out;
+               }
+               (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
+                       &ntuple_filter,
+                       sizeof(struct rte_eth_ntuple_filter));
+               TAILQ_INSERT_TAIL(&filter_ntuple_list,
+                       ntuple_filter_ptr, entries);
+               flow->rule = ntuple_filter_ptr;
+               flow->filter_type = RTE_ETH_FILTER_NTUPLE;
+               return (struct rte_flow *)flow;
+       }
+
+       /* ethertype filter */
+       memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+       ret = ixgbe_parse_ethertype_filter(attr, pattern,
+                               actions, &ethertype_filter, error);
+       if (!ret) {
+               ret = ixgbe_add_del_ethertype_filter(dev,
+                               &ethertype_filter, TRUE);
+               if (ret)
+                       goto out;
+               ethertype_filter_ptr = rte_zmalloc(
+                       "ixgbe_ethertype_filter",
+                       sizeof(struct ixgbe_ethertype_filter_ele), 0);
+               if (!ethertype_filter_ptr) {
+                       PMD_DRV_LOG(ERR, "failed to allocate memory");
+                       goto out;
+               }
+               (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
+                       &ethertype_filter,
+                       sizeof(struct rte_eth_ethertype_filter));
+               TAILQ_INSERT_TAIL(&filter_ethertype_list,
+                       ethertype_filter_ptr, entries);
+               flow->rule = ethertype_filter_ptr;
+               flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
+               return (struct rte_flow *)flow;
+       }
+
+       /* TCP SYN filter */
+       memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+       ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
+       if (!ret) {
+               ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
+               if (ret)
+                       goto out;
+               syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
+                       sizeof(struct ixgbe_eth_syn_filter_ele), 0);
+               if (!syn_filter_ptr) {
+                       PMD_DRV_LOG(ERR, "failed to allocate memory");
+                       goto out;
+               }
+               (void)rte_memcpy(&syn_filter_ptr->filter_info,
+                       &syn_filter,
+                       sizeof(struct rte_eth_syn_filter));
+               TAILQ_INSERT_TAIL(&filter_syn_list,
+                       syn_filter_ptr, entries);
+               flow->rule = syn_filter_ptr;
+               flow->filter_type = RTE_ETH_FILTER_SYN;
+               return (struct rte_flow *)flow;
+       }
+
+       /* flow director filter */
+       memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
+       ret = ixgbe_parse_fdir_filter(attr, pattern,
+                               actions, &fdir_rule, error);
+       if (!ret) {
+               /* A mask cannot be deleted. */
+               if (fdir_rule.b_mask) {
+                       if (!fdir_info->mask_added) {
+                               /* It's the first time the mask is set. */
+                               rte_memcpy(&fdir_info->mask,
+                                       &fdir_rule.mask,
+                                       sizeof(struct ixgbe_hw_fdir_mask));
+                               ret = ixgbe_fdir_set_input_mask(dev);
+                               if (ret)
+                                       goto out;
+                               fdir_info->mask_added = TRUE;
+                       } else {
+                               /**
+                                * Only support one global mask,
+                                * all the masks should be the same.
+                                */
+                               ret = memcmp(&fdir_info->mask,
+                                       &fdir_rule.mask,
+                                       sizeof(struct ixgbe_hw_fdir_mask));
+                               if (ret)
+                                       goto out;
+                       }
+               }
+
+               /* A mask-only rule programs no filter entry. */
+               if (!fdir_rule.b_spec)
+                       goto out;
+               ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
+                               FALSE, FALSE);
+               if (ret)
+                       goto out;
+               fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
+                       sizeof(struct ixgbe_fdir_rule_ele), 0);
+               if (!fdir_rule_ptr) {
+                       PMD_DRV_LOG(ERR, "failed to allocate memory");
+                       goto out;
+               }
+               (void)rte_memcpy(&fdir_rule_ptr->filter_info,
+                       &fdir_rule,
+                       sizeof(struct ixgbe_fdir_rule));
+               TAILQ_INSERT_TAIL(&filter_fdir_list,
+                       fdir_rule_ptr, entries);
+               flow->rule = fdir_rule_ptr;
+               flow->filter_type = RTE_ETH_FILTER_FDIR;
+               return (struct rte_flow *)flow;
+       }
+
+       /* L2 tunnel filter */
+       memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+       ret = cons_parse_l2_tn_filter(attr, pattern,
+                                       actions, &l2_tn_filter, error);
+       if (!ret) {
+               ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
+               if (ret)
+                       goto out;
+               l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
+                       sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
+               if (!l2_tn_filter_ptr) {
+                       PMD_DRV_LOG(ERR, "failed to allocate memory");
+                       goto out;
+               }
+               (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
+                       &l2_tn_filter,
+                       sizeof(struct rte_eth_l2_tunnel_conf));
+               TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
+                       l2_tn_filter_ptr, entries);
+               flow->rule = l2_tn_filter_ptr;
+               flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
+               return (struct rte_flow *)flow;
+       }
+
+out:
+       /* No rule matched or setup failed: drop all bookkeeping. */
+       TAILQ_REMOVE(&ixgbe_flow_list, ixgbe_flow_mem_ptr, entries);
+       rte_free(ixgbe_flow_mem_ptr);
+       rte_free(flow);
+       return NULL;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 676d45f..0000138 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -329,6 +329,12 @@ struct ixgbe_l2_tn_info {
        bool e_tag_ether_type; /* ether type for e-tag */
 };
 
+/* Handle returned by ixgbe_flow_create(): tags the filter-list entry type. */
+struct ixgbe_flow {
+       enum rte_filter_type filter_type; /* which filter list 'rule' is on */
+       void *rule; /* points at an ixgbe_*_ele list element */
+};
+
 /*
  * Structure to store private data for each driver instance (for each port).
  */
-- 
2.5.5

Reply via email to