On 09/04, Ying Wang wrote:
>The patch reworks the generic flow API (rte_flow) implementation.
>It introduces an abstract layer which provides a unified interface
>for low-level filter engines (switch, fdir, hash) to register supported
>patterns and actions and to implement flow validate/create/destroy/flush/
>query operations.
>
>The patch also removes the existing switch filter implementation to
>avoid compile errors. The switch filter implementation for the new
>framework will be added in a following patch.
>
>Signed-off-by: Ying Wang <ying.a.w...@intel.com>
>---
> drivers/net/ice/ice_ethdev.c        |  22 +-
> drivers/net/ice/ice_ethdev.h        |  15 +-
> drivers/net/ice/ice_generic_flow.c  | 768 +++++++++++++++--------------------
> drivers/net/ice/ice_generic_flow.h  | 782 ++++++++----------------------------
> drivers/net/ice/ice_switch_filter.c | 511 -----------------------
> drivers/net/ice/ice_switch_filter.h |  18 -
> 6 files changed, 525 insertions(+), 1591 deletions(-)
>
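For other reviewers' context: with this framework, a low-level filter
engine plugs in by registering an ice_flow_engine at constructor time,
roughly like below (a sketch only; the switch handler names are
illustrative, not taken from the follow-up patch):

	static struct ice_flow_engine ice_switch_engine = {
		.init = ice_switch_init,
		.uninit = ice_switch_uninit,
		.create = ice_switch_create,
		.destroy = ice_switch_destroy,
		.type = ICE_FLOW_ENGINE_SWITCH,
	};

	RTE_INIT(ice_sw_engine_register)
	{
		ice_register_flow_engine(&ice_switch_engine);
	}
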
>diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
>index 4e0645db1..647aca3ed 100644
>--- a/drivers/net/ice/ice_ethdev.c
>+++ b/drivers/net/ice/ice_ethdev.c
>@@ -15,7 +15,7 @@
> #include "base/ice_dcb.h"
> #include "ice_ethdev.h"
> #include "ice_rxtx.h"
>-#include "ice_switch_filter.h"
>+#include "ice_generic_flow.h"
> 
> /* devargs */
> #define ICE_SAFE_MODE_SUPPORT_ARG "safe-mode-support"
>@@ -1677,7 +1677,11 @@ ice_dev_init(struct rte_eth_dev *dev)
>       /* get base queue pairs index  in the device */
>       ice_base_queue_get(pf);
> 
>-      TAILQ_INIT(&pf->flow_list);
>+      ret = ice_flow_init(ad);
>+      if (ret) {
>+              PMD_INIT_LOG(ERR, "Failed to initialize flow");
>+              return ret;
>+      }
> 
>       return 0;
> 
>@@ -1796,6 +1800,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> {
>       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>       struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>+      struct ice_adapter *ad =
>+              ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> 
>       /* Since stop will make link down, then the link event will be
>        * triggered, disable the irq firstly to avoid the port_infoe etc
>@@ -1806,6 +1812,8 @@ ice_dev_close(struct rte_eth_dev *dev)
> 
>       ice_dev_stop(dev);
> 
>+      ice_flow_uninit(ad);
>+
>       /* release all queue resource */
>       ice_free_queues(dev);
> 
>@@ -1822,8 +1830,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
> {
>       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
>       struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
>-      struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>-      struct rte_flow *p_flow;
> 
>       ice_dev_close(dev);
> 
>@@ -1840,14 +1846,6 @@ ice_dev_uninit(struct rte_eth_dev *dev)
>       /* unregister callback func from eal lib */
>       rte_intr_callback_unregister(intr_handle,
>                                    ice_interrupt_handler, dev);
>-
>-      /* Remove all flows */
>-      while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>-              TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>-              ice_free_switch_filter_rule(p_flow->rule);
>-              rte_free(p_flow);
>-      }
>-
>       return 0;
> }
> 
>diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
>index 9bf5de08d..d1d07641d 100644
>--- a/drivers/net/ice/ice_ethdev.h
>+++ b/drivers/net/ice/ice_ethdev.h
>@@ -241,16 +241,14 @@ struct ice_vsi {
>       bool offset_loaded;
> };
> 
>-extern const struct rte_flow_ops ice_flow_ops;
>-
>-/* Struct to store flow created. */
>-struct rte_flow {
>-      TAILQ_ENTRY(rte_flow) node;
>-      void *rule;
>-};
> 
>+struct rte_flow;
> TAILQ_HEAD(ice_flow_list, rte_flow);
> 
>+
>+struct ice_flow_parser;
>+TAILQ_HEAD(ice_parser_list, ice_flow_parser);
>+
> struct ice_pf {
>       struct ice_adapter *adapter; /* The adapter this PF associate to */
>       struct ice_vsi *main_vsi; /* pointer to main VSI structure */
>@@ -278,6 +276,9 @@ struct ice_pf {
>       bool offset_loaded;
>       bool adapter_stopped;
>       struct ice_flow_list flow_list;
>+      struct ice_parser_list rss_parser_list;
>+      struct ice_parser_list perm_parser_list;
>+      struct ice_parser_list dist_parser_list;
> };
> 
> /**
>diff --git a/drivers/net/ice/ice_generic_flow.c b/drivers/net/ice/ice_generic_flow.c
>index 1c0adc779..aa11d6170 100644
>--- a/drivers/net/ice/ice_generic_flow.c
>+++ b/drivers/net/ice/ice_generic_flow.c
>@@ -17,7 +17,22 @@
> 
> #include "ice_ethdev.h"
> #include "ice_generic_flow.h"
>-#include "ice_switch_filter.h"
>+
>+/**
>+ * Non-pipeline mode, fdir and switch are both used as distributors,
>+ * fdir is used first, switch is used as fdir's backup.
>+ */
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY 0
>+/* Pipeline mode, switch is used at permission stage */
>+#define ICE_FLOW_CLASSIFY_STAGE_PERMISSION 1
>+/* Pipeline mode, fdir is used at distributor stage */
>+#define ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR 2
>+
>+static int ice_pipeline_stage =
>+              ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;
>+
>+static struct ice_engine_list engine_list =
>+              TAILQ_HEAD_INITIALIZER(engine_list);
> 
> static int ice_flow_validate(struct rte_eth_dev *dev,
>               const struct rte_flow_attr *attr,
>@@ -34,17 +49,153 @@ static int ice_flow_destroy(struct rte_eth_dev *dev,
>               struct rte_flow_error *error);
> static int ice_flow_flush(struct rte_eth_dev *dev,
>               struct rte_flow_error *error);
>+static int ice_flow_query_count(struct rte_eth_dev *dev,
>+              struct rte_flow *flow,
>+              const struct rte_flow_action *actions,
>+              void *data,
>+              struct rte_flow_error *error);
> 
> const struct rte_flow_ops ice_flow_ops = {
>       .validate = ice_flow_validate,
>       .create = ice_flow_create,
>       .destroy = ice_flow_destroy,
>       .flush = ice_flow_flush,
>+      .query = ice_flow_query_count,
> };
> 
>+
>+void
>+ice_register_flow_engine(struct ice_flow_engine *engine)
>+{
>+      TAILQ_INSERT_TAIL(&engine_list, engine, node);
>+}
>+
>+int
>+ice_flow_init(struct ice_adapter *ad)
>+{
>+      int ret = 0;
>+      struct ice_pf *pf = &ad->pf;
>+      void *temp;
>+      struct ice_flow_engine *engine = NULL;
>+
>+      TAILQ_INIT(&pf->flow_list);
>+      TAILQ_INIT(&pf->rss_parser_list);
>+      TAILQ_INIT(&pf->perm_parser_list);
>+      TAILQ_INIT(&pf->dist_parser_list);
>+
>+      TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+              if (engine->init == NULL)

What about providing some debug log info here? Adding an engine name
member to struct ice_flow_engine may help.
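Something like this would make init failures traceable (the 'name'
member is hypothetical, not in the patch):

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->init == NULL) {
			PMD_INIT_LOG(ERR, "Engine %s has no init handler",
				     engine->name);
			return -EINVAL;
		}

		ret = engine->init(ad);
		if (ret) {
			PMD_INIT_LOG(ERR, "Engine %s init failed",
				     engine->name);
			return ret;
		}
	}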

>+                      return -EINVAL;
>+
>+              ret = engine->init(ad);
>+              if (ret)
>+                      return ret;
>+      }
>+      return 0;
>+}
>+
>+void
>+ice_flow_uninit(struct ice_adapter *ad)
>+{
>+      struct ice_pf *pf = &ad->pf;
>+      struct ice_flow_engine *engine;
>+      struct rte_flow *p_flow;
>+      struct ice_flow_parser *p_parser;
>+      void *temp;
>+
>+      TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
>+              if (engine->uninit)
>+                      engine->uninit(ad);
>+      }
>+
>+      /* Remove all flows */
>+      while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
>+              TAILQ_REMOVE(&pf->flow_list, p_flow, node);
>+              if (p_flow->engine->free)
>+                      p_flow->engine->free(p_flow);
>+              rte_free(p_flow);
>+      }
>+
>+      /* Cleanup parser list */
>+      while ((p_parser = TAILQ_FIRST(&pf->rss_parser_list)))
>+              TAILQ_REMOVE(&pf->rss_parser_list, p_parser, node);
>+
>+      while ((p_parser = TAILQ_FIRST(&pf->perm_parser_list)))
>+              TAILQ_REMOVE(&pf->perm_parser_list, p_parser, node);
>+
>+      while ((p_parser = TAILQ_FIRST(&pf->dist_parser_list)))
>+              TAILQ_REMOVE(&pf->dist_parser_list, p_parser, node);
>+}
>+
>+int
>+ice_register_parser(struct ice_flow_parser *parser,
>+              struct ice_adapter *ad)
>+{
>+      struct ice_parser_list *list = NULL;
>+      struct ice_pf *pf = &ad->pf;
>+
>+      switch (parser->stage) {
>+      case ICE_FLOW_STAGE_RSS:
>+              list = &pf->rss_parser_list;
>+              break;
>+      case ICE_FLOW_STAGE_PERMISSION:
>+              list = &pf->perm_parser_list;
>+              break;
>+      case ICE_FLOW_STAGE_DISTRIBUTOR:
>+              list = &pf->dist_parser_list;
>+              break;
>+      default:
>+              return -EINVAL;
>+      }
>+
>+      if (ad->devargs.pipeline_mode_support)
>+              TAILQ_INSERT_TAIL(list, parser, node);
>+      else {
>+              if (parser->engine->type == ICE_FLOW_ENGINE_SWITCH
>+                      || parser->engine->type == ICE_FLOW_ENGINE_HASH)
>+                      TAILQ_INSERT_TAIL(list, parser, node);
>+              else if (parser->engine->type == ICE_FLOW_ENGINE_FDIR)
>+                      TAILQ_INSERT_HEAD(list, parser, node);
>+              else
>+                      return -EINVAL;
>+      }
>+      return 0;
>+}
>+
>+void
>+ice_unregister_parser(struct ice_flow_parser *parser,
>+              struct ice_adapter *ad)
>+{
>+      struct ice_pf *pf = &ad->pf;
>+      struct ice_parser_list *list;
>+      struct ice_flow_parser *p_parser;
>+      void *temp;
>+
>+      switch (parser->stage) {
>+      case ICE_FLOW_STAGE_RSS:
>+              list = &pf->rss_parser_list;
>+              break;
>+      case ICE_FLOW_STAGE_PERMISSION:
>+              list = &pf->perm_parser_list;
>+              break;
>+      case ICE_FLOW_STAGE_DISTRIBUTOR:
>+              list = &pf->dist_parser_list;
>+              break;
>+      default:
>+              return;
>+      }
>+
>+      TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
>+              if (p_parser->engine->type == parser->engine->type)
>+                      TAILQ_REMOVE(list, p_parser, node);
>+      }
>+
>+}
>+
> static int
>-ice_flow_valid_attr(const struct rte_flow_attr *attr,
>-                   struct rte_flow_error *error)
>+ice_flow_valid_attr(struct ice_adapter *ad,
>+              const struct rte_flow_attr *attr,
>+              struct rte_flow_error *error)
> {
>       /* Must be input direction */
>       if (!attr->ingress) {
>@@ -61,15 +212,25 @@ ice_flow_valid_attr(const struct rte_flow_attr *attr,
>                                  attr, "Not support egress.");
>               return -rte_errno;
>       }
>-
>-      /* Not supported */
>-      if (attr->priority) {
>-              rte_flow_error_set(error, EINVAL,
>-                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>-                                 attr, "Not support priority.");
>-              return -rte_errno;
>+      /* Check pipeline mode support to set classification stage */
>+      if (ad->devargs.pipeline_mode_support) {
>+              if (0 == attr->priority)
>+                      ice_pipeline_stage =
>+                              ICE_FLOW_CLASSIFY_STAGE_PERMISSION;
>+              else
>+                      ice_pipeline_stage =
>+                              ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR;
>+      } else {
>+              ice_pipeline_stage =
>+                      ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY;

Do we really need this assignment?
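If pipeline_mode_support is fixed after probe, ice_pipeline_stage can
never leave its initial value on this path, so the else branch could
arguably shrink to just the priority check (a sketch, assuming the
devarg cannot change at runtime):

	} else {
		/* Not supported */
		if (attr->priority) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   attr, "Not support priority.");
			return -rte_errno;
		}
	}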

>+              /* Not supported */
>+              if (attr->priority) {
>+                      rte_flow_error_set(error, EINVAL,
>+                                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
>+                                         attr, "Not support priority.");
>+                      return -rte_errno;
>+              }
>       }
>-

Unrelated change.

>       /* Not supported */
>       if (attr->group) {
>               rte_flow_error_set(error, EINVAL,
>@@ -102,7 +263,7 @@ ice_find_first_item(const struct rte_flow_item *item, bool is_void)
> /* Skip all VOID items of the pattern */
> static void
> ice_pattern_skip_void_item(struct rte_flow_item *items,
>-                          const struct rte_flow_item *pattern)
>+                      const struct rte_flow_item *pattern)
> {
>       uint32_t cpy_count = 0;
>       const struct rte_flow_item *pb = pattern, *pe = pattern;
>@@ -124,7 +285,6 @@ ice_pattern_skip_void_item(struct rte_flow_item *items,
>               items += cpy_count;
> 
>               if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
>-                      pb = pe;
>                       break;
>               }
> 
>@@ -151,11 +311,15 @@ ice_match_pattern(enum rte_flow_item_type *item_array,
>               item->type == RTE_FLOW_ITEM_TYPE_END);
> }
> 
>-static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
>+struct ice_pattern_match_item *
>+ice_search_pattern_match_item(const struct rte_flow_item pattern[],
>+              struct ice_pattern_match_item *array,
>+              uint32_t array_len,
>               struct rte_flow_error *error)
> {
>       uint16_t i = 0;
>-      uint64_t inset;
>+      struct ice_pattern_match_item *pattern_match_item;
>+      /* needs to be freed by each filter */
>       struct rte_flow_item *items; /* used for pattern without VOID items */
>       uint32_t item_num = 0; /* non-void item number */
> 
>@@ -172,451 +336,149 @@ static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
>       if (!items) {
>               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
>                                  NULL, "No memory for PMD internal items.");
>-              return -ENOMEM;
>+              return NULL;
>+      }
>+      pattern_match_item = rte_zmalloc("ice_pattern_match_item",
>+                      sizeof(struct ice_pattern_match_item), 0);
>+      if (!pattern_match_item) {
>+              PMD_DRV_LOG(ERR, "Failed to allocate memory.");

Use rte_flow_error_set() to align with the other error paths.
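i.e. something like:

	if (!pattern_match_item) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate pattern_match_item.");
		return NULL;
	}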

>+              return NULL;
>       }
>-
>       ice_pattern_skip_void_item(items, pattern);
> 
>-      for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
>-              if (ice_match_pattern(ice_supported_patterns[i].items,
>+      for (i = 0; i < array_len; i++)
>+              if (ice_match_pattern(array[i].pattern_list,
>                                     items)) {
>-                      inset = ice_supported_patterns[i].sw_fields;
>+                      pattern_match_item->input_set_mask =
>+                              array[i].input_set_mask;
>+                      pattern_match_item->pattern_list =
>+                              array[i].pattern_list;
>+                      pattern_match_item->meta = array[i].meta;
>                       rte_free(items);
>-                      return inset;
>+                      return pattern_match_item;
>               }
>       rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
>                          pattern, "Unsupported pattern");
> 
>       rte_free(items);
>-      return 0;
>-}
>-
>-static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
>-                      struct rte_flow_error *error)
>-{
>-      const struct rte_flow_item *item = pattern;
>-      const struct rte_flow_item_eth *eth_spec, *eth_mask;
>-      const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
>-      const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
>-      const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
>-      const struct rte_flow_item_udp *udp_spec, *udp_mask;
>-      const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
>-      const struct rte_flow_item_icmp *icmp_mask;
>-      const struct rte_flow_item_icmp6 *icmp6_mask;
>-      const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
>-      const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
>-      enum rte_flow_item_type item_type;
>-      uint8_t  ipv6_addr_mask[16] = {
>-              0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
>-              0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
>-      uint64_t input_set = ICE_INSET_NONE;
>-      bool is_tunnel = false;
>-
>-      for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
>-              if (item->last) {
>-                      rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Not support range");
>-                      return 0;
>-              }
>-              item_type = item->type;
>-              switch (item_type) {
>-              case RTE_FLOW_ITEM_TYPE_ETH:
>-                      eth_spec = item->spec;
>-                      eth_mask = item->mask;
>-
>-                      if (eth_spec && eth_mask) {
>-                              if (rte_is_broadcast_ether_addr(&eth_mask->src))
>-                                      input_set |= ICE_INSET_SMAC;
>-                              if (rte_is_broadcast_ether_addr(&eth_mask->dst))
>-                                      input_set |= ICE_INSET_DMAC;
>-                              if (eth_mask->type == RTE_BE16(0xffff))
>-                                      input_set |= ICE_INSET_ETHERTYPE;
>-                      }
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_IPV4:
>-                      ipv4_spec = item->spec;
>-                      ipv4_mask = item->mask;
>-
>-                      if (!(ipv4_spec && ipv4_mask))
>-                              break;
>-
>-                      /* Check IPv4 mask and update input set */
>-                      if (ipv4_mask->hdr.version_ihl ||
>-                          ipv4_mask->hdr.total_length ||
>-                          ipv4_mask->hdr.packet_id ||
>-                          ipv4_mask->hdr.hdr_checksum) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid IPv4 mask.");
>-                              return 0;
>-                      }
>-
>-                      if (is_tunnel) {
>-                              if (ipv4_mask->hdr.src_addr == UINT32_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV4_SRC;
>-                              if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV4_DST;
>-                              if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV4_TTL;
>-                              if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV4_PROTO;
>-                      } else {
>-                              if (ipv4_mask->hdr.src_addr == UINT32_MAX)
>-                                      input_set |= ICE_INSET_IPV4_SRC;
>-                              if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
>-                                      input_set |= ICE_INSET_IPV4_DST;
>-                              if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
>-                                      input_set |= ICE_INSET_IPV4_TTL;
>-                              if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
>-                                      input_set |= ICE_INSET_IPV4_PROTO;
>-                              if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
>-                                      input_set |= ICE_INSET_IPV4_TOS;
>-                      }
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_IPV6:
>-                      ipv6_spec = item->spec;
>-                      ipv6_mask = item->mask;
>-
>-                      if (!(ipv6_spec && ipv6_mask))
>-                              break;
>-
>-                      if (ipv6_mask->hdr.payload_len) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid IPv6 mask");
>-                              return 0;
>-                      }
>-
>-                      if (is_tunnel) {
>-                              if (!memcmp(ipv6_mask->hdr.src_addr,
>-                                          ipv6_addr_mask,
>-                                          RTE_DIM(ipv6_mask->hdr.src_addr)))
>-                                      input_set |= ICE_INSET_TUN_IPV6_SRC;
>-                              if (!memcmp(ipv6_mask->hdr.dst_addr,
>-                                          ipv6_addr_mask,
>-                                          RTE_DIM(ipv6_mask->hdr.dst_addr)))
>-                                      input_set |= ICE_INSET_TUN_IPV6_DST;
>-                              if (ipv6_mask->hdr.proto == UINT8_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV6_PROTO;
>-                              if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
>-                                      input_set |= ICE_INSET_TUN_IPV6_TTL;
>-                      } else {
>-                              if (!memcmp(ipv6_mask->hdr.src_addr,
>-                                          ipv6_addr_mask,
>-                                          RTE_DIM(ipv6_mask->hdr.src_addr)))
>-                                      input_set |= ICE_INSET_IPV6_SRC;
>-                              if (!memcmp(ipv6_mask->hdr.dst_addr,
>-                                          ipv6_addr_mask,
>-                                          RTE_DIM(ipv6_mask->hdr.dst_addr)))
>-                                      input_set |= ICE_INSET_IPV6_DST;
>-                              if (ipv6_mask->hdr.proto == UINT8_MAX)
>-                                      input_set |= ICE_INSET_IPV6_PROTO;
>-                              if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
>-                                      input_set |= ICE_INSET_IPV6_HOP_LIMIT;
>-                              if ((ipv6_mask->hdr.vtc_flow &
>-                                      rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
>-                                              == rte_cpu_to_be_32
>-                                              (RTE_IPV6_HDR_TC_MASK))
>-                                      input_set |= ICE_INSET_IPV6_TOS;
>-                      }
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_UDP:
>-                      udp_spec = item->spec;
>-                      udp_mask = item->mask;
>-
>-                      if (!(udp_spec && udp_mask))
>-                              break;
>-
>-                      /* Check UDP mask and update input set*/
>-                      if (udp_mask->hdr.dgram_len ||
>-                          udp_mask->hdr.dgram_cksum) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
>-                                                 item,
>-                                                 "Invalid UDP mask");
>-                              return 0;
>-                      }
>-
>-                      if (is_tunnel) {
>-                              if (udp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_SRC_PORT;
>-                              if (udp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_DST_PORT;
>-                      } else {
>-                              if (udp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_SRC_PORT;
>-                              if (udp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_DST_PORT;
>-                      }
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_TCP:
>-                      tcp_spec = item->spec;
>-                      tcp_mask = item->mask;
>-
>-                      if (!(tcp_spec && tcp_mask))
>-                              break;
>-
>-                      /* Check TCP mask and update input set */
>-                      if (tcp_mask->hdr.sent_seq ||
>-                          tcp_mask->hdr.recv_ack ||
>-                          tcp_mask->hdr.data_off ||
>-                          tcp_mask->hdr.tcp_flags ||
>-                          tcp_mask->hdr.rx_win ||
>-                          tcp_mask->hdr.cksum ||
>-                          tcp_mask->hdr.tcp_urp) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
>-                                                 item,
>-                                                 "Invalid TCP mask");
>-                              return 0;
>-                      }
>-
>-                      if (is_tunnel) {
>-                              if (tcp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_SRC_PORT;
>-                              if (tcp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_DST_PORT;
>-                      } else {
>-                              if (tcp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_SRC_PORT;
>-                              if (tcp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_DST_PORT;
>-                      }
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_SCTP:
>-                      sctp_spec = item->spec;
>-                      sctp_mask = item->mask;
>-
>-                      if (!(sctp_spec && sctp_mask))
>-                              break;
>-
>-                      /* Check SCTP mask and update input set */
>-                      if (sctp_mask->hdr.cksum) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid SCTP mask");
>-                              return 0;
>-                      }
>-
>-                      if (is_tunnel) {
>-                              if (sctp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_SRC_PORT;
>-                              if (sctp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_TUN_DST_PORT;
>-                      } else {
>-                              if (sctp_mask->hdr.src_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_SRC_PORT;
>-                              if (sctp_mask->hdr.dst_port == UINT16_MAX)
>-                                      input_set |= ICE_INSET_DST_PORT;
>-                      }
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_ICMP:
>-                      icmp_mask = item->mask;
>-                      if (icmp_mask->hdr.icmp_code ||
>-                          icmp_mask->hdr.icmp_cksum ||
>-                          icmp_mask->hdr.icmp_ident ||
>-                          icmp_mask->hdr.icmp_seq_nb) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
>-                                                 item,
>-                                                 "Invalid ICMP mask");
>-                              return 0;
>-                      }
>-
>-                      if (icmp_mask->hdr.icmp_type == UINT8_MAX)
>-                              input_set |= ICE_INSET_ICMP;
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_ICMP6:
>-                      icmp6_mask = item->mask;
>-                      if (icmp6_mask->code ||
>-                          icmp6_mask->checksum) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                                 RTE_FLOW_ERROR_TYPE_ITEM,
>-                                                 item,
>-                                                 "Invalid ICMP6 mask");
>-                              return 0;
>-                      }
>-
>-                      if (icmp6_mask->type == UINT8_MAX)
>-                              input_set |= ICE_INSET_ICMP6;
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_VXLAN:
>-                      vxlan_spec = item->spec;
>-                      vxlan_mask = item->mask;
>-                      /* Check if VXLAN item is used to describe protocol.
>-                       * If yes, both spec and mask should be NULL.
>-                       * If no, both spec and mask shouldn't be NULL.
>-                       */
>-                      if ((!vxlan_spec && vxlan_mask) ||
>-                          (vxlan_spec && !vxlan_mask)) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid VXLAN item");
>-                              return 0;
>-                      }
>-                      if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
>-                                      vxlan_mask->vni[1] == UINT8_MAX &&
>-                                      vxlan_mask->vni[2] == UINT8_MAX)
>-                              input_set |= ICE_INSET_TUN_ID;
>-                      is_tunnel = 1;
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_NVGRE:
>-                      nvgre_spec = item->spec;
>-                      nvgre_mask = item->mask;
>-                      /* Check if NVGRE item is used to describe protocol.
>-                       * If yes, both spec and mask should be NULL.
>-                       * If no, both spec and mask shouldn't be NULL.
>-                       */
>-                      if ((!nvgre_spec && nvgre_mask) ||
>-                          (nvgre_spec && !nvgre_mask)) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid NVGRE item");
>-                              return 0;
>-                      }
>-                      if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
>-                                      nvgre_mask->tni[1] == UINT8_MAX &&
>-                                      nvgre_mask->tni[2] == UINT8_MAX)
>-                              input_set |= ICE_INSET_TUN_ID;
>-                      is_tunnel = 1;
>-
>-                      break;
>-              case RTE_FLOW_ITEM_TYPE_VOID:
>-                      break;
>-              default:
>-                      rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ITEM,
>-                                         item,
>-                                         "Invalid pattern");
>-                      break;
>-              }
>-      }
>-      return input_set;
>-}
>-
>-static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
>-                      uint64_t inset, struct rte_flow_error *error)
>-{
>-      uint64_t fields;
>-
>-      /* get valid field */
>-      fields = ice_get_flow_field(pattern, error);
>-      if (!fields || fields & (~inset)) {
>-              rte_flow_error_set(error, EINVAL,
>-                                 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
>-                                 pattern,
>-                                 "Invalid input set");
>-              return -rte_errno;
>-      }
>-
>-      return 0;
>+      rte_free(pattern_match_item);
>+      return NULL;
> }
> 
>-static int ice_flow_valid_action(struct rte_eth_dev *dev,
>-                              const struct rte_flow_action *actions,
>-                              struct rte_flow_error *error)
>+static struct ice_flow_engine *
>+ice_parse_engine(struct ice_adapter *ad,
>+              struct ice_parser_list *parser_list,
>+              const struct rte_flow_item pattern[],
>+              const struct rte_flow_action actions[],
>+              void **meta,
>+              struct rte_flow_error *error)
> {
>-      const struct rte_flow_action_queue *act_q;
>-      uint16_t queue;
>-      const struct rte_flow_action *action;
>-      for (action = actions; action->type !=
>-                      RTE_FLOW_ACTION_TYPE_END; action++) {
>-              switch (action->type) {
>-              case RTE_FLOW_ACTION_TYPE_QUEUE:
>-                      act_q = action->conf;
>-                      queue = act_q->index;
>-                      if (queue >= dev->data->nb_rx_queues) {
>-                              rte_flow_error_set(error, EINVAL,
>-                                              RTE_FLOW_ERROR_TYPE_ACTION,
>-                                              actions, "Invalid queue ID for"
>-                                              " switch filter.");
>-                              return -rte_errno;
>-                      }
>-                      break;
>-              case RTE_FLOW_ACTION_TYPE_DROP:
>-              case RTE_FLOW_ACTION_TYPE_VOID:
>-                      break;
>-              default:
>-                      rte_flow_error_set(error, EINVAL,
>-                                         RTE_FLOW_ERROR_TYPE_ACTION, actions,
>-                                         "Invalid action.");
>-                      return -rte_errno;
>-              }
>+      struct ice_flow_engine *engine = NULL;
>+      struct ice_flow_parser *parser = NULL;
>+      void *temp;
>+      TAILQ_FOREACH_SAFE(parser, parser_list, node, temp) {
>+              if (parser->parse_pattern_action(ad, parser->array,
>+                              parser->array_len, pattern, actions,
>+                              meta, error) < 0)
>+                      continue;
>+              engine = parser->engine;
>+              break;
>       }
>-      return 0;
>+      return engine;
> }
> 
> static int
>-ice_flow_validate(struct rte_eth_dev *dev,
>-                 const struct rte_flow_attr *attr,
>-                 const struct rte_flow_item pattern[],
>-                 const struct rte_flow_action actions[],
>-                 struct rte_flow_error *error)
>+ice_flow_validate_filter(struct rte_eth_dev *dev,
>+              const struct rte_flow_attr *attr,
>+              const struct rte_flow_item pattern[],
>+              const struct rte_flow_action actions[],
>+              struct ice_flow_engine **engine,
>+              void **meta,
>+              struct rte_flow_error *error)
> {
>-      uint64_t inset = 0;
>       int ret = ICE_ERR_NOT_SUPPORTED;
>+      struct ice_adapter *ad =
>+              ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+      struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
> 
>       if (!pattern) {
>               rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
>-                                 NULL, "NULL pattern.");
>+                              NULL, "NULL pattern.");
>               return -rte_errno;
>       }
> 
>       if (!actions) {
>               rte_flow_error_set(error, EINVAL,
>-                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
>-                                 NULL, "NULL action.");
>+                              RTE_FLOW_ERROR_TYPE_ACTION_NUM,
>+                              NULL, "NULL action.");
>               return -rte_errno;
>       }
>-
>       if (!attr) {
>               rte_flow_error_set(error, EINVAL,
>-                                 RTE_FLOW_ERROR_TYPE_ATTR,
>-                                 NULL, "NULL attribute.");
>+                              RTE_FLOW_ERROR_TYPE_ATTR,
>+                              NULL, "NULL attribute.");
>               return -rte_errno;
>       }
> 
>-      ret = ice_flow_valid_attr(attr, error);
>+      ret = ice_flow_valid_attr(ad, attr, error);
>       if (ret)
>               return ret;
> 
>-      inset = ice_flow_valid_pattern(pattern, error);
>-      if (!inset)
>-              return -rte_errno;
>-
>-      ret = ice_flow_valid_inset(pattern, inset, error);
>-      if (ret)
>-              return ret;
>+      *engine = ice_parse_engine(ad, &pf->rss_parser_list, pattern, actions,
>+                      meta, error);
>+      if (*engine != NULL)
>+              return 0;
>+
>+      switch (ice_pipeline_stage) {
>+      case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR_ONLY:
>+      case ICE_FLOW_CLASSIFY_STAGE_DISTRIBUTOR:
>+              *engine = ice_parse_engine(ad, &pf->dist_parser_list, pattern,
>+                              actions, meta, error);
>+              break;
>+      case ICE_FLOW_CLASSIFY_STAGE_PERMISSION:
>+              *engine = ice_parse_engine(ad, &pf->perm_parser_list, pattern,
>+                              actions, meta, error);
>+              break;
>+      default:
>+              return -EINVAL;
>+      }
> 
>-      ret = ice_flow_valid_action(dev, actions, error);
>-      if (ret)
>-              return ret;
>+      if (*engine == NULL)
>+              return -EINVAL;
> 
>       return 0;
> }
> 
>+static int
>+ice_flow_validate(struct rte_eth_dev *dev,
>+              const struct rte_flow_attr *attr,
>+              const struct rte_flow_item pattern[],
>+              const struct rte_flow_action actions[],
>+              struct rte_flow_error *error)
>+{
>+      int ret = ICE_ERR_NOT_SUPPORTED;
>+      void *meta = NULL;
>+      struct ice_flow_engine *engine = NULL;
>+
>+      ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+                      &engine, &meta, error);
>+      return ret;
>+}
>+
> static struct rte_flow *
> ice_flow_create(struct rte_eth_dev *dev,
>-               const struct rte_flow_attr *attr,
>-               const struct rte_flow_item pattern[],
>-               const struct rte_flow_action actions[],
>-               struct rte_flow_error *error)
>+              const struct rte_flow_attr *attr,
>+              const struct rte_flow_item pattern[],
>+              const struct rte_flow_action actions[],
>+              struct rte_flow_error *error)
> {
>       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>       struct rte_flow *flow = NULL;
>-      int ret;
>+      int ret = 0;
>+      struct ice_adapter *ad =
>+              ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>+      struct ice_flow_engine *engine = NULL;
>+      void *meta = NULL;
> 
>       flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
>       if (!flow) {
>@@ -626,65 +488,105 @@ ice_flow_create(struct rte_eth_dev *dev,
>               return flow;
>       }
> 
>-      ret = ice_flow_validate(dev, attr, pattern, actions, error);
>+      ret = ice_flow_validate_filter(dev, attr, pattern, actions,
>+                      &engine, &meta, error);
>       if (ret < 0)
>               goto free_flow;
> 
>-      ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
>+      if (engine->create == NULL)
>+              goto free_flow;
>+
>+      ret = engine->create(ad, flow, meta, error);
>       if (ret)
>               goto free_flow;
> 
>+      flow->engine = engine;
>       TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
>       return flow;
> 
> free_flow:
>-      rte_flow_error_set(error, -ret,
>-                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>-                         "Failed to create flow.");
>+      PMD_DRV_LOG(ERR, "Failed to create flow");

Why this change?
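The old code reported the failure to the application through
rte_flow_error_set(); with only PMD_DRV_LOG, the caller of
rte_flow_create() gets a NULL return while *error may be left
untouched on some paths. Keeping something like:

	free_flow:
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
		rte_free(flow);
		return NULL;

would preserve that (note -ret assumes ret is negative on every path
reaching this label; the engine->create == NULL goto above arrives
with ret == 0).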

>       rte_free(flow);
>       return NULL;
> }
> 
> static int
> ice_flow_destroy(struct rte_eth_dev *dev,
>-               struct rte_flow *flow,
>-               struct rte_flow_error *error)
>+              struct rte_flow *flow,
>+              struct rte_flow_error *error)
> {
>       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>+      struct ice_adapter *ad =
>+              ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
>       int ret = 0;
> 
>-      ret = ice_destroy_switch_filter(pf, flow, error);
>-
>+      if (!flow || !flow->engine->destroy) {
>+              rte_flow_error_set(error, EINVAL,
>+                              RTE_FLOW_ERROR_TYPE_HANDLE,
>+                              NULL, "NULL flow or NULL destroy");
>+              return -rte_errno;
>+      }
>+      ret = flow->engine->destroy(ad, flow, error);
>       if (!ret) {
>               TAILQ_REMOVE(&pf->flow_list, flow, node);
>               rte_free(flow);
>-      } else {
>-              rte_flow_error_set(error, -ret,
>-                                 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>-                                 "Failed to destroy flow.");
>-      }
>+      } else
>+              PMD_DRV_LOG(ERR, "Failed to destroy flow");

Ditto.

> 
>       return ret;
> }
> 
> static int
> ice_flow_flush(struct rte_eth_dev *dev,
>-             struct rte_flow_error *error)
>+              struct rte_flow_error *error)
> {
>       struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
>-      struct rte_flow *p_flow;
>+      struct rte_flow *p_flow = NULL;
>       void *temp;
>       int ret = 0;
> 
>       TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
>               ret = ice_flow_destroy(dev, p_flow, error);
>               if (ret) {
>-                      rte_flow_error_set(error, -ret,
>-                                         RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
>-                                         "Failed to flush SW flows.");
>-                      return -rte_errno;
>+                      PMD_DRV_LOG(ERR, "Failed to flush flows");

Ditto.


Thanks,
Xiaolong
