From: wei zhao1 <wei.zh...@intel.com>

Add a rule validation function: check whether a rule is an n-tuple
rule, and get the n-tuple filter info if it is.
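
A minimal application-level sketch of how this path is exercised is
below. The port id (0) and queue index (1) are hypothetical, and the
usual rte_flow.h, rte_byteorder.h, netinet/in.h and stdio.h headers are
assumed; rte_flow_validate() reaches ixgbe_flow_validate() through the
RTE_ETH_FILTER_GENERIC filter_ctrl hook added here:

    /* Validate an ingress n-tuple rule: IPv4 + UDP dst port 53 -> queue 1. */
    struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    struct rte_flow_item_ipv4 ip_spec = { .hdr.next_proto_id = IPPROTO_UDP };
    struct rte_flow_item_ipv4 ip_mask = { .hdr.next_proto_id = 0xff };
    struct rte_flow_item_udp udp_spec = { .hdr.dst_port = rte_cpu_to_be_16(53) };
    struct rte_flow_item_udp udp_mask = { .hdr.dst_port = 0xffff };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec, .mask = &ip_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_error err;

    if (rte_flow_validate(0, &attr, pattern, actions, &err) == 0)
        printf("rule format accepted by the PMD\n");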

Signed-off-by: wei zhao1 <wei.zh...@intel.com>
Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 349 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 349 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f84ca17..d3768c6 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -61,6 +61,8 @@
 #include <rte_random.h>
 #include <rte_dev.h>
 #include <rte_hash_crc.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -393,6 +395,26 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
 static int ixgbe_filter_restore(struct rte_eth_dev *dev);
 static void ixgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 int ixgbe_flush_all_filter(struct rte_eth_dev *dev);
+static enum rte_flow_error_type
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[],
+                        struct rte_eth_ntuple_filter *filter);
+static enum rte_flow_error_type
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                         const struct rte_flow_item pattern[],
+                         const struct rte_flow_action actions[],
+                         struct rte_eth_ntuple_filter *filter);
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+                        const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[]);
+int ixgbe_flow_validate(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error);
 
 /*
  * Define VF Stats MACRO for Non "cleared on read" register
@@ -778,6 +800,14 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
 #define IXGBEVF_NB_XSTATS (sizeof(rte_ixgbevf_stats_strings) / \
                sizeof(rte_ixgbevf_stats_strings[0]))
 
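+/* Generic flow ops exposed through RTE_ETH_FILTER_GENERIC; only
+ * validate is implemented at this stage, the other callbacks stay NULL.
+ */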
+static const struct rte_flow_ops ixgbe_flow_ops = {
+       .validate = ixgbe_flow_validate,
+       .create = NULL,
+       .destroy = NULL,
+       .flush = NULL,
+       .query = NULL,
+};
+
 /**
  * Atomically reads the link status information from global
  * structure rte_eth_dev.
@@ -6311,6 +6341,11 @@ ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
        case RTE_ETH_FILTER_L2_TUNNEL:
                ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
                break;
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+               *(const void **)arg = &ixgbe_flow_ops;
+               break;
        default:
                PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                                                        filter_type);
@@ -7995,6 +8030,320 @@ ixgbe_flush_all_filter(struct rte_eth_dev *dev)
        return 0;
 }
 
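+/* Byte-swap the low 24 bits of x: big endian to CPU order. */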
+static inline uint32_t
+rte_be_to_cpu_24(uint32_t x)
+{
+       return  ((x & 0x000000ffUL) << 16) |
+               (x & 0x0000ff00UL) |
+               ((x & 0x00ff0000UL) >> 16);
+}
+#define IXGBE_MIN_N_TUPLE_PRIO 1
+#define IXGBE_MAX_N_TUPLE_PRIO 7
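+
+/* Skip past any VOID items/actions, starting at index "i"; if the
+ * pattern/actions array itself is missing, clear the filter and
+ * return "ret" from the enclosing parser.
+ */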
+#define PATTERN_SKIP_VOID(filter, filter_struct, ret)\
+       do {\
+               if (!pattern) {\
+                       memset(filter, 0, sizeof(filter_struct));\
+                       return ret;\
+               } \
+               item = pattern + i;\
+               while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
+                       i++;\
+                       item = pattern + i;\
+               } \
+       } while (0)
+
+#define ACTION_SKIP_VOID(filter, filter_struct, ret)\
+       do {\
+               if (!actions) {\
+                       memset(filter, 0, sizeof(filter_struct));\
+                       return ret;\
+               } \
+               act = actions + i;\
+               while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+                       i++;\
+                       act = actions + i;\
+               } \
+       } while (0)
+
+/**
+ * Please be aware there's an assumption for all the parsers:
+ * rte_flow_item uses big endian, while rte_flow_attr and
+ * rte_flow_action use CPU order, because the pattern is used to
+ * describe the packets, and packets normally use network order.
+ */
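+/* For example, an IPv4 destination of 1.2.3.4 arrives in item->spec as
+ * the big endian value 0x01020304, while attr->priority is CPU order.
+ */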
+
+/**
+ * Parse the rule to see if it is an n-tuple rule,
+ * and get the n-tuple filter info if it is.
+ */
+static enum rte_flow_error_type
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                        const struct rte_flow_item pattern[],
+                        const struct rte_flow_action actions[],
+                        struct rte_eth_ntuple_filter *filter)
+{
+       const struct rte_flow_item *item;
+       const struct rte_flow_action *act;
+       const struct rte_flow_item_ipv4 *ipv4_spec;
+       const struct rte_flow_item_ipv4 *ipv4_mask;
+       const struct rte_flow_item_tcp *tcp_spec;
+       const struct rte_flow_item_tcp *tcp_mask;
+       const struct rte_flow_item_udp *udp_spec;
+       const struct rte_flow_item_udp *udp_mask;
+       uint32_t i;
+
+       /************************************************
+        * parse pattern
+        ************************************************/
+       i = 0;
+
+       /* the first not void item can be MAC or IPv4 */
+       PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                         RTE_FLOW_ERROR_TYPE_ITEM_NUM);
+       if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+           item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+
+       /* Skip Ethernet */
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               /* if the first item is MAC, the content should be NULL */
+               if (item->spec || item->mask)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+
+               /* check if the next not void item is IPv4 */
+               i++;
+               PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                                 RTE_FLOW_ERROR_TYPE_ITEM);
+               if (item->type != RTE_FLOW_ITEM_TYPE_IPV4)
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* get the IPv4 info */
+       if (!item->spec || !item->mask)
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+
+       ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+       /**
+        * Only support src & dst addresses, protocol,
+        * others should be masked.
+        */
+       if (ipv4_mask->hdr.version_ihl ||
+           ipv4_mask->hdr.type_of_service ||
+           ipv4_mask->hdr.total_length ||
+           ipv4_mask->hdr.packet_id ||
+           ipv4_mask->hdr.fragment_offset ||
+           ipv4_mask->hdr.time_to_live ||
+           ipv4_mask->hdr.hdr_checksum)
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+
+       filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+       filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+       filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+
+       ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+       filter->dst_ip = ipv4_spec->hdr.dst_addr;
+       filter->src_ip = ipv4_spec->hdr.src_addr;
+       filter->proto  = ipv4_spec->hdr.next_proto_id;
+
+       /* check if the next not void item is TCP or UDP */
+       i++;
+       PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                         RTE_FLOW_ERROR_TYPE_ITEM);
+       if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
+           item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* get the TCP/UDP info */
+       if (!item->spec || !item->mask) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
+               tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+
+               /**
+                * Only support src & dst ports, tcp flags,
+                * others should be masked.
+                */
+               if (tcp_mask->hdr.sent_seq ||
+                   tcp_mask->hdr.recv_ack ||
+                   tcp_mask->hdr.data_off ||
+                   tcp_mask->hdr.rx_win ||
+                   tcp_mask->hdr.cksum ||
+                   tcp_mask->hdr.tcp_urp) {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+               }
+
+               filter->dst_port_mask  = tcp_mask->hdr.dst_port;
+               filter->src_port_mask  = tcp_mask->hdr.src_port;
+               if (tcp_mask->hdr.tcp_flags == 0xFF) {
+                       filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else if (!tcp_mask->hdr.tcp_flags) {
+                       filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
+               } else {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+               }
+
+               tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+               filter->dst_port  = tcp_spec->hdr.dst_port;
+               filter->src_port  = tcp_spec->hdr.src_port;
+               filter->tcp_flags = tcp_spec->hdr.tcp_flags;
+       } else {
+               udp_mask = (const struct rte_flow_item_udp *)item->mask;
+
+               /**
+                * Only support src & dst ports,
+                * others should be masked.
+                */
+               if (udp_mask->hdr.dgram_len ||
+                   udp_mask->hdr.dgram_cksum) {
+                       memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+                       return RTE_FLOW_ERROR_TYPE_ITEM;
+               }
+
+               filter->dst_port_mask = udp_mask->hdr.dst_port;
+               filter->src_port_mask = udp_mask->hdr.src_port;
+
+               udp_spec = (const struct rte_flow_item_udp *)item->spec;
+               filter->dst_port = udp_spec->hdr.dst_port;
+               filter->src_port = udp_spec->hdr.src_port;
+       }
+
+       /* check if the next not void item is END */
+       i++;
+       PATTERN_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                         RTE_FLOW_ERROR_TYPE_ITEM);
+       if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /************************************************
+        * parse action
+        ************************************************/
+       i = 0;
+
+       /**
+        * n-tuple only supports forwarding,
+        * check if the first not void action is QUEUE.
+        */
+       ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                        RTE_FLOW_ERROR_TYPE_ACTION_NUM);
+       if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ACTION;
+       }
+       if (!act->conf) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ACTION;
+       }
+       filter->queue =
+               ((const struct rte_flow_action_queue *)act->conf)->index;
+
+       /* check if the next not void item is END */
+       i++;
+       ACTION_SKIP_VOID(filter, struct rte_eth_ntuple_filter,
+                        RTE_FLOW_ERROR_TYPE_ACTION);
+       if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ACTION;
+       }
+
+       /************************************************
+        * parse attr
+        ************************************************/
+       /* must be input direction */
+       if (!attr->ingress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_INGRESS;
+       }
+
+       /* not supported */
+       if (attr->egress) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_EGRESS;
+       }
+
+       if (attr->priority > 0xFFFF) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+       }
+       filter->priority = (uint16_t)attr->priority;
+
+       return RTE_FLOW_ERROR_TYPE_NONE;
+}
+
+/* An ixgbe-specific parse function, as the supported flags are device-specific. */
+static enum rte_flow_error_type
+ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
+                         const struct rte_flow_item pattern[],
+                         const struct rte_flow_action actions[],
+                         struct rte_eth_ntuple_filter *filter)
+{
+       enum rte_flow_error_type ret;
+
+       ret = cons_parse_ntuple_filter(attr, pattern, actions, filter);
+       if (ret)
+               return ret;
+
+       /* ixgbe doesn't support matching on TCP flags. */
+       if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ITEM;
+       }
+
+       /* ixgbe supports only a limited range of priorities. */
+       if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
+           filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
+               memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+               return RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY;
+       }
+
+       /* fixed value for ixgbe */
+       filter->flags = RTE_5TUPLE_FLAGS;
+       return 0;
+}
+
+/**
+ * Check if the flow rule is supported by ixgbe.
+ * It only checks the format; it doesn't guarantee that the rule can be
+ * programmed into the HW, as there may not be enough room for it.
+ */
+enum rte_flow_error_type
+ixgbe_flow_rule_validate(__rte_unused struct rte_eth_dev *dev,
+                       const struct rte_flow_attr *attr,
+                       const struct rte_flow_item pattern[],
+                       const struct rte_flow_action actions[])
+{
+       enum rte_flow_error_type ret;
+       struct rte_eth_ntuple_filter ntuple_filter;
+
+       memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+       ret = ixgbe_parse_ntuple_filter(attr, pattern, actions, &ntuple_filter);
+       if (!ret)
+               return RTE_FLOW_ERROR_TYPE_NONE;
+
+       return ret;
+}
+
+/* Check whether a flow rule can be created on ixgbe. */
+int
+ixgbe_flow_validate(struct rte_eth_dev *dev,
+               const struct rte_flow_attr *attr,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               struct rte_flow_error *error)
+{
+       error->type = ixgbe_flow_rule_validate(dev, attr, pattern, actions);
+       if (error->type == RTE_FLOW_ERROR_TYPE_NONE)
+               return 0;
+
+       return -EINVAL;
+}
+
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
-- 
2.5.5
