There is a need for users to use the flexible byte filter on x550.
This patch enables it.

Fixes: 82fb702077f6 ("ixgbe: support new flow director modes for X550")
Fixes: 11777435c727 ("net/ixgbe: parse flow director filter")

Signed-off-by: Wei Zhao <wei.zh...@intel.com>
---
 drivers/net/ixgbe/ixgbe_fdir.c |   9 +-
 drivers/net/ixgbe/ixgbe_flow.c | 274 ++++++++++++++++++++++++++++-------------
 2 files changed, 195 insertions(+), 88 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index e559f0f..deb9a21 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -307,6 +307,8 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
        /* flex byte mask */
        if (info->mask.flex_bytes_mask == 0)
                fdirm |= IXGBE_FDIRM_FLEX;
+       if (info->mask.src_ipv4_mask == 0 && info->mask.dst_ipv4_mask == 0)
+               fdirm |= IXGBE_FDIRM_L3P;
 
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
@@ -356,8 +358,7 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
        /* mask VM pool and DIPv6 since there are currently not supported
         * mask FLEX byte, it will be set in flex_conf
         */
-       uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 |
-                        IXGBE_FDIRM_FLEX;
+       uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
        uint32_t fdiripv6m;
        enum rte_fdir_mode mode = dev->data->dev_conf.fdir_conf.mode;
        uint16_t mac_mask;
@@ -385,6 +386,10 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
+       /* flex byte mask */
+       if (info->mask.flex_bytes_mask == 0)
+               fdirm |= IXGBE_FDIRM_FLEX;
+
        IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
        fdiripv6m = ((u32)0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT);
diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index f0fafeb..dc210c5 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -1622,9 +1622,9 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_item_raw *raw_spec;
        uint8_t j;
-
        struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
+
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
@@ -1651,9 +1651,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
         * value. So, we need not do anything for the not provided fields later.
         */
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-       memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-       rule->mask.vlan_tci_mask = 0;
-       rule->mask.flex_bytes_mask = 0;
+       memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
        /**
         * The first not void item should be
@@ -1665,7 +1663,8 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
-           item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
+           item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
+           item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2201,6 +2200,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                }
 
                raw_mask = item->mask;
+               rule->b_mask = TRUE;
 
                /* check mask */
                if (raw_mask->relative != 0x1 ||
@@ -2217,6 +2217,7 @@ ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                }
 
                raw_spec = item->spec;
+               rule->b_spec = TRUE;
 
                /* check spec */
                if (raw_spec->relative != 0 ||
@@ -2323,6 +2324,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;
+       const struct rte_flow_item_raw *raw_mask;
+       const struct rte_flow_item_raw *raw_spec;
        uint32_t j;
 
        if (!pattern) {
@@ -2351,8 +2354,7 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
         * value. So, we need not do anything for the not provided fields later.
         */
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-       memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
-       rule->mask.vlan_tci_mask = 0;
+       memset(&rule->mask, 0, sizeof(struct ixgbe_hw_fdir_mask));
 
        /**
         * The first not void item should be
@@ -2364,7 +2366,8 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
-           item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+           item->type != RTE_FLOW_ITEM_TYPE_NVGRE &&
+           item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
@@ -2520,6 +2523,18 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                                &rule->ixgbe_fdir.formatted.tni_vni),
                                vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
                }
+               /* check if the next not void item is MAC, VLAN, RAW or END */
+               item = next_no_void_pattern(pattern, item);
+               if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+                       item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+                       item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+                       item->type != RTE_FLOW_ITEM_TYPE_END){
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
        }
 
        /* Get the NVGRE info */
@@ -2616,16 +2631,19 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                        rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
                        nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
                }
-       }
 
-       /* check if the next not void item is MAC */
-       item = next_no_void_pattern(pattern, item);
-       if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
-               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Not supported by fdir filter");
-               return -rte_errno;
+               /* check if the next not void item is MAC, VLAN, RAW or END */
+               item = next_no_void_pattern(pattern, item);
+               if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+                       item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+                       item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+                       item->type != RTE_FLOW_ITEM_TYPE_END){
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
        }
 
        /**
@@ -2633,92 +2651,91 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
         * others should be masked.
         */
 
-       if (!item->mask) {
-               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Not supported by fdir filter");
-               return -rte_errno;
-       }
-       /*Not supported last point for range*/
-       if (item->last) {
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       item, "Not supported last point for range");
-               return -rte_errno;
-       }
-       rule->b_mask = TRUE;
-       eth_mask = item->mask;
-
-       /* Ether type should be masked. */
-       if (eth_mask->type) {
-               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Not supported by fdir filter");
-               return -rte_errno;
-       }
-
-       /* src MAC address should be masked. */
-       for (j = 0; j < ETHER_ADDR_LEN; j++) {
-               if (eth_mask->src.addr_bytes[j]) {
-                       memset(rule, 0,
-                              sizeof(struct ixgbe_fdir_rule));
+       if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+               if (!item->mask) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter");
                        return -rte_errno;
                }
-       }
-       rule->mask.mac_addr_byte_mask = 0;
-       for (j = 0; j < ETHER_ADDR_LEN; j++) {
-               /* It's a per byte mask. */
-               if (eth_mask->dst.addr_bytes[j] == 0xFF) {
-                       rule->mask.mac_addr_byte_mask |= 0x1 << j;
-               } else if (eth_mask->dst.addr_bytes[j]) {
-                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+               /*Not supported last point for range*/
+               if (item->last) {
                        rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               rule->b_mask = TRUE;
+               eth_mask = item->mask;
+
+               /* Ether type should be masked. */
+               if (eth_mask->type) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by fdir filter");
                        return -rte_errno;
                }
-       }
 
-       /* When no vlan, considered as full mask. */
-       rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
-
-       if (item->spec) {
-               rule->b_spec = TRUE;
-               eth_spec = item->spec;
-
-               /* Get the dst MAC. */
+               /* src MAC address should be masked. */
                for (j = 0; j < ETHER_ADDR_LEN; j++) {
-                       rule->ixgbe_fdir.formatted.inner_mac[j] =
-                               eth_spec->dst.addr_bytes[j];
+                       if (eth_mask->src.addr_bytes[j]) {
+                               memset(rule, 0,
+                              sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
+               }
+               for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                       /* It's a per byte mask. */
+                       if (eth_mask->dst.addr_bytes[j] == 0xFF) {
+                               rule->mask.mac_addr_byte_mask |= 0x1 << j;
+                       } else if (eth_mask->dst.addr_bytes[j]) {
+                               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by fdir filter");
+                               return -rte_errno;
+                       }
                }
-       }
 
-       /**
-        * Check if the next not void item is vlan or ipv4.
-        * IPv6 is not supported.
-        */
-       item = next_no_void_pattern(pattern, item);
-       if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
-               (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
-               memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Not supported by fdir filter");
-               return -rte_errno;
-       }
-       /*Not supported last point for range*/
-       if (item->last) {
-               rte_flow_error_set(error, EINVAL,
+               if (item->spec) {
+                       rule->b_spec = TRUE;
+                       eth_spec = item->spec;
+
+                       /* Get the dst MAC. */
+                       for (j = 0; j < ETHER_ADDR_LEN; j++) {
+                               rule->ixgbe_fdir.formatted.inner_mac[j] =
+                                       eth_spec->dst.addr_bytes[j];
+                       }
+               }
+               /**
+                * Check if the next not void item is vlan or ipv4.
+                * IPv6 is not supported.
+                */
+               item = next_no_void_pattern(pattern, item);
+               if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
+                       item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+                       item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+               /* Not supported last point for range */
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
-               return -rte_errno;
+                       return -rte_errno;
+               }
        }
 
+
        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                if (!(item->spec && item->mask)) {
                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
@@ -2736,10 +2753,90 @@ ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                rule->mask.vlan_tci_mask = vlan_mask->tci;
                rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
                /* More than one tags are not supported. */
+               item = next_no_void_pattern(pattern, item);
+               if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
+                       item->type != RTE_FLOW_ITEM_TYPE_END) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+       }
+
+       /* Get the flex byte info */
+       if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
+               /* Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
+               /* mask should not be null */
+               if (!item->mask || !item->spec) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               raw_mask = item->mask;
+               rule->b_mask = TRUE;
 
+               /* check mask */
+               if (raw_mask->relative != 0x1 ||
+                   raw_mask->search != 0x1 ||
+                   raw_mask->reserved != 0x0 ||
+                   (uint32_t)raw_mask->offset != 0xffffffff ||
+                   raw_mask->limit != 0xffff ||
+                   raw_mask->length != 0xffff) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               raw_spec = item->spec;
+               rule->b_spec = TRUE;
+
+               /* check spec */
+               if (raw_spec->relative != 0 ||
+                   raw_spec->search != 0 ||
+                   raw_spec->reserved != 0 ||
+                   raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
+                   raw_spec->offset % 2 ||
+                   raw_spec->limit != 0 ||
+                   raw_spec->length != 2 ||
+                   /* pattern can't be 0xffff */
+                   (raw_spec->pattern[0] == 0xff &&
+                    raw_spec->pattern[1] == 0xff)) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               /* check pattern mask */
+               if (raw_mask->pattern[0] != 0xff ||
+                   raw_mask->pattern[1] != 0xff) {
+                       memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Not supported by fdir filter");
+                       return -rte_errno;
+               }
+
+               rule->mask.flex_bytes_mask = 0xffff;
+               rule->ixgbe_fdir.formatted.flex_bytes =
+                       (((uint16_t)raw_spec->pattern[1]) << 8) |
+                       raw_spec->pattern[0];
+               rule->flex_bytes_offset = raw_spec->offset;
                /* check if the next not void item is END */
                item = next_no_void_pattern(pattern, item);
-
                if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                        rte_flow_error_set(error, EINVAL,
@@ -2776,12 +2873,17 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
                hw->mac.type != ixgbe_mac_X550EM_a)
                return -ENOTSUP;
 
+       if (fdir_mode == RTE_FDIR_MODE_PERFECT_TUNNEL)
+               goto tunnel_filter;
+
        ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
                                        actions, rule, error);
 
        if (!ret)
                goto step_next;
 
+tunnel_filter:
+
        ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
                                        actions, rule, error);
 
-- 
2.7.5

Reply via email to