The ixgbe ntuple filter in rte_flow needs to support diverting packets
matched with fewer than 5 tuple parameters. So add this new support in the
parser code.

Signed-off-by: Wei Zhao <wei.zh...@intel.com>
---
 drivers/net/ixgbe/ixgbe_flow.c | 86 ++++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 8f964cf..2ac58cf 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -310,48 +310,49 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                }
        }
 
-       /* get the IPv4 info */
-       if (!item->spec || !item->mask) {
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Invalid ntuple mask");
-               return -rte_errno;
-       }
-       /*Not supported last point for range*/
-       if (item->last) {
-               rte_flow_error_set(error, EINVAL,
-                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
-                       item, "Not supported last point for range");
-               return -rte_errno;
-
-       }
+       if (item->mask) {
+               /* get the IPv4 info */
+               if (!item->spec || !item->mask) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM,
+                               item, "Invalid ntuple mask");
+                       return -rte_errno;
+               }
+               /*Not supported last point for range*/
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                               item, "Not supported last point for range");
+                       return -rte_errno;
+               }
 
-       ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
-       /**
-        * Only support src & dst addresses, protocol,
-        * others should be masked.
-        */
-       if (ipv4_mask->hdr.version_ihl ||
-           ipv4_mask->hdr.type_of_service ||
-           ipv4_mask->hdr.total_length ||
-           ipv4_mask->hdr.packet_id ||
-           ipv4_mask->hdr.fragment_offset ||
-           ipv4_mask->hdr.time_to_live ||
-           ipv4_mask->hdr.hdr_checksum) {
-                       rte_flow_error_set(error,
-                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
-                       item, "Not supported by ntuple filter");
-               return -rte_errno;
-       }
+               ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+               /**
+                * Only support src & dst addresses, protocol,
+                * others should be masked.
+                */
+               if (ipv4_mask->hdr.version_ihl ||
+                       ipv4_mask->hdr.type_of_service ||
+                       ipv4_mask->hdr.total_length ||
+                       ipv4_mask->hdr.packet_id ||
+                       ipv4_mask->hdr.fragment_offset ||
+                       ipv4_mask->hdr.time_to_live ||
+                       ipv4_mask->hdr.hdr_checksum) {
+                               rte_flow_error_set(error,
+                                       EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+                                       item, "Not supported by ntuple filter");
+                               return -rte_errno;
+               }
 
-       filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
-       filter->src_ip_mask = ipv4_mask->hdr.src_addr;
-       filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
+               filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
+               filter->src_ip_mask = ipv4_mask->hdr.src_addr;
+               filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
 
-       ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
-       filter->dst_ip = ipv4_spec->hdr.dst_addr;
-       filter->src_ip = ipv4_spec->hdr.src_addr;
-       filter->proto  = ipv4_spec->hdr.next_proto_id;
+               ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+               filter->dst_ip = ipv4_spec->hdr.dst_addr;
+               filter->src_ip = ipv4_spec->hdr.src_addr;
+               filter->proto  = ipv4_spec->hdr.next_proto_id;
+       }
 
        /* check if the next not void item is TCP or UDP */
        item = next_no_void_pattern(pattern, item);
@@ -366,7 +367,12 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                return -rte_errno;
        }
 
-       /* get the TCP/UDP info */
+       if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
+               (!item->spec && !item->mask)) {
+               goto action;
+       }
+
+       /* get the TCP/UDP/SCTP info */
        if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
                (!item->spec || !item->mask)) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
-- 
2.7.5

Reply via email to