From: Junyu Jiang <junyux.ji...@intel.com>

This patch adds a VXLAN-GPE flow parsing function to support
VXLAN-GPE classification.

Signed-off-by: Junyu Jiang <junyux.ji...@intel.com>
---
 drivers/net/i40e/i40e_ethdev.c |  14 +-
 drivers/net/i40e/i40e_ethdev.h |   5 +
 drivers/net/i40e/i40e_flow.c   | 375 +++++++++++++++++++++++++++++++++
 3 files changed, 392 insertions(+), 2 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 4778aaf29..011796ad2 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -7904,10 +7904,17 @@ i40e_tunnel_filter_convert(
        tunnel_filter->input.inner_vlan = cld_filter->element.inner_vlan;
        if ((rte_le_to_cpu_16(cld_filter->element.flags) &
             I40E_AQC_ADD_CLOUD_FLAGS_IPV6) ==
-           I40E_AQC_ADD_CLOUD_FLAGS_IPV6)
+           I40E_AQC_ADD_CLOUD_FLAGS_IPV6) {
+               rte_memcpy(tunnel_filter->input.ip_addr.ipv6_addr,
+                          &cld_filter->element.ipaddr.v6.data,
+                          sizeof(cld_filter->element.ipaddr.v6.data));
                tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6;
-       else
+       } else {
+               rte_memcpy(&tunnel_filter->input.ip_addr.ipv4_addr,
+                          &cld_filter->element.ipaddr.v4.data,
+                          sizeof(cld_filter->element.ipaddr.v4.data));
                tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+       }
        tunnel_filter->input.flags = cld_filter->element.flags;
        tunnel_filter->input.tenant_id = cld_filter->element.tenant_id;
        tunnel_filter->queue = cld_filter->element.queue_number;
@@ -8625,6 +8632,9 @@ i40e_dev_consistent_tunnel_filter_set(struct i40e_pf *pf,
        case I40E_TUNNEL_TYPE_IP_IN_GRE:
                tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
                break;
+       case I40E_TUNNEL_TYPE_VXLAN_GPE:
+               tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE;
+               break;
        case I40E_TUNNEL_TYPE_MPLSoUDP:
                if (!pf->mpls_replace_flag) {
                        i40e_replace_mpls_l1_filter(pf);
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index 1466998aa..756f3e6fd 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -862,6 +862,10 @@ struct i40e_tunnel_filter_input {
        uint16_t flags;          /* Filter type flag */
        uint32_t tenant_id;      /* Tenant id to match */
        uint16_t general_fields[32];  /* Big buffer */
+       union {
+               uint32_t ipv4_addr;   /**< IPv4 address in big endian. */
+               uint32_t ipv6_addr[4];/**< IPv6 address in big endian. */
+       } ip_addr;
 };
 
 struct i40e_tunnel_filter {
@@ -890,6 +894,7 @@ enum i40e_tunnel_type {
        I40E_TUNNEL_TYPE_TEREDO,
        I40E_TUNNEL_TYPE_NVGRE,
        I40E_TUNNEL_TYPE_IP_IN_GRE,
+       I40E_TUNNEL_TYPE_VXLAN_GPE,
        I40E_L2_TUNNEL_TYPE_E_TAG,
        I40E_TUNNEL_TYPE_MPLSoUDP,
        I40E_TUNNEL_TYPE_MPLSoGRE,
diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index adc5da1c5..e29ea6da2 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -108,6 +108,12 @@ static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
                                      const struct rte_flow_action actions[],
                                      struct rte_flow_error *error,
                                      union i40e_filter_t *filter);
+static int i40e_flow_parse_vxlan_gpe_filter(struct rte_eth_dev *dev,
+                                       const struct rte_flow_attr *attr,
+                                       const struct rte_flow_item pattern[],
+                                       const struct rte_flow_action actions[],
+                                       struct rte_flow_error *error,
+                                       union i40e_filter_t *filter);
 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
                                      struct i40e_ethertype_filter *filter);
 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
@@ -1672,6 +1678,65 @@ static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
        RTE_FLOW_ITEM_TYPE_END,
 };
 
+/* Pattern matched VXLAN-GPE tunnel filter */
+static enum rte_flow_item_type pattern_vxlan_gpe_1[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_gpe_2[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_gpe_3[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_VLAN,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_gpe_4[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_VLAN,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_gpe_5[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV4,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
+static enum rte_flow_item_type pattern_vxlan_gpe_6[] = {
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_UDP,
+       RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
+       RTE_FLOW_ITEM_TYPE_ETH,
+       RTE_FLOW_ITEM_TYPE_IPV6,
+       RTE_FLOW_ITEM_TYPE_END,
+};
+
 static struct i40e_valid_pattern i40e_supported_patterns[] = {
        /* Ethertype */
        { pattern_ethertype, i40e_flow_parse_ethertype_filter },
@@ -1866,6 +1931,13 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
        { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
        { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
        { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
+       /* VXLAN_GPE */
+       { pattern_vxlan_gpe_1, i40e_flow_parse_vxlan_gpe_filter },
+       { pattern_vxlan_gpe_2, i40e_flow_parse_vxlan_gpe_filter },
+       { pattern_vxlan_gpe_3, i40e_flow_parse_vxlan_gpe_filter },
+       { pattern_vxlan_gpe_4, i40e_flow_parse_vxlan_gpe_filter },
+       { pattern_vxlan_gpe_5, i40e_flow_parse_vxlan_gpe_filter },
+       { pattern_vxlan_gpe_6, i40e_flow_parse_vxlan_gpe_filter },
 };
 
 #define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
@@ -3782,6 +3854,8 @@ static uint16_t i40e_supported_tunnel_filter_types[] = {
        ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
        ETH_TUNNEL_FILTER_IMAC,
        ETH_TUNNEL_FILTER_IMAC,
+       ETH_TUNNEL_FILTER_OIP,
+       ETH_TUNNEL_FILTER_IIP,
 };
 
 static int
@@ -4699,6 +4773,298 @@ i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
        return ret;
 }
 
+/* 1. Last in item should be NULL as range is not supported.
+ * 2. Supported filter types: IMAC_IVLAN, IMAC_IVLAN_TENID, IMAC_TENID,
+ *    IMAC, OMAC_TENID_IMAC, OIP and IIP.
+ * 3. Mask of fields which need to be matched should be
+ *    filled with 1.
+ * 4. Mask of fields which need not be matched should be
+ *    filled with 0.
+ */
+static int
+i40e_flow_parse_vxlan_gpe_pattern(__rte_unused struct rte_eth_dev *dev,
+                             const struct rte_flow_item *pattern,
+                             struct rte_flow_error *error,
+                             struct i40e_tunnel_filter_conf *filter)
+{
+       const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+       const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+       const struct rte_flow_item_vxlan_gpe *vxlan_gpe_spec;
+       const struct rte_flow_item_vxlan_gpe *vxlan_gpe_mask;
+       const struct rte_flow_item *item = pattern;
+       const struct rte_flow_item_vlan *vlan_spec;
+       const struct rte_flow_item_vlan *vlan_mask;
+       const struct rte_flow_item_eth *eth_spec;
+       const struct rte_flow_item_eth *eth_mask;
+       uint8_t  ipv6_src_addr_mask[16] = {0x00};
+       uint8_t  ipv6_dst_addr_mask[16] = {
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF,
+                                       0xFF, 0xFF, 0xFF, 0xFF
+       };
+       uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
+       enum rte_flow_item_type item_type;
+       uint32_t tenant_id_be = 0;
+       uint8_t filter_type = 0;
+       bool vxlan_gpe_flag = 0;
+       bool is_vni_masked = 0;
+       int ret;
+
+       for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Not support range");
+                       return -rte_errno;
+               }
+               item_type = item->type;
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
+
+                       /* Check if ETH item is used for place holder.
+                        * If yes, both spec and mask should be NULL.
+                        * If no, both spec and mask shouldn't be NULL.
+                        */
+                       if ((!eth_spec && eth_mask) ||
+                           (eth_spec && !eth_mask)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid ether spec/mask");
+                               return -rte_errno;
+                       }
+
+                       if (eth_spec && eth_mask) {
+                               /* DST address of inner MAC shouldn't be masked.
+                                * SRC address of Inner MAC should be masked.
+                                */
+                               if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
+                                   !rte_is_zero_ether_addr(&eth_mask->src) ||
+                                   eth_mask->type) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid ether spec/mask");
+                                       return -rte_errno;
+                               }
+
+                               if (!vxlan_gpe_flag) {
+                                       rte_memcpy(&filter->outer_mac,
+                                                  &eth_spec->dst,
+                                                  RTE_ETHER_ADDR_LEN);
+                                       filter_type |= ETH_TUNNEL_FILTER_OMAC;
+                               } else {
+                                       rte_memcpy(&filter->inner_mac,
+                                                  &eth_spec->dst,
+                                                  RTE_ETHER_ADDR_LEN);
+                                       filter_type |= ETH_TUNNEL_FILTER_IMAC;
+                               }
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VLAN:
+                       vlan_spec = item->spec;
+                       vlan_mask = item->mask;
+                       if (!(vlan_spec && vlan_mask) ||
+                           vlan_mask->inner_type) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid vlan item");
+                               return -rte_errno;
+                       }
+
+                       if (vlan_spec && vlan_mask) {
+                               if (vlan_mask->tci ==
+                                   rte_cpu_to_be_16(I40E_TCI_MASK))
+                                       filter->inner_vlan =
+                                             rte_be_to_cpu_16(vlan_spec->tci) &
+                                             I40E_TCI_MASK;
+                               filter_type |= ETH_TUNNEL_FILTER_IVLAN;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
+                       ipv4_spec = item->spec;
+                       ipv4_mask = item->mask;
+
+                       if ((!ipv4_spec && ipv4_mask) ||
+                           (ipv4_spec && !ipv4_mask)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid IPv4 spec/mask");
+                               return -rte_errno;
+                       }
+
+                       if (ipv4_spec && ipv4_mask) {
+                               if (ipv4_mask->hdr.version_ihl ||
+                                   ipv4_mask->hdr.total_length ||
+                                   ipv4_mask->hdr.packet_id ||
+                                   ipv4_mask->hdr.fragment_offset ||
+                                   ipv4_mask->hdr.hdr_checksum ||
+                                   ipv4_mask->hdr.type_of_service ||
+                                   ipv4_mask->hdr.time_to_live ||
+                                   ipv4_mask->hdr.next_proto_id ||
+                                   ipv4_mask->hdr.src_addr ||
+                                   ipv4_mask->hdr.dst_addr != UINT32_MAX) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                                       item,
+                                                       "Invalid IPv4 item");
+                                       return -rte_errno;
+                               }
+
+                               filter->ip_addr.ipv4_addr =
+                               ipv4_spec->hdr.dst_addr;
+                               if (!vxlan_gpe_flag)
+                                       filter_type |= ETH_TUNNEL_FILTER_OIP;
+                               else
+                                       filter_type |= ETH_TUNNEL_FILTER_IIP;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
+                       ipv6_spec = item->spec;
+                       ipv6_mask = item->mask;
+
+                       if ((!ipv6_spec && ipv6_mask) ||
+                           (ipv6_spec && !ipv6_mask)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid IPv6 spec/mask");
+                               return -rte_errno;
+                       }
+
+                       if (ipv6_spec && ipv6_mask) {
+                               if (ipv6_mask->hdr.vtc_flow ||
+                                   ipv6_mask->hdr.payload_len ||
+                                   ipv6_mask->hdr.proto ||
+                                   ipv6_mask->hdr.hop_limits ||
+                                       memcmp(ipv6_mask->hdr.src_addr,
+                                              ipv6_src_addr_mask,
+                                              RTE_DIM(ipv6_mask->hdr.src_addr)) ||
+                                       memcmp(ipv6_mask->hdr.dst_addr,
+                                              ipv6_dst_addr_mask,
+                                              RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                       RTE_FLOW_ERROR_TYPE_ITEM,
+                                                       item,
+                                                       "Invalid IPv6 item");
+                                       return -rte_errno;
+                               }
+
+                               rte_memcpy(filter->ip_addr.ipv6_addr,
+                                       ipv6_spec->hdr.dst_addr, 16);
+                               if (!vxlan_gpe_flag)
+                                       filter_type |= ETH_TUNNEL_FILTER_OIP;
+                               else
+                                       filter_type |= ETH_TUNNEL_FILTER_IIP;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       /* UDP is used to describe protocol,
+                        * spec and mask should be NULL.
+                        */
+                       if (item->spec || item->mask) {
+                               rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Invalid UDP item");
+                               return -rte_errno;
+                       }
+                       break;
+               case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+                       vxlan_gpe_spec = item->spec;
+                       vxlan_gpe_mask = item->mask;
+                       /* Check if VXLAN-GPE item is used to describe protocol.
+                        * If yes, both spec and mask should be NULL.
+                        * If no, both spec and mask shouldn't be NULL.
+                        */
+                       if ((!vxlan_gpe_spec && vxlan_gpe_mask) ||
+                           (vxlan_gpe_spec && !vxlan_gpe_mask)) {
+                               rte_flow_error_set(error, EINVAL,
+                                          RTE_FLOW_ERROR_TYPE_ITEM,
+                                          item,
+                                          "Invalid VXLAN item");
+                               return -rte_errno;
+                       }
+
+                       /* Check if VNI is masked. */
+                       if (vxlan_gpe_spec && vxlan_gpe_mask) {
+                               is_vni_masked =
+                                       !!memcmp(vxlan_gpe_mask->vni, vni_mask,
+                                                RTE_DIM(vni_mask));
+                               if (is_vni_masked) {
+                                       rte_flow_error_set(error, EINVAL,
+                                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                                  item,
+                                                  "Invalid VNI mask");
+                                       return -rte_errno;
+                               }
+
+                               rte_memcpy(((uint8_t *)&tenant_id_be + 1),
+                                          vxlan_gpe_spec->vni, 3);
+                               filter->tenant_id =
+                                       rte_be_to_cpu_32(tenant_id_be);
+                               filter_type |= ETH_TUNNEL_FILTER_TENID;
+                       }
+
+                       vxlan_gpe_flag = 1;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       ret = i40e_check_tunnel_filter_type(filter_type);
+       if (ret < 0) {
+               rte_flow_error_set(error, EINVAL,
+                                  RTE_FLOW_ERROR_TYPE_ITEM,
+                                  NULL,
+                                  "Invalid filter type");
+               return -rte_errno;
+       }
+       filter->filter_type = filter_type;
+
+       filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN_GPE;
+
+       return 0;
+}
+
+static int
+i40e_flow_parse_vxlan_gpe_filter(struct rte_eth_dev *dev,
+                            const struct rte_flow_attr *attr,
+                            const struct rte_flow_item pattern[],
+                            const struct rte_flow_action actions[],
+                            struct rte_flow_error *error,
+                            union i40e_filter_t *filter)
+{
+       struct i40e_tunnel_filter_conf *tunnel_filter =
+               &filter->consistent_tunnel_filter;
+       int ret;
+       ret = i40e_flow_parse_vxlan_gpe_pattern(dev, pattern,
+                                           error, tunnel_filter);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
+       if (ret)
+               return ret;
+
+       ret = i40e_flow_parse_attr(attr, error);
+       if (ret)
+               return ret;
+
+       cons_filter_type = RTE_ETH_FILTER_TUNNEL;
+
+       return ret;
+}
+
 /**
  * This function is used to do configuration i40e existing RSS with rte_flow.
  * It also enable queue region configuration using flow API for i40e.
@@ -5556,6 +5922,15 @@ i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
        cld_filter.element.flags = filter->input.flags;
        cld_filter.element.tenant_id = filter->input.tenant_id;
        cld_filter.element.queue_number = filter->queue;
+       if (filter->input.ip_type == I40E_TUNNEL_IPTYPE_IPV4)
+               rte_memcpy(&cld_filter.element.ipaddr.v4.data,
+                                  &filter->input.ip_addr.ipv4_addr,
+                                  sizeof(cld_filter.element.ipaddr.v4.data));
+       else
+               rte_memcpy(&cld_filter.element.ipaddr.v6.data,
+                              filter->input.ip_addr.ipv6_addr,
+                              sizeof(cld_filter.element.ipaddr.v6.data));
+
        rte_memcpy(cld_filter.general_fields,
                   filter->input.general_fields,
                   sizeof(cld_filter.general_fields));
-- 
2.17.1

Reply via email to