> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Wednesday, June 7, 2017 1:46 PM
> To: Xing, Beilei <beilei.x...@intel.com>; Wu, Jingjing <jingjing...@intel.com>
> Cc: dev@dpdk.org
> Subject: RE: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
>
> Hi Beilei,
>
> > -----Original Message-----
> > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Beilei Xing
> > Sent: Thursday, June 1, 2017 2:57 PM
> > To: Wu, Jingjing
> > Cc: dev@dpdk.org
> > Subject: [dpdk-dev] [PATCH v2 2/2] net/i40e: add NVGRE parsing function
> >
> > This patch adds NVGRE parsing function to support NVGRE classification.
> >
> > Signed-off-by: Beilei Xing <beilei.x...@intel.com>
> > ---
> >  drivers/net/i40e/i40e_flow.c | 271 ++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 269 insertions(+), 2 deletions(-)
> >
> >
> >  /* 1. Last in item should be NULL as range is not supported.
> > + * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
> > + * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
> > + * 3. Mask of fields which need to be matched should be
> > + * filled with 1.
> > + * 4. Mask of fields which needn't to be matched should be
> > + * filled with 0.
> > + */
> > +static int
> > +i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			      const struct rte_flow_item *pattern,
> > +			      struct rte_flow_error *error,
> > +			      struct i40e_tunnel_filter_conf *filter) {
> > +	const struct rte_flow_item *item = pattern;
> > +	const struct rte_flow_item_eth *eth_spec;
> > +	const struct rte_flow_item_eth *eth_mask;
> > +	const struct rte_flow_item_nvgre *nvgre_spec;
> > +	const struct rte_flow_item_nvgre *nvgre_mask;
> > +	const struct rte_flow_item_vlan *vlan_spec;
> > +	const struct rte_flow_item_vlan *vlan_mask;
> > +	enum rte_flow_item_type item_type;
> > +	uint8_t filter_type = 0;
> > +	bool is_tni_masked = 0;
> > +	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
> > +	bool nvgre_flag = 0;
> > +	uint32_t tenant_id_be = 0;
> > +	int ret;
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   item,
> > +					   "Not support range");
> > +			return -rte_errno;
> > +		}
> > +		item_type = item->type;
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			eth_spec = (const struct rte_flow_item_eth *)item->spec;
> > +			eth_mask = (const struct rte_flow_item_eth *)item->mask;
> > +			if ((!eth_spec && eth_mask) ||
> > +			    (eth_spec && !eth_mask)) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid ether spec/mask");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (eth_spec && eth_mask) {
> > +				/* DST address of inner MAC shouldn't be masked.
> > +				 * SRC address of Inner MAC should be masked.
> > +				 */
> > +				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
> > +				    !is_zero_ether_addr(&eth_mask->src) ||
> > +				    eth_mask->type) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item,
> > +						"Invalid ether spec/mask");
> > +					return -rte_errno;
> > +				}
> > +
> > +				if (!nvgre_flag) {
> > +					rte_memcpy(&filter->outer_mac,
> > +						   &eth_spec->dst,
> > +						   ETHER_ADDR_LEN);
> > +					filter_type |= ETH_TUNNEL_FILTER_OMAC;
> > +				} else {
> > +					rte_memcpy(&filter->inner_mac,
> > +						   &eth_spec->dst,
> > +						   ETHER_ADDR_LEN);
> > +					filter_type |= ETH_TUNNEL_FILTER_IMAC;
> > +				}
> > +			}
> Nothing to do if both spec and mask are NULL, right? If so, would you like to
> add comments here?
OK. Will update in v2.

>
> > +
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_VLAN:
> > +			vlan_spec =
> > +				(const struct rte_flow_item_vlan *)item->spec;
> > +			vlan_mask =
> > +				(const struct rte_flow_item_vlan *)item->mask;
> > +			if (nvgre_flag) {
> Why do we need to check nvgre_flag? It seems VLAN must be after NVGRE, so
> this flag is always 1.
It's used to distinguish the outer MAC from the inner MAC.
>
> > +				if (!(vlan_spec && vlan_mask)) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item,
> > +						"Invalid vlan item");
> > +					return -rte_errno;
> > +				}
> > +			} else {
> > +				if (vlan_spec || vlan_mask)
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item,
> > +						"Invalid vlan item");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (vlan_spec && vlan_mask) {
> > +				if (vlan_mask->tci ==
> > +				    rte_cpu_to_be_16(I40E_TCI_MASK))
> > +					filter->inner_vlan =
> > +						rte_be_to_cpu_16(vlan_spec->tci) &
> > +						I40E_TCI_MASK;
> > +				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
> > +			}
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_IPV4:
> > +			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
> > +			/* IPv4 is used to describe protocol,
> > +			 * spec and mask should be NULL.
> > +			 */
> > +			if (item->spec || item->mask) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid IPv4 item");
> > +				return -rte_errno;
> > +			}
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_IPV6:
> > +			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
> > +			/* IPv6 is used to describe protocol,
> > +			 * spec and mask should be NULL.
> > +			 */
> > +			if (item->spec || item->mask) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid IPv6 item");
> > +				return -rte_errno;
> > +			}
> > +			break;
> > +		case RTE_FLOW_ITEM_TYPE_NVGRE:
> > +			nvgre_spec =
> > +				(const struct rte_flow_item_nvgre *)item->spec;
> > +			nvgre_mask =
> > +				(const struct rte_flow_item_nvgre *)item->mask;
> > +			/* Check if NVGRE item is used to describe protocol.
> > +			 * If yes, both spec and mask should be NULL.
> > +			 * If no, either spec or mask shouldn't be NULL.
> > +			 */
> > +			if ((!nvgre_spec && nvgre_mask) ||
> > +			    (nvgre_spec && !nvgre_mask)) {
> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid NVGRE item");
> > +				return -rte_errno;
> > +			}
> > +
> > +			if (nvgre_spec && nvgre_mask) {
> > +				is_tni_masked =
> > +					!!memcmp(nvgre_mask->tni, tni_mask,
> > +						 RTE_DIM(tni_mask));
> > +				if (is_tni_masked) {
> > +					rte_flow_error_set(error, EINVAL,
> > +						RTE_FLOW_ERROR_TYPE_ITEM,
> > +						item,
> > +						"Invalid TNI mask");
> > +					return -rte_errno;
> > +				}
> > +				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
> > +					   nvgre_spec->tni, 3);
> > +				filter->tenant_id =
> > +					rte_be_to_cpu_32(tenant_id_be);
> > +				filter_type |= ETH_TUNNEL_FILTER_TENID;
> > +			}
> A similar concern. Would a comment here for the NULL spec and mask case be
> better?
OK.
>
> > +
> > +			nvgre_flag = 1;
> > +			break;
> > +		default:
> > +			break;
> > +		}
> > +	}
> > +
> > +	ret = i40e_check_tunnel_filter_type(filter_type);
> > +	if (ret < 0) {
> > +		rte_flow_error_set(error, EINVAL,
> > +				   RTE_FLOW_ERROR_TYPE_ITEM,
> > +				   NULL,
> > +				   "Invalid filter type");
> > +		return -rte_errno;
> > +	}
> > +	filter->filter_type = filter_type;
> > +
> > +	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
> > +
> > +	return 0;
> > +}
> > +
> > +static int
> > +i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
> > +			     const struct rte_flow_attr *attr,
> > +			     const struct rte_flow_item pattern[],
> > +			     const struct rte_flow_action actions[],
> > +			     struct rte_flow_error *error,
> > +			     union i40e_filter_t *filter)
> > +{
> > +	struct i40e_tunnel_filter_conf *tunnel_filter =
> > +		&filter->consistent_tunnel_filter;
> > +	int ret;
> > +
> > +	ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
> > +					    error, tunnel_filter);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
> > +	if (ret)
> > +		return ret;
> > +
> > +	ret = i40e_flow_parse_attr(attr, error);
> > +	if (ret)
> > +		return ret;
> > +
> > +	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
> > +
> > +	return ret;
> > +}
> > +
> > +/* 1. Last in item should be NULL as range is not supported.
> >  * 2. Supported filter types: MPLS label.
> >  * 3. Mask of fields which need to be matched should be
> >  * filled with 1.
> > --
> > 2.5.5
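For anyone following the pattern rules discussed above, here is a minimal sketch (not part of this patch) of a rule the new parser should classify as IMAC_IVLAN_TENID. It also shows what nvgre_flag is for: the ETH item seen before NVGRE is the outer MAC (left completely unmatched here, i.e. NULL spec/mask), while the ETH item after NVGRE carries the inner MAC. The function name, MAC address, TNI, VLAN TCI and queue index are made-up example values, the queue action is only one of the actions the tunnel action parser may accept, and exact rte_flow field names or the rte_flow_validate() prototype can differ between DPDK releases.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

/* Build and validate an NVGRE rule of type IMAC_IVLAN_TENID (sketch). */
static int
nvgre_imac_ivlan_tenid_example(uint16_t port_id, struct rte_flow_error *err)
{
	/* Inner MAC: DST fully masked, SRC and ether type left zero,
	 * as required by the checks in i40e_flow_parse_nvgre_pattern().
	 */
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	/* 24-bit TNI; its mask must be all ones (see the TNI mask check). */
	struct rte_flow_item_nvgre nvgre_spec = { .tni = { 0x12, 0x34, 0x56 } };
	struct rte_flow_item_nvgre nvgre_mask = { .tni = { 0xFF, 0xFF, 0xFF } };
	/* Inner VLAN: full TCI mask so inner_vlan is filled in. */
	struct rte_flow_item_vlan vlan_spec = { .tci = rte_cpu_to_be_16(100) };
	struct rte_flow_item_vlan vlan_mask = { .tci = rte_cpu_to_be_16(0xFFFF) };

	struct rte_flow_item pattern[] = {
		/* Outer ETH: spec/mask NULL, nothing to match (nvgre_flag == 0). */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		/* IPv4 describes the protocol only, so no spec/mask. */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
		  .spec = &nvgre_spec, .mask = &nvgre_mask },
		/* ETH after NVGRE (nvgre_flag == 1): inner MAC -> IMAC. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		/* Inner VLAN -> IVLAN. */
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}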
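On the tenant ID handling: the three TNI bytes are copied into bytes 1-3 of a zeroed 32-bit value that is then treated as big-endian, so the 24-bit ID ends up in the low bits of filter->tenant_id on any host. A small standalone sketch of the same byte trick (plain memcpy standing in for rte_memcpy; the TNI value in the comment is only an example):

#include <stdint.h>
#include <string.h>
#include <rte_byteorder.h>

/* Mirrors the TNI handling in i40e_flow_parse_nvgre_pattern():
 * tni = {0x12, 0x34, 0x56} gives memory {0x00, 0x12, 0x34, 0x56},
 * which rte_be_to_cpu_32() turns into 0x00123456 on any endianness.
 */
static uint32_t
tni_to_tenant_id(const uint8_t tni[3])
{
	uint32_t tenant_id_be = 0;

	memcpy((uint8_t *)&tenant_id_be + 1, tni, 3);
	return rte_be_to_cpu_32(tenant_id_be);
}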