Hi, Yigit,

> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Saturday, January 7, 2017 12:56 AM
> To: Zhao1, Wei <wei.zh...@intel.com>; dev@dpdk.org
> Cc: Lu, Wenzhuo <wenzhuo...@intel.com>
> Subject: Re: [dpdk-dev] [PATCH v2 11/18] net/ixgbe: parse n-tuple filter
>
> On 12/30/2016 7:53 AM, Wei Zhao wrote:
> > Add rule validate function and check if the rule is a n-tuple rule,
> > and get the n-tuple info.
> >
> > Signed-off-by: Wei Zhao <wei.zh...@intel.com>
> > Signed-off-by: Wenzhuo Lu <wenzhuo...@intel.com>
> >
> > ---
> >
> > v2: add new error set function
> > ---
> >  drivers/net/ixgbe/ixgbe_ethdev.c | 414 ++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 409 insertions(+), 5 deletions(-)
> >
> > diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
> > index 0de1318..198cc4b 100644
> > --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> > +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> > @@ -388,6 +388,24 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
> >                                          struct rte_eth_udp_tunnel *udp_tunnel);
>
> <...>
>
> > +
> > +/**
> > + * Parse the rule to see if it is a n-tuple rule.
> > + * And get the n-tuple info BTW.
> > + */
>
> It would be nice to comment here the valid/expected pattern values
> (spec/mask/last). Otherwise it is hard to decode from the code. It is also
> good to document the intention, which makes it easier to check if there is
> any defect.
>
> Also valid actions.

I will do as you suggest in v3.
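For reference, below is roughly the kind of comment I have in mind. It only
restates what the parsing code below already accepts; the exact wording will
be settled in v3:

/**
 * Parse the rule to see if it is a n-tuple rule, and get the n-tuple info.
 *
 * The supported pattern is:
 *   ETH  (optional; spec, mask and last must all be NULL)
 *   IPV4 (spec and mask required, last not supported; only the src/dst
 *         addresses and next_proto_id may be masked)
 *   TCP or UDP (spec and mask required, last not supported; only the
 *         src/dst ports may be masked, and for TCP the tcp_flags mask
 *         must be either 0xFF or 0)
 *   END
 *
 * The supported actions are:
 *   QUEUE
 *   END
 *
 * The attribute must be ingress only, with a priority not larger than 0xFFFF.
 */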
>
> > +static int
> > +cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
> > +                        const struct rte_flow_item pattern[],
> > +                        const struct rte_flow_action actions[],
> > +                        struct rte_eth_ntuple_filter *filter,
> > +                        struct rte_flow_error *error)
> > +{
> > +        const struct rte_flow_item *item;
> > +        const struct rte_flow_action *act;
> > +        const struct rte_flow_item_ipv4 *ipv4_spec;
> > +        const struct rte_flow_item_ipv4 *ipv4_mask;
> > +        const struct rte_flow_item_tcp *tcp_spec;
> > +        const struct rte_flow_item_tcp *tcp_mask;
> > +        const struct rte_flow_item_udp *udp_spec;
> > +        const struct rte_flow_item_udp *udp_mask;
> > +        uint32_t index;
> > +
> > +        if (!pattern) {
> > +                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
> > +                        NULL, "NULL pattern.");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /* parse pattern */
> > +        index = 0;
> > +
> > +        /* the first not void item can be MAC or IPv4 */
> > +        NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +
> > +        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
> > +            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Not supported by ntuple filter");
> > +                return -rte_errno;
> > +        }
> > +        /* Skip Ethernet */
> > +        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
> > +                /*Not supported last point for range*/
> > +                if (item->last) {
> > +                        rte_flow_error_set(error, EINVAL,
> > +                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +                                item, "Not supported last point for range");
> > +                        return -rte_errno;
> > +
> > +                }
> > +                /* if the first item is MAC, the content should be NULL */
> > +                if (item->spec || item->mask) {
> > +                        rte_flow_error_set(error, EINVAL,
> > +                                RTE_FLOW_ERROR_TYPE_ITEM,
> > +                                item, "Not supported by ntuple filter");
> > +                        return -rte_errno;
> > +                }
> > +                /* check if the next not void item is IPv4 */
> > +                index++;
> > +                NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
> > +                        rte_flow_error_set(error,
> > +                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Not supported by ntuple filter");
>
> Wrong indentation.

I will fix the indentation in v3.

>
> > +                        return -rte_errno;
> > +                }
> > +        }
> > +
> > +        /* get the IPv4 info */
> > +        if (!item->spec || !item->mask) {
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Invalid ntuple mask");
> > +                return -rte_errno;
> > +        }
> > +        /*Not supported last point for range*/
> > +        if (item->last) {
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +                        item, "Not supported last point for range");
> > +                return -rte_errno;
> > +
> > +        }
> > +
> > +        ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
> > +        /**
> > +         * Only support src & dst addresses, protocol,
> > +         * others should be masked.
> > +         */
> > +        if (ipv4_mask->hdr.version_ihl ||
> > +            ipv4_mask->hdr.type_of_service ||
> > +            ipv4_mask->hdr.total_length ||
> > +            ipv4_mask->hdr.packet_id ||
> > +            ipv4_mask->hdr.fragment_offset ||
> > +            ipv4_mask->hdr.time_to_live ||
> > +            ipv4_mask->hdr.hdr_checksum) {
> > +                rte_flow_error_set(error,
> > +                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Not supported by ntuple filter");
> > +                return -rte_errno;
> > +        }
> > +
> > +        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
> > +        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
> > +        filter->proto_mask = ipv4_mask->hdr.next_proto_id;
> > +
> > +        ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
> > +        filter->dst_ip = ipv4_spec->hdr.dst_addr;
> > +        filter->src_ip = ipv4_spec->hdr.src_addr;
> > +        filter->proto = ipv4_spec->hdr.next_proto_id;
> > +
> > +        /* check if the next not void item is TCP or UDP */
> > +        index++;
> > +        NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
> > +            item->type != RTE_FLOW_ITEM_TYPE_UDP) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
>
> Sometimes memset filter before return from error, sometimes not. Is memset
> required at all?

Not all of them are necessary. At the beginning, the filter has not been
configured with any values yet, so it does not need to be memset.

>
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Not supported by ntuple filter");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /* get the TCP/UDP info */
> > +        if (!item->spec || !item->mask) {
>
> For example there is no memset here for filter...
>
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Invalid ntuple mask");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /*Not supported last point for range*/
> > +        if (item->last) {
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
> > +                        item, "Not supported last point for range");
> > +                return -rte_errno;
> > +
> > +        }
> > +
> > +        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
> > +                tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
> > +
> > +                /**
> > +                 * Only support src & dst ports, tcp flags,
> > +                 * others should be masked.
> > +                 */
> > +                if (tcp_mask->hdr.sent_seq ||
> > +                    tcp_mask->hdr.recv_ack ||
> > +                    tcp_mask->hdr.data_off ||
> > +                    tcp_mask->hdr.rx_win ||
> > +                    tcp_mask->hdr.cksum ||
> > +                    tcp_mask->hdr.tcp_urp) {
> > +                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                        rte_flow_error_set(error, EINVAL,
> > +                                RTE_FLOW_ERROR_TYPE_ITEM,
> > +                                item, "Not supported by ntuple filter");
> > +                        return -rte_errno;
> > +                }
> > +
> > +                filter->dst_port_mask = tcp_mask->hdr.dst_port;
> > +                filter->src_port_mask = tcp_mask->hdr.src_port;
> > +                if (tcp_mask->hdr.tcp_flags == 0xFF) {
> > +                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
> > +                } else if (!tcp_mask->hdr.tcp_flags) {
> > +                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
> > +                } else {
> > +                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                        rte_flow_error_set(error, EINVAL,
> > +                                RTE_FLOW_ERROR_TYPE_ITEM,
> > +                                item, "Not supported by ntuple filter");
> > +                        return -rte_errno;
> > +                }
> > +
> > +                tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
> > +                filter->dst_port = tcp_spec->hdr.dst_port;
> > +                filter->src_port = tcp_spec->hdr.src_port;
> > +                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
> > +        } else {
> > +                udp_mask = (const struct rte_flow_item_udp *)item->mask;
> > +
> > +                /**
> > +                 * Only support src & dst ports,
> > +                 * others should be masked.
> > +                 */
> > +                if (udp_mask->hdr.dgram_len ||
> > +                    udp_mask->hdr.dgram_cksum) {
> > +                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                        rte_flow_error_set(error, EINVAL,
> > +                                RTE_FLOW_ERROR_TYPE_ITEM,
> > +                                item, "Not supported by ntuple filter");
> > +                        return -rte_errno;
> > +                }
> > +
> > +                filter->dst_port_mask = udp_mask->hdr.dst_port;
> > +                filter->src_port_mask = udp_mask->hdr.src_port;
> > +
> > +                udp_spec = (const struct rte_flow_item_udp *)item->spec;
> > +                filter->dst_port = udp_spec->hdr.dst_port;
> > +                filter->src_port = udp_spec->hdr.src_port;
> > +        }
> > +
> > +        /* check if the next not void item is END */
> > +        index++;
> > +        NEXT_ITEM_OF_PATTERN(item, pattern, index);
> > +        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ITEM,
> > +                        item, "Not supported by ntuple filter");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /* parse action */
> > +        index = 0;
> > +
> > +        if (!actions) {
>
> Although there is no harm, I would do the input check at the beginning of the
> function, to avoid the extra work if we hit this case.

I will do as you suggest in v3.

>
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ACTION_NUM,
> > +                        NULL, "NULL action.");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /**
> > +         * n-tuple only supports forwarding,
> > +         * check if the first not void action is QUEUE.
> > +         */
> > +        NEXT_ITEM_OF_ACTION(act, actions, index);
> > +        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ACTION,
> > +                        item, "Not supported action.");
> > +                return -rte_errno;
> > +        }
> > +        filter->queue =
> > +                ((const struct rte_flow_action_queue *)act->conf)->index;
> > +
> > +        /* check if the next not void item is END */
> > +        index++;
> > +        NEXT_ITEM_OF_ACTION(act, actions, index);
> > +        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ACTION,
> > +                        act, "Not supported action.");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /* parse attr */
> > +        /* must be input direction */
>
> May be a good idea to check if attr is NULL.

I will do as you suggest in v3 and add the check at the beginning of the
function.

>
> > +        if (!attr->ingress) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
> > +                        attr, "Only support ingress.");
> > +                return -rte_errno;
> > +        }
> > +
> > +        /* not supported */
> > +        if (attr->egress) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
> > +                        attr, "Not support egress.");
> > +                return -rte_errno;
> > +        }
> > +
> > +        if (attr->priority > 0xFFFF) {
> > +                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
> > +                rte_flow_error_set(error, EINVAL,
> > +                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
> > +                        attr, "Error priority.");
> > +                return -rte_errno;
> > +        }
> > +        filter->priority = (uint16_t)attr->priority;
>
> Should we check attr->group? Do we support groups?

No, we do not.

>
> > +
> > +        return 0;
> > +}
> > +
>
> <...>
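To sum up, in v3 I plan to move the input checks to the top of
cons_parse_ntuple_filter() so no parsing work is done for invalid input. A
rough sketch of what I have in mind, not the final code (the attr->group
rejection is only an assumption here, since groups are not supported):

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                        NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR,
                        NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* groups are not supported, so reject any non-zero group */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                        attr, "Not supported group.");
                return -rte_errno;
        }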