Hi Wenzhuo,

> -----Original Message-----
> From: Lu, Wenzhuo
> Sent: Friday, March 31, 2017 1:59 AM
> To: Iremonger, Bernard <bernard.iremon...@intel.com>; dev@dpdk.org;
> Xing, Beilei <beilei.x...@intel.com>; Wu, Jingjing <jingjing...@intel.com>
> Cc: Zhang, Helin <helin.zh...@intel.com>
> Subject: RE: [PATCH v5 3/5] net/i40e: parse QinQ pattern
>
> Hi Bernard,
>
> > -----Original Message-----
> > From: Iremonger, Bernard
> > Sent: Friday, March 31, 2017 12:10 AM
> > To: dev@dpdk.org; Xing, Beilei; Wu, Jingjing
> > Cc: Zhang, Helin; Lu, Wenzhuo; Iremonger, Bernard
> > Subject: [PATCH v5 3/5] net/i40e: parse QinQ pattern
> >
> > add QinQ pattern.
> > add i40e_flow_parse_qinq_pattern function.
> > add i40e_flow_parse_qinq_filter function.
> >
> > Signed-off-by: Bernard Iremonger <bernard.iremon...@intel.com>
> > ---
> >  drivers/net/i40e/i40e_flow.c | 145 ++++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 143 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
> > index dc456c338..bbec7dc1c 100644
> > --- a/drivers/net/i40e/i40e_flow.c
> > +++ b/drivers/net/i40e/i40e_flow.c
> > @@ -1,7 +1,7 @@
> >  /*-
> >   *   BSD LICENSE
> >   *
> > - *   Copyright (c) 2016 Intel Corporation. All rights reserved.
> > + *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
> >   *
> >   *   Redistribution and use in source and binary forms, with or without
> >   *   modification, are permitted provided that the following conditions
> > @@ -127,6 +127,18 @@ static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
> >  static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
> >  static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
> >  static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
> > +static int
> > +i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
> > +			    const struct rte_flow_attr *attr,
> > +			    const struct rte_flow_item pattern[],
> > +			    const struct rte_flow_action actions[],
> > +			    struct rte_flow_error *error,
> > +			    union i40e_filter_t *filter);
> > +static int
> > +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			     const struct rte_flow_item *pattern,
> > +			     struct rte_flow_error *error,
> > +			     struct i40e_tunnel_filter_conf *filter);
> >
> >  const struct rte_flow_ops i40e_flow_ops = {
> >  	.validate = i40e_flow_validate,
> > @@ -317,6 +329,14 @@ static enum rte_flow_item_type pattern_mpls_4[] = {
> >  	RTE_FLOW_ITEM_TYPE_END,
> >  };
> >
> > +/* Pattern matched QINQ */
> > +static enum rte_flow_item_type pattern_qinq_1[] = {
> > +	RTE_FLOW_ITEM_TYPE_ETH,
> > +	RTE_FLOW_ITEM_TYPE_VLAN,
> > +	RTE_FLOW_ITEM_TYPE_VLAN,
> > +	RTE_FLOW_ITEM_TYPE_END,
> > +};
> > +
> >  static struct i40e_valid_pattern i40e_supported_patterns[] = {
> >  	/* Ethertype */
> >  	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
> > @@ -347,6 +367,8 @@ static struct i40e_valid_pattern i40e_supported_patterns[] = {
> >  	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
> >  	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
> >  	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
> > +	/* QINQ */
> > +	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
> >  };
> >
> >  #define NEXT_ITEM_OF_ACTION(act, actions, index)				\
> > @@ -1170,7 +1192,7 @@ i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
> >  	return 0;
> >  }
> >
> > -/* Parse to get the action info of a tunnle filter
> > +/* Parse to get the action info of a tunnel filter
> >   * Tunnel action only supports PF, VF and QUEUE.
> >   */
> >  static int
> > @@ -1719,6 +1741,125 @@ i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
> >  }
> >
> >  static int
> > +i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
> > +			     const struct rte_flow_item *pattern,
> > +			     struct rte_flow_error *error,
> > +			     struct i40e_tunnel_filter_conf *filter)
> > +{
> > +	const struct rte_flow_item *item = pattern;
> > +	const struct rte_flow_item_eth *eth_spec;
> > +	const struct rte_flow_item_eth *eth_mask;
> > +	const struct rte_flow_item_vlan *vlan_spec = NULL;
> > +	const struct rte_flow_item_vlan *vlan_mask = NULL;
> > +	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
> > +	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
> > +	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
> > +	const struct rte_flow_item_vlan *o_vlan_mask = NULL;
> > +
> > +	enum rte_flow_item_type item_type;
> > +	bool vlan_flag = 0;
> > +
> > +	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> > +		if (item->last) {
> > +			rte_flow_error_set(error, EINVAL,
> > +					   RTE_FLOW_ERROR_TYPE_ITEM,
> > +					   item,
> > +					   "Not support range");
> > +			return -rte_errno;
> > +		}
> > +		item_type = item->type;
> > +		switch (item_type) {
> > +		case RTE_FLOW_ITEM_TYPE_ETH:
> > +			eth_spec = (const struct rte_flow_item_eth *)item->spec;
> > +			eth_mask = (const struct rte_flow_item_eth *)item->mask;
> > +			if (eth_spec && eth_mask) {
> Should it be (eth_spec || eth_mask)?
> All the other is good to me.
No, I believe this is correct (I tested it and it worked correctly).

> > +				rte_flow_error_set(error, EINVAL,
> > +						   RTE_FLOW_ERROR_TYPE_ITEM,
> > +						   item,
> > +						   "Invalid ether spec/mask");
> > +				return -rte_errno;
> > +			}
> > +			break;

Regards,

Bernard.
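
[Editorial note, not part of the original thread: below is a minimal usage sketch of how an application might build a flow rule against the new ETH / VLAN / VLAN pattern through the generic rte_flow API. The helper name, port/queue numbers and VLAN IDs are made up for illustration. The ETH item is left without spec and mask, consistent with the eth_spec && eth_mask check discussed above; the full-TCI VLAN masks are an assumption, since the part of i40e_flow_parse_qinq_pattern that validates the VLAN spec/mask is not quoted in this hunk, and the rte_flow_item_vlan field layout has changed between DPDK releases.]

#include <rte_byteorder.h>
#include <rte_flow.h>

/* Hypothetical helper: steer double-tagged traffic with the given outer and
 * inner VLAN IDs to one RX queue. Returns the created flow or NULL on error.
 */
static struct rte_flow *
qinq_rule_create(uint16_t port_id, uint16_t outer_vid, uint16_t inner_vid,
		 uint16_t queue_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* Assumed full-TCI match; spec carries the VLAN ID with PCP/DEI zero. */
	struct rte_flow_item_vlan o_spec = { .tci = rte_cpu_to_be_16(outer_vid) };
	struct rte_flow_item_vlan o_mask = { .tci = rte_cpu_to_be_16(0xffff) };
	struct rte_flow_item_vlan i_spec = { .tci = rte_cpu_to_be_16(inner_vid) };
	struct rte_flow_item_vlan i_mask = { .tci = rte_cpu_to_be_16(0xffff) };

	/* ETH / VLAN / VLAN / END, mirroring pattern_qinq_1 in the patch.
	 * The ETH item deliberately carries no spec/mask.
	 */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &o_spec, .mask = &o_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN, .spec = &i_spec, .mask = &i_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first so the PMD can reject unsupported spec/mask early. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, error) != 0)
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

A roughly equivalent rule can be tried from testpmd with its flow command, e.g. "flow create 0 ingress pattern eth / vlan tci is 100 / vlan tci is 200 / end actions queue index 3 / end", although the exact item tokens depend on the testpmd version in use.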