Hi, 

> -----Original Message-----
> From: Zhang, Qi Z <qi.z.zh...@intel.com>
> Sent: Monday, June 29, 2020 9:56 AM
> To: Zhao1, Wei <wei.zh...@intel.com>; dev@dpdk.org
> Cc: sta...@dpdk.org; Lu, Nannan <nannan...@intel.com>
> Subject: RE: [PATCH v3 3/4] net/ice: support switch flow for specific L4 type
> 
> 
> 
> > -----Original Message-----
> > From: Zhao1, Wei <wei.zh...@intel.com>
> > Sent: Sunday, June 28, 2020 1:02 PM
> > To: dev@dpdk.org
> > Cc: sta...@dpdk.org; Zhang, Qi Z <qi.z.zh...@intel.com>; Lu, Nannan
> > <nannan...@intel.com>; Zhao1, Wei <wei.zh...@intel.com>
> > Subject: [PATCH v3 3/4] net/ice: support switch flow for specific L4
> > type
> >
> > This patch adds more specific tunnel types for ipv4/ipv6 packets. It
> > enables the tcp/udp layer of ipv4/ipv6 as L4 payload, but without the
> > L4 dst/src port number as input set, for the switch filter rule.
> >
> > Fixes: 47d460d63233 ("net/ice: rework switch filter")
> > Cc: sta...@dpdk.org
> >
> > Signed-off-by: Wei Zhao <wei.zh...@intel.com>
> > ---
> >  drivers/net/ice/ice_switch_filter.c | 27 ++++++++++++++++++++-------
> >  1 file changed, 20 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
> > index c607e8d17..c1ea74c73 100644
> > --- a/drivers/net/ice/ice_switch_filter.c
> > +++ b/drivers/net/ice/ice_switch_filter.c
> > @@ -29,6 +29,8 @@
> >  #define ICE_PPP_IPV4_PROTO 0x0021
> >  #define ICE_PPP_IPV6_PROTO 0x0057
> >  #define ICE_IPV4_PROTO_NVGRE       0x002F
> > +#define ICE_TUN_VXLAN_VALID        0x0001
> > +#define ICE_TUN_NVGRE_VALID        0x0002
> 
> Why not apply the same pattern as the other valid flags?
> I mean use vxlan_valid and nvgre_valid.
> It could be tunnel_valid = vxlan_valid | nvgre_valid.
Because we will extend this to gtp-u and other kinds of packets, there would be
more and more xxx_valid variables.
I think we can follow the rte layer and use bit defines for the kinds of tunnel
packets, as sketched below.
Defining that many separate valid flags would be too complex.
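As a rough sketch of what I mean (the gtp-u bit below is only hypothetical, to
show the extension; this patch only adds the vxlan and nvgre bits):

#define ICE_TUN_VXLAN_VALID    0x0001
#define ICE_TUN_NVGRE_VALID    0x0002
#define ICE_TUN_GTPU_VALID     0x0004  /* hypothetical future tunnel kind */

	/* one variable tracks the tunnel kind seen while parsing items */
	uint16_t tunnel_valid = 0;
	...
	tunnel_valid = ICE_TUN_VXLAN_VALID;
	...
	if (tunnel_valid == ICE_TUN_VXLAN_VALID)
		*tun_type = ICE_SW_TUN_VXLAN;

Adding a new tunnel kind then only needs one more bit define instead of one
more bool in ice_switch_inset_get().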

> 
> >
> >  #define ICE_SW_INSET_ETHER ( \
> >     ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)
> > @@ -471,11 +473,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> >     const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
> >     const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
> >     uint64_t input_set = ICE_INSET_NONE;
> > +   uint16_t tunnel_valid = 0;
> >     bool pppoe_elem_valid = 0;
> >     bool pppoe_patt_valid = 0;
> >     bool pppoe_prot_valid = 0;
> >     bool profile_rule = 0;
> > -   bool tunnel_valid = 0;
> >     bool ipv6_valiad = 0;
> >     bool ipv4_valiad = 0;
> >     bool udp_valiad = 0;
> > @@ -924,7 +926,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> >                             return 0;
> >                     }
> >
> > -                   tunnel_valid = 1;
> > +                   tunnel_valid = ICE_TUN_VXLAN_VALID;
> >                     if (vxlan_spec && vxlan_mask) {
> >                             list[t].type = ICE_VXLAN;
> >                             if (vxlan_mask->vni[0] ||
> > @@ -960,7 +962,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> >                                        "Invalid NVGRE item");
> >                             return 0;
> >                     }
> > -                   tunnel_valid = 1;
> > +                   tunnel_valid = ICE_TUN_NVGRE_VALID;
> >                     if (nvgre_spec && nvgre_mask) {
> >                             list[t].type = ICE_NVGRE;
> >                             if (nvgre_mask->tni[0] ||
> > @@ -1325,6 +1327,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
> >                     *tun_type = ICE_SW_TUN_PPPOE;
> >     }
> >
> > +   if (*tun_type == ICE_NON_TUN) {
> > +           if (tunnel_valid == ICE_TUN_VXLAN_VALID)
> > +                   *tun_type = ICE_SW_TUN_VXLAN;
> > +           else if (tunnel_valid == ICE_TUN_NVGRE_VALID)
> > +                   *tun_type = ICE_SW_TUN_NVGRE;
> > +           else if (ipv4_valiad && tcp_valiad)
> > +                   *tun_type = ICE_SW_IPV4_TCP;
> > +           else if (ipv4_valiad && udp_valiad)
> > +                   *tun_type = ICE_SW_IPV4_UDP;
> > +           else if (ipv6_valiad && tcp_valiad)
> > +                   *tun_type = ICE_SW_IPV6_TCP;
> > +           else if (ipv6_valiad && udp_valiad)
> > +                   *tun_type = ICE_SW_IPV6_UDP;
> > +   }
> > +
> >     *lkups_num = t;
> >
> >     return input_set;
> > @@ -1536,10 +1553,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
> >
> >     for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
> >             item_num++;
> > -           if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
> > -                   tun_type = ICE_SW_TUN_VXLAN;
> > -           if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
> > -                   tun_type = ICE_SW_TUN_NVGRE;
> >             if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
> >                     const struct rte_flow_item_eth *eth_mask;
> >                     if (item->mask)
> > --
> > 2.19.1
> 
