Re: [dpdk-dev] mbuf->packet_type

2020-11-03 Thread Juhamatti Kuusisaari
Hello Levend,

The packet type is indeed filled in by the hw in case there is support for
it. I think your application may be missing a ptype handling function
which checks the ptypes supported by the hw and registers a software
callback to handle them in case the hw does not support it.

Please see more details e.g. in l3fwd example application about the
ptype checks and callback registration.

Best regards,
--
 Juhamatti

On Tue, 3 Nov 2020 at 09:33, Levend Sayar  wrote:
>
> Hi all.
>
> Nobody cared about my mail. Am I in the wrong place?
> If so, can you forward me to the right place please?
>
> Best,
> Levend
>
> On Mon, Nov 2, 2020 at 1:45 PM Levend Sayar  wrote:
>
> > Hi all.
> >
> > Recently I encountered a dont-know-why issue about mbuf->packet_type.
> >
> > Two different setups behave differently.
> >
> > One is a bare metal server, one KVM based OpenStack VM (virtio).
> > Both use the uio driver. Namely only hardware is different. Software stack
> > is the same.
> >
> > But this macro does not work on OpenStack platform.
> >
> > *RTE_ETH_IS_IPV4_HDR(mbuf->packet_type))*
> >
> > where
> >
> > *#define  RTE_ETH_IS_IPV4_HDR(ptype) ((ptype) & RTE_PTYPE_L3_IPV4)*
> >
> > I manually coded to check IPv4 like this:
> >
> >
> >
> >
> > *struct rte_ether_hdr* ether_hdr = rte_pktmbuf_mtod(mbuf, struct
> > rte_ether_hdr*);uint16_t ether_type = N2H16(ether_hdr->ether_type);if
> > (likely(ether_type == RTE_ETHER_TYPE_IPV4)) {*
> >
> > Do you have any idea about this?
> > I suppose whatever fills the mbuf packet_type field is not working in
> > a virtual environment.
> > Which entity fills that field? Hardware driver, uio, dpdk?
> >
> > Best,
> > Levend
> >


Re: [dpdk-dev] [PATCH v3 07/16] net/mlx5: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Slava Ovsiienko
> -Original Message-
> From: Thomas Monjalon 
> Sent: Tuesday, November 3, 2020 2:14
> To: dev@dpdk.org
> Cc: ferruh.yi...@intel.com; david.march...@redhat.com;
> bruce.richard...@intel.com; olivier.m...@6wind.com;
> andrew.rybche...@oktetlabs.ru; jer...@marvell.com; Slava Ovsiienko
> ; Ruifeng Wang ; David
> Christensen ; Matan Azrad ;
> Shahaf Shuler ; Konstantin Ananyev
> 
> Subject: [PATCH v3 07/16] net/mlx5: switch Rx timestamp to dynamic mbuf
> field
> 
> The mbuf timestamp is moved to a dynamic field in order to allow removal of
> the deprecated static field.
> The related mbuf flag is also replaced.
> 
> The dynamic offset and flag are stored in struct mlx5_rxq_data to favor cache
> locality.
> 
> Signed-off-by: Thomas Monjalon 
> Reviewed-by: Ruifeng Wang 
> Reviewed-by: David Christensen 
Reviewed-by: Viacheslav Ovsiienko 

> ---
>  drivers/net/mlx5/mlx5_rxq.c  |  8 +
>  drivers/net/mlx5/mlx5_rxtx.c |  4 +--
>  drivers/net/mlx5/mlx5_rxtx.h | 19 +++
>  drivers/net/mlx5/mlx5_rxtx_vec_altivec.h | 41 +++---
>  drivers/net/mlx5/mlx5_rxtx_vec_neon.h| 43 
>  drivers/net/mlx5/mlx5_rxtx_vec_sse.h | 35 +--
>  6 files changed, 90 insertions(+), 60 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index
> f1d8373079..52519910ee 100644
> --- a/drivers/net/mlx5/mlx5_rxq.c
> +++ b/drivers/net/mlx5/mlx5_rxq.c
> @@ -1492,7 +1492,15 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t
> idx, uint16_t desc,
>   mlx5_max_lro_msg_size_adjust(dev, idx, max_lro_size);
>   /* Toggle RX checksum offload if hardware supports it. */
>   tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
> + /* Configure Rx timestamp. */
>   tmpl->rxq.hw_timestamp = !!(offloads &
> DEV_RX_OFFLOAD_TIMESTAMP);
> + tmpl->rxq.timestamp_rx_flag = 0;
> + if (tmpl->rxq.hw_timestamp &&
> rte_mbuf_dyn_rx_timestamp_register(
> + &tmpl->rxq.timestamp_offset,
> + &tmpl->rxq.timestamp_rx_flag) != 0) {
> + DRV_LOG(ERR, "Cannot register Rx timestamp field/flag");
> + goto error;
> + }
>   /* Configure VLAN stripping. */
>   tmpl->rxq.vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
>   /* By default, FCS (CRC) is stripped by hardware. */ diff --git
> a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index
> e86468b67a..b577aab00b 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -1287,8 +1287,8 @@ rxq_cq_to_mbuf(struct mlx5_rxq_data *rxq, struct
> rte_mbuf *pkt,
> 
>   if (rxq->rt_timestamp)
>   ts = mlx5_txpp_convert_rx_ts(rxq->sh, ts);
> - pkt->timestamp = ts;
> - pkt->ol_flags |= PKT_RX_TIMESTAMP;
> + mlx5_timestamp_set(pkt, rxq->timestamp_offset, ts);
> + pkt->ol_flags |= rxq->timestamp_rx_flag;
>   }
>  }
> 
> diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index
> 674296ee98..e9eca36b40 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.h
> +++ b/drivers/net/mlx5/mlx5_rxtx.h
> @@ -151,6 +151,8 @@ struct mlx5_rxq_data {
>   /* CQ (UAR) access lock required for 32bit implementations */  #endif
>   uint32_t tunnel; /* Tunnel information. */
> + int timestamp_offset; /* Dynamic mbuf field for timestamp. */
> + uint64_t timestamp_rx_flag; /* Dynamic mbuf flag for timestamp. */
>   uint64_t flow_meta_mask;
>   int32_t flow_meta_offset;
>  } __rte_cache_aligned;
> @@ -681,4 +683,21 @@ mlx5_txpp_convert_tx_ts(struct
> mlx5_dev_ctx_shared *sh, uint64_t mts)
>   return ci;
>  }
> 
> +/**
> + * Set timestamp in mbuf dynamic field.
> + *
> + * @param mbuf
> + *   Structure to write into.
> + * @param offset
> + *   Dynamic field offset in mbuf structure.
> + * @param timestamp
> + *   Value to write.
> + */
> +static __rte_always_inline void
> +mlx5_timestamp_set(struct rte_mbuf *mbuf, int offset,
> + rte_mbuf_timestamp_t timestamp)
> +{
> + *RTE_MBUF_DYNFIELD(mbuf, offset, rte_mbuf_timestamp_t *) =
> timestamp;
> +}
> +
>  #endif /* RTE_PMD_MLX5_RXTX_H_ */
> diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
> b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
> index 6bf0c9b540..171d7bb0f8 100644
> --- a/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
> +++ b/drivers/net/mlx5/mlx5_rxtx_vec_altivec.h
> @@ -330,13 +330,13 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data
> *rxq,
>   vector unsigned char ol_flags = (vector unsigned char)
>   (vector unsigned int){
>   rxq->rss_hash * PKT_RX_RSS_HASH |
> - rxq->hw_timestamp * PKT_RX_TIMESTAMP,
> + rxq->hw_timestamp * rxq-
> >timestamp_rx_flag,
>   rxq->rss_hash * PKT_RX_RSS_HASH |
> - rxq->hw_timestamp * PKT_RX_TIMESTAMP,
> +

Re: [dpdk-dev] [PATCH v3 06/16] net/mlx5: fix dynamic mbuf offset lookup check

2020-11-03 Thread Slava Ovsiienko
> -Original Message-
> From: Thomas Monjalon 
> Sent: Tuesday, November 3, 2020 2:14
> To: dev@dpdk.org
> Cc: ferruh.yi...@intel.com; david.march...@redhat.com;
> bruce.richard...@intel.com; olivier.m...@6wind.com;
> andrew.rybche...@oktetlabs.ru; jer...@marvell.com; Slava Ovsiienko
> ; sta...@dpdk.org; Matan Azrad
> ; Shahaf Shuler ; Ori Kam
> 
> Subject: [PATCH v3 06/16] net/mlx5: fix dynamic mbuf offset lookup check
> 
> The functions rte_mbuf_dynfield_lookup() and rte_mbuf_dynflag_lookup() can
> return an offset starting with 0 or a negative error code.
> 
> In reality the first offsets are probably reserved forever, but for the sake 
> of
> strict API compliance, the checks which considered 0 as an error are fixed.
> 
> Fixes: efa79e68c8cd ("net/mlx5: support fine grain dynamic flag")
> Fixes: 3172c471b86f ("net/mlx5: prepare Tx queue structures to support
> timestamp")
> Fixes: 0febfcce3693 ("net/mlx5: prepare Tx to support scheduling")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Thomas Monjalon 
Acked-by: Viacheslav Ovsiienko 

> ---
>  drivers/net/mlx5/mlx5_rxtx.c| 4 ++--
>  drivers/net/mlx5/mlx5_trigger.c | 2 +-
>  drivers/net/mlx5/mlx5_txq.c | 2 +-
>  3 files changed, 4 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index
> b530ff421f..e86468b67a 100644
> --- a/drivers/net/mlx5/mlx5_rxtx.c
> +++ b/drivers/net/mlx5/mlx5_rxtx.c
> @@ -5661,9 +5661,9 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
>   }
>   if (tx_offloads & DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP &&
>   rte_mbuf_dynflag_lookup
> - (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL)
> > 0 &&
> + (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL)
> >= 0 &&
>   rte_mbuf_dynfield_lookup
> - (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >
> 0) {
> + (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL) >=
> 0) {
>   /* Offload configured, dynamic entities registered. */
>   olx |= MLX5_TXOFF_CONFIG_TXPP;
>   }
> diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
> index 7735f022a3..917b433c4a 100644
> --- a/drivers/net/mlx5/mlx5_trigger.c
> +++ b/drivers/net/mlx5/mlx5_trigger.c
> @@ -302,7 +302,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
>   DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
>   fine_inline = rte_mbuf_dynflag_lookup
>   (RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
> - if (fine_inline > 0)
> + if (fine_inline >= 0)
>   rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
>   else
>   rte_net_mlx5_dynf_inline_mask = 0;
> diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index
> af84f5f72b..8ed2bcff7b 100644
> --- a/drivers/net/mlx5/mlx5_txq.c
> +++ b/drivers/net/mlx5/mlx5_txq.c
> @@ -1305,7 +1305,7 @@ mlx5_txq_dynf_timestamp_set(struct rte_eth_dev
> *dev)
> 
>   (RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
>   off = rte_mbuf_dynfield_lookup
>   (RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
> NULL);
> - if (nbit > 0 && off >= 0 && sh->txpp.refcnt)
> + if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
>   mask = 1ULL << nbit;
>   for (i = 0; i != priv->txqs_n; ++i) {
>   data = (*priv->txqs)[i];
> --
> 2.28.0



Re: [dpdk-dev] [PATCH 1/8] regex/octeontx2: fix unnecessary name override

2020-11-03 Thread David Marchand
On Tue, Nov 3, 2020 at 1:30 AM Thomas Monjalon  wrote:
> > > -name = 'octeontx2_regex'
>
> But it is not the same?
>
> The name will default to "octeontx2", which is fine.
> But the fmt_name should not take this default.
> I believe fmt_name should be "octeontx2_regex" as I did in my patch.

fmt_name is only for maintaining config compat.
This driver is new to 20.11.
We can drop fmt_name too.


-- 
David Marchand



[dpdk-dev] [PATCH] net/mlx5: fix meter packet missing

2020-11-03 Thread Xueming Li
For transfer flows with meter, packets were passed without applying the
flow action. The group level was multiplied by 10 for group level 65531.

This patch fixes this issue by correcting suffix table group level
calculation.

Fixes: 3e8f3e51fd93 ("net/mlx5: fix meter table definitions")
Cc: suanmi...@nvidia.com
Cc: sta...@dpdk.org

Signed-off-by: Xueming Li 
---
 drivers/net/mlx5/mlx5.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63d263384b..a28f30a5ab 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -610,9 +610,9 @@ struct mlx5_flow_tbl_resource {
 #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
 #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
 /* Tables for metering splits should be added here. */
-#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
-#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
 #define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3)
+#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
+#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_METER
 #define MLX5_MAX_TABLES_FDB UINT16_MAX
 #define MLX5_FLOW_TABLE_FACTOR 10
 
-- 
2.25.1



[dpdk-dev] [PATCH] net/mlx5: fix unix socket server file name

2020-11-03 Thread Xueming Li
The mlx_steering_dump_parser.py tool failed to dump flows because the
socket file name was changed.

Change the socket file name back to keep it consistent.

Fixes: b1e15224849b ("common/mlx5: fix PCI driver name")
Cc: bi...@nvidia.com
Cc: sta...@dpdk.org

Signed-off-by: Xueming Li 
---
 drivers/net/mlx5/linux/mlx5_socket.c | 14 +-
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_socket.c 
b/drivers/net/mlx5/linux/mlx5_socket.c
index 08af905126..1938453980 100644
--- a/drivers/net/mlx5/linux/mlx5_socket.c
+++ b/drivers/net/mlx5/linux/mlx5_socket.c
@@ -20,16 +20,11 @@
 
 /* PMD socket service for tools. */
 
+#define MLX5_SOCKET_PATH "/var/tmp/dpdk_net_mlx5_%d"
+
 int server_socket; /* Unix socket for primary process. */
 struct rte_intr_handle server_intr_handle; /* Interrupt handler. */
 
-static void
-mlx5_pmd_make_path(struct sockaddr_un *addr, int pid)
-{
-   snprintf(addr->sun_path, sizeof(addr->sun_path), "/var/tmp/dpdk_%s_%d",
-MLX5_DRIVER_NAME, pid);
-}
-
 /**
  * Handle server pmd socket interrupts.
  */
@@ -186,7 +181,8 @@ mlx5_pmd_socket_init(void)
ret = fcntl(server_socket, F_SETFL, flags | O_NONBLOCK);
if (ret < 0)
goto error;
-   mlx5_pmd_make_path(&sun, getpid());
+   snprintf(sun.sun_path, sizeof(sun.sun_path), MLX5_SOCKET_PATH,
+getpid());
remove(sun.sun_path);
ret = bind(server_socket, (const struct sockaddr *)&sun, sizeof(sun));
if (ret < 0) {
@@ -225,6 +221,6 @@ RTE_FINI(mlx5_pmd_socket_uninit)
mlx5_pmd_interrupt_handler_uninstall();
claim_zero(close(server_socket));
server_socket = 0;
-   MKSTR(path, "/var/tmp/dpdk_%s_%d", MLX5_DRIVER_NAME, getpid());
+   MKSTR(path, MLX5_SOCKET_PATH, getpid());
claim_zero(remove(path));
 }
-- 
2.25.1



[dpdk-dev] [RFC v2 3/6] net/i40e: use generic flow command to re-realize mirror rule

2020-11-03 Thread Steve Yang
When set follow sample rule's ratio equal to one, its behavior is same
as mirror-rule, so we can use "flow create * pattern * actions sample *"
to replace old "set port * mirror-rule *" command now.

The example of mirror rule command mapping to flow management command:
(in below command, port 0 is PF and port 1-2 is VF):
1): Ingress pf => pf
set port 0 mirror-rule 0 uplink-mirror dst-pool 2 on
or
flow create 0 ingress pattern pf / end \
  actions sample ratio 1 / port_id id 0 / end

2): Egress pf => pf
set port 0 mirror-rule 0 downlink-mirror dst-pool 2 on
or
flow create 0 egress pattern pf / end \
  actions sample ratio 1 / port_id id 0 / end
3): ingress pf => vf 1
set port 0 mirror-rule 0 uplink-mirror dst-pool 1 on
or
flow create 0 ingress pattern pf / end \
  actions sample ratio 1 / port_id id 2 / end
4): egress pf => vf 1
set port 0 mirror-rule 0 downlink-mirror dst-pool 1 on
or
flow create 0 egress pattern pf / end \
  actions sample ratio 1 / port_id id 2 / end
5): ingress vf 0 1 => pf
set port 0 mirror-rule 0 pool-mirror-up 0x3 dst-pool 2 on
or
flow create 0 ingress pattern vf id is 1 / end \
  actions sample ratio 1 / port_id id 0 / end
flow create 0 ingress pattern vf id is 0 / end \
  actions sample ratio 1 / port_id id 0 / end
or
flow create 0 ingress pattern vf id last 1 / end \
  actions sample ratio 1 / port_id id 0 / end
6): egress vf 0 1 => pf
set port 0 mirror-rule 0 pool-mirror-down 0x3 dst-pool 2 on
or
flow create 0 egress pattern  vf id is 0 / end \
  actions sample ratio 1 / port_id id 0 / end
flow create 0 egress pattern  vf id is 1 / end \
  actions sample ratio 1 / port_id id 0 / end
or
flow create 0 egress pattern  vf id last  1 / end \
  actions sample ratio 1 / port_id id 0 / end
7): ingress vf 0 => vf 1
set port 0 mirror-rule 0 pool-mirror-up 0x1 dst-pool 1 on
or
flow create 0 ingress pattern vf id is 0 / end \
  actions sample ratio 1 / port_id id 2 / end
8): egress vf 0 => vf 1
set port 0 mirror-rule 0 pool-mirror-down 0x1 dst-pool 1 on
or
flow create 0 egress pattern vf id is 0 / end \
  actions sample ratio 1 / port_id id 2 / end
9): ingress vlan 4,6 => vf 1
set port 0 mirror-rule 0 vlan-mirror 4,6 dst-pool 1 on
or
flow create 0 ingress pattern vlan vid is 4 / end \
  actions sample ratio 1 / port_id id 2 / end
flow create 0 ingress pattern vlan vid is 6 / end \
  actions sample ratio 1 / port_id id 2 / end
or
flow create 0 ingress pattern vlan vid is 4 vid last 6 \
  vid mask 0x5 / end \
  actions sample ratio 1 / port_id id 2 / end

Signed-off-by: Steve Yang 
---
 drivers/net/i40e/i40e_flow.c | 153 +++
 1 file changed, 153 insertions(+)

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 7928871bf..d6c95415c 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -119,6 +119,7 @@ static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
 static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
+static int i40e_flow_flush_sample_filter(struct rte_eth_dev *dev);
 static int
 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
  const struct rte_flow_attr *attr,
@@ -5517,6 +5518,104 @@ i40e_parse_sample_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+static int
+i40e_config_sample_filter_set(struct rte_eth_dev *dev,
+ struct i40e_mirror_rule_conf *conf)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+   struct i40e_mirror_filter *it;
+   struct i40e_mirror_filter *mirror_filter;
+   uint16_t rule_id;
+   int ret;
+
+   if (pf->main_vsi->veb == NULL || pf->vfs == NULL) {
+   PMD_DRV_LOG(ERR,
+   "mirror rule can not be configured without veb or 
vfs.");
+   return -ENOSYS;
+   }
+   if (pf->nb_mirror_rule > I40E_MAX_MIRROR_RULES) {
+   PMD_DRV_LOG(ERR, "mirror table is full.");
+   return -ENOSPC;
+   }
+
+   TAILQ_FOREACH(it, &pf->mirror_filter_list, next) {
+   if (it->conf.dst_vsi_seid == conf->dst_vsi_seid &&
+   it->conf.rule_type == conf->rule_type &&
+   it->conf.num_entries == conf->num_entries &&
+   !memcmp(it->conf.entries, conf->entries,
+   conf->num_entries * sizeof(conf->entries[0]))) {
+ 

[dpdk-dev] [RFC v2 2/6] net/i40e: define the mirror filter parser

2020-11-03 Thread Steve Yang
Define the sample filter parser for mirroring. It is divided into two
phases: the first is sample attribute and pattern parsing, where the
mirror config is filled in according to the pattern type (VF/PF/VLAN)
when the sample ratio is 1.
The second is sample action parsing, where the port id of the mirror
config is filled in according to the action type (VF/PF).

Signed-off-by: Steve Yang 
---
 drivers/net/i40e/i40e_flow.c  | 264 +-
 lib/librte_ethdev/rte_ethdev_driver.h |   1 +
 2 files changed, 258 insertions(+), 7 deletions(-)

diff --git a/drivers/net/i40e/i40e_flow.c b/drivers/net/i40e/i40e_flow.c
index 5bec0c7a8..7928871bf 100644
--- a/drivers/net/i40e/i40e_flow.c
+++ b/drivers/net/i40e/i40e_flow.c
@@ -1871,15 +1871,18 @@ static struct i40e_valid_pattern 
i40e_supported_patterns[] = {
{ pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
 };
 
-#define NEXT_ITEM_OF_ACTION(act, actions, index)\
-   do {\
-   act = actions + index;  \
-   while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
-   index++;\
-   act = actions + index;  \
-   }   \
+#define NEXT_ITEM_OF_ACTION(act, actions, index)   \
+   do {\
+   act = (actions) + (index);  \
+   while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+   (index)++;  \
+   act = (actions) + (index);  \
+   }   \
} while (0)
 
+#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \
+   ((vlan_item) ? ntohs(vlan_item->tci) & 0x0fff : (default_vid))
+
 /* Find the first VOID or non-VOID item pointer */
 static const struct rte_flow_item *
 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
@@ -5267,6 +5270,253 @@ i40e_config_rss_filter_del(struct rte_eth_dev *dev,
return 0;
 }
 
+static int
+i40e_flow_parse_sample_attr_pattern(struct rte_eth_dev *dev,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   struct rte_flow_error *error,
+   union i40e_filter_t *filter)
+{
+   struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+   const struct rte_flow_item *item = pattern;
+   const struct rte_flow_item *next_item = pattern + 1;
+   enum rte_flow_item_type item_type, next_item_type;
+   const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last;
+   const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last;
+   struct i40e_mirror_rule_conf *mirror_config = &filter->mirror_conf;
+   uint16_t *entries = mirror_config->entries;
+   uint8_t *rule_type = &mirror_config->rule_type;
+   uint16_t vf_id, vf_id_last, vlan_id, vlan_id_mask, vlan_id_last;
+   uint16_t i, j = 0, k = 0;
+
+   if (attr->priority) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+  attr, "Not support priority.");
+   return -rte_errno;
+   }
+   if (attr->group) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+  attr, "Not support group.");
+   return -rte_errno;
+   }
+   if (attr->transfer) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+  attr, "Not support group.");
+   return -rte_errno;
+   }
+
+   item_type = item->type;
+   next_item_type = next_item->type;
+   if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END &&
+ (item_type == RTE_FLOW_ITEM_TYPE_PF ||
+  item_type == RTE_FLOW_ITEM_TYPE_VF ||
+  item_type == RTE_FLOW_ITEM_TYPE_VLAN))) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item,
+   "Only support a pattern item that is pf or vf or 
vlan.");
+   return -rte_errno;
+   }
+
+   if (item_type == RTE_FLOW_ITEM_TYPE_PF) {
+   if (!attr->ingress && attr->egress) {
+   *rule_type = I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS;
+   } else if (attr->ingress && !attr->egress) {
+   *rule_type = I40E_AQC_MIRROR_RU

[dpdk-dev] [RFC v2 1/6] net/i40e: add mirror rule config and add/del rule APIs

2020-11-03 Thread Steve Yang
Define i40e_mirror_rule_conf structure that is used for set mirror flow
rule to i40e register, and relocate the mirror related MACORs to header
file.

Signed-off-by: Steve Yang 
---
 drivers/net/i40e/i40e_ethdev.c | 13 +
 drivers/net/i40e/i40e_ethdev.h | 34 ++
 2 files changed, 39 insertions(+), 8 deletions(-)

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index f54769c29..b17c05eda 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -329,12 +329,6 @@ static int i40e_dev_sync_phy_type(struct i40e_hw *hw);
 static void i40e_configure_registers(struct i40e_hw *hw);
 static void i40e_hw_init(struct rte_eth_dev *dev);
 static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
-static enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
-uint16_t seid,
-uint16_t rule_type,
-uint16_t *entries,
-uint16_t count,
-uint16_t rule_id);
 static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t sw_id, uint8_t on);
@@ -1742,6 +1736,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void 
*init_params __rte_unused)
/* initialize RSS rule list */
TAILQ_INIT(&pf->rss_config_list);
 
+   /* initialize mirror filter list */
+   TAILQ_INIT(&pf->mirror_filter_list);
+
/* initialize Traffic Manager configuration */
i40e_tm_conf_init(dev);
 
@@ -10223,7 +10220,7 @@ i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi 
*vsi)
  * Add a mirror rule for a given veb.
  *
  **/
-static enum i40e_status_code
+enum i40e_status_code
 i40e_aq_add_mirror_rule(struct i40e_hw *hw,
uint16_t seid, uint16_t dst_id,
uint16_t rule_type, uint16_t *entries,
@@ -10274,7 +10271,7 @@ i40e_aq_add_mirror_rule(struct i40e_hw *hw,
  * Delete a mirror rule for a given veb.
  *
  **/
-static enum i40e_status_code
+enum i40e_status_code
 i40e_aq_del_mirror_rule(struct i40e_hw *hw,
uint16_t seid, uint16_t rule_type, uint16_t *entries,
uint16_t count, uint16_t rule_id)
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index ea59a3e60..290a54daa 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -1075,6 +1075,30 @@ struct i40e_rss_filter {
struct i40e_rte_flow_rss_conf rss_filter_info;
 };
 
+/**
+ * Mirror rule configuration
+ */
+struct i40e_mirror_rule_conf {
+   uint8_t  rule_type;
+   uint16_t rule_id;   /* the rule id assigned by firmware */
+   uint16_t dst_vsi_seid;  /* destination vsi for this mirror rule. */
+   uint16_t num_entries;
+   /**
+* the info stores depend on the rule type.
+* If type is I40E_MIRROR_TYPE_VLAN, vlan ids are stored here.
+*  If type is I40E_MIRROR_TYPE_VPORT_*, vsi's seid are stored.
+*/
+   uint16_t entries[I40E_MIRROR_MAX_ENTRIES_PER_RULE];
+};
+
+TAILQ_HEAD(i40e_mirror_filter_list, i40e_mirror_filter);
+
+/* Mirror rule list structure */
+struct i40e_mirror_filter {
+   TAILQ_ENTRY(i40e_mirror_filter) next;
+   struct i40e_mirror_rule_conf conf;
+};
+
 struct i40e_vf_msg_cfg {
/* maximal VF message during a statistic period */
uint32_t max_msg;
@@ -1145,6 +1169,7 @@ struct i40e_pf {
struct i40e_tunnel_rule tunnel; /* Tunnel filter rule */
struct i40e_rte_flow_rss_conf rss_info; /* RSS info */
struct i40e_rss_conf_list rss_config_list; /* RSS rule list */
+   struct i40e_mirror_filter_list mirror_filter_list;
struct i40e_queue_regions queue_region; /* queue region info */
struct i40e_fc_conf fc_conf; /* Flow control conf */
struct i40e_mirror_rule_list mirror_list;
@@ -1310,6 +1335,7 @@ union i40e_filter_t {
struct rte_eth_tunnel_filter_conf tunnel_filter;
struct i40e_tunnel_filter_conf consistent_tunnel_filter;
struct i40e_rte_flow_rss_conf rss_conf;
+   struct i40e_mirror_rule_conf mirror_conf;
 };
 
 typedef int (*parse_filter_t)(struct rte_eth_dev *dev,
@@ -1460,6 +1486,14 @@ int i40e_config_rss_filter(struct i40e_pf *pf,
 int i40e_vf_representor_init(struct rte_eth_dev *ethdev, void *init_params);
 int i40e_vf_representor_uninit(struct rte_eth_dev *ethdev);
 
+enum i40e_status_code i40e_aq_add_mirror_rule(struct i40e_hw *hw,
+   uint16_t seid, uint16_t dst_id,
+   uint16_t rule_type, uint16_t *entries,
+   uint16_t count, uint16_t *rule_id);
+enum i40e_status_code i40e_aq_del_mirror_rule(struct i40e_hw *hw,
+   uint16_t seid, uint16_t rule_type, uint

[dpdk-dev] [RFC v2 5/6] net/ixgbe: define the mirror filter parser

2020-11-03 Thread Steve Yang
Define the sample filter parser for mirroring. It is divided into two
phases: the first is sample attribute and pattern parsing, where the
mirror config is filled in according to the pattern type (VF/PF/VLAN)
when the sample ratio is 1.
The second is sample action parsing, where the port id of the mirror
config is filled in according to the action type (VF/PF).

Signed-off-by: Steve Yang 
---
 drivers/net/ixgbe/ixgbe_flow.c | 245 +
 1 file changed, 245 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index dff04c462..0ad49ca48 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -49,6 +49,18 @@
 #define IXGBE_MAX_N_TUPLE_PRIO 7
 #define IXGBE_MAX_FLX_SOURCE_OFF 62
 
+#define NEXT_ITEM_OF_ACTION(act, actions, index)   \
+   do {\
+   act = (actions) + (index);  \
+   while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
+   (index)++;  \
+   act = (actions) + (index);  \
+   }   \
+   } while (0)
+
+#define GET_VLAN_ID_FROM_TCI(vlan_item, default_vid) \
+   ((vlan_item) ? ntohs((vlan_item)->tci) & 0x0fff : (default_vid))
+
 /* ntuple filter list structure */
 struct ixgbe_ntuple_filter_ele {
TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
@@ -79,6 +91,11 @@ struct ixgbe_rss_conf_ele {
TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
struct ixgbe_rte_flow_rss_conf filter_info;
 };
+/* rss filter list structure */
+struct ixgbe_mirror_conf_ele {
+   TAILQ_ENTRY(ixgbe_mirror_conf_ele) entries;
+   struct ixgbe_flow_mirror_conf filter_info;
+};
 /* ixgbe_flow memory list structure */
 struct ixgbe_flow_mem {
TAILQ_ENTRY(ixgbe_flow_mem) entries;
@@ -91,6 +108,7 @@ TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
+TAILQ_HEAD(ixgbe_mirror_filter_list, ixgbe_mirror_conf_ele);
 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
 
 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
@@ -2931,6 +2949,233 @@ ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
 }
 
+static int
+ixgbe_flow_parse_sample_attr_pattern(struct rte_eth_dev *dev,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   struct rte_flow_error *error,
+   struct ixgbe_flow_mirror_conf *conf)
+{
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+   const struct rte_flow_item *item = pattern;
+   const struct rte_flow_item *next_item = pattern + 1;
+   enum rte_flow_item_type item_type, next_item_type;
+   const struct rte_flow_item_vf *vf_spec, *vf_mask, *vf_last;
+   const struct rte_flow_item_vlan *vlan_spec, *vlan_mask, *vlan_last;
+   struct ixgbe_flow_mirror_conf *mirror_config = conf;
+   uint16_t vf_id, vf_id_last;
+   uint16_t vlan_id, vlan_id_mask, vlan_id_last;
+   uint16_t i, j = 0, k = 0;
+
+   if (attr->priority) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+  attr, "Not support priority.");
+   return -rte_errno;
+   }
+   if (attr->group) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
+  attr, "Not support group.");
+   return -rte_errno;
+   }
+   if (attr->transfer) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+  attr, "Not support group.");
+   return -rte_errno;
+   }
+
+   item_type = item->type;
+   next_item_type = next_item->type;
+   if (!(next_item_type == RTE_FLOW_ITEM_TYPE_END &&
+ (item_type == RTE_FLOW_ITEM_TYPE_PF ||
+  item_type == RTE_FLOW_ITEM_TYPE_VF ||
+  item_type == RTE_FLOW_ITEM_TYPE_VLAN))) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ITEM,
+  item,
+  "Only support pf or vf or vlan pattern.");
+   return -rte_errno;
+   }
+   if (item_type == RTE_FLOW_ITEM_TYPE_PF) {
+   if (!attr->ingress && attr->egress) {
+   mirror

[dpdk-dev] [RFC v2 6/6] net/ixgbe: use flow sample to re-realize mirror rule

2020-11-03 Thread Steve Yang
When a sample rule's ratio is set to one, its behavior is the same as a
mirror rule, so the "flow create * pattern * actions sample *" command
can now be used to replace the old "set port * mirror-rule *" command.

The example of mirror rule command mapping to flow management command:
(in below command, port 0 is PF and port 1-2 is VF):
1) ingress: pf => pf
set port 0 mirror-rule 0 uplink-mirror dst-pool 2 on
or
flow create 0 ingress pattern pf / end \
  actions sample ratio 1 / port_id id  0 / end
2) egress: pf => pf
set port 0 mirror-rule 0 downlink-mirror dst-pool 2 on
or
flow create 0 egress pattern pf / end \
 actions sample ratio 1 / port_id id  0 / end
3) ingress: pf => vf 2
set port 0 mirror-rule 0 uplink-mirror dst-pool 1 on
or
flow create 0 ingress pattern pf / end \
  actions sample ratio 1 / port_id id 2 / end
4) egress: pf => vf 2
set port 0 mirror-rule 0 downlink-mirror dst-pool 1 on
or
flow create 0 egress pattern pf / end \
 actions sample ratio 1 / port_id id 2 / end
5) ingress: vf 0,1 => pf
set port 0 mirror-rule 0 pool-mirror-up 0x3 dst-pool 2 on
or
flow create 0 ingress pattern vf id is 0 / end \
  actions sample ratio 1 / port_id id 0 / end
flow create 0 ingress pattern vf id is 1 / end \
  actions sample ratio 1 / port_id id 0 / end
6) ingress: vf 0 => vf 1
set port 0 mirror-rule 0 pool-mirror-up 0x1 dst-pool 1 on
or
flow create 0 ingress pattern vf id is 0 / end \
  actions sample ratio 1 / port_id id 2 / end
7) ingress: vlan 4,6 => vf 1
rx_vlan add 4 port 0 vf 0xf
rx_vlan add 6 port 0 vf 0xf
set port 0 mirror-rule 0 vlan-mirror 4,6 dst-pool 1 on
or
rx_vlan add 4 port 0 vf 0xf
rx_vlan add 6 port 0 vf 0xf
flow create 0 ingress pattern vlan vid is 4 / end \
  actions sample ratio 1 / port_id id 2 / end
flow create 0 ingress pattern vlan vid is 6 / end \
  actions sample ratio 1 / port_id id 2 / end

Signed-off-by: Steve Yang 
---
 drivers/net/ixgbe/ixgbe_flow.c | 228 +
 1 file changed, 228 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c
index 0ad49ca48..5635bf585 100644
--- a/drivers/net/ixgbe/ixgbe_flow.c
+++ b/drivers/net/ixgbe/ixgbe_flow.c
@@ -117,6 +117,7 @@ static struct ixgbe_syn_filter_list filter_syn_list;
 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
 static struct ixgbe_rss_filter_list filter_rss_list;
+static struct ixgbe_mirror_filter_list filter_mirror_list;
 static struct ixgbe_flow_mem_list ixgbe_flow_list;
 
 /**
@@ -3176,6 +3177,185 @@ ixgbe_parse_sample_filter(struct rte_eth_dev *dev,
return ixgbe_flow_parse_sample_action(dev, actions, error, conf);
 }
 
+static int
+ixgbe_config_mirror_filter_add(struct rte_eth_dev *dev,
+  struct ixgbe_flow_mirror_conf *mirror_conf)
+{
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+   uint32_t mr_ctl, vlvf;
+   uint32_t mp_lsb = 0;
+   uint32_t mv_msb = 0;
+   uint32_t mv_lsb = 0;
+   uint32_t mp_msb = 0;
+   uint8_t i = 0;
+   int reg_index = 0;
+   uint64_t vlan_mask = 0;
+
+   const uint8_t pool_mask_offset = 32;
+   const uint8_t vlan_mask_offset = 32;
+   const uint8_t dst_pool_offset = 8;
+   const uint8_t rule_mr_offset  = 4;
+   const uint8_t mirror_rule_mask = 0x0F;
+
+   struct ixgbe_hw *hw =
+   IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+   struct ixgbe_filter_info *filter_info =
+   IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+   struct ixgbe_mirror_conf_ele *it;
+   int8_t rule_id;
+   uint8_t mirror_type = 0;
+
+   if (ixgbe_vt_check(hw) < 0)
+   return -ENOTSUP;
+
+   if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+   PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+   mirror_conf->rule_type);
+   return -EINVAL;
+   }
+
+   TAILQ_FOREACH(it, &filter_mirror_list, entries) {
+   if (it->filter_info.rule_type == mirror_conf->rule_type &&
+   it->filter_info.dst_pool == mirror_conf->dst_pool &&
+   it->filter_info.pool_mask == mirror_conf->pool_mask &&
+   it->filter_info.vlan_mask == mirror_conf->vlan_mask &&
+   !memcmp(it->filter_info.vlan_id, mirror_conf->vlan_id,
+   ETH_MIRROR_MAX_VLANS * sizeof(mirror_conf->vlan_id[0]))) {
+   PMD_DRV_LOG(ERR, "mirro

[dpdk-dev] [RFC v2 4/6] net/ixgbe: add mirror rule config and add/del rule APIs

2020-11-03 Thread Steve Yang
Define ixgbe_flow_mirror_conf structure that is used for set mirror flow
rule to ixgbe register, and relocate the mirror related MACORs to header
file.

Signed-off-by: Steve Yang 
---
 drivers/net/ixgbe/ixgbe_ethdev.c |  8 -
 drivers/net/ixgbe/ixgbe_ethdev.h | 54 ++--
 2 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 9a47a8b26..cc07b0e31 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5724,14 +5724,6 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, 
uint32_t orig_val)
return new_val;
 }
 
-#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
-#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
-#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
-#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
-#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
-   ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
-   ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
-
 static int
 ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
  struct rte_eth_mirror_conf *mirror_conf,
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index 3d35ea791..db95a53f1 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -216,6 +216,27 @@ struct ixgbe_rte_flow_rss_conf {
uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
 };
 
+#define IXGBE_MAX_MIRROR_RULES 4  /* Maximum nb. of mirror rules. */
+#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+   ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP |\
+   ETH_MIRROR_UPLINK_PORT |\
+   ETH_MIRROR_DOWNLINK_PORT |  \
+   ETH_MIRROR_VLAN))
+
+struct ixgbe_flow_mirror_conf {
+   uint8_t  rule_type;
+   uint16_t rule_id;
+   uint8_t  dst_pool;  /* Destination pool for this mirror rule. */
+   uint64_t pool_mask; /* Bitmap of pool for virtual pool mirroring */
+   uint64_t vlan_mask; /* mask for valid VLAN ID. */
+   /* VLAN ID list for vlan mirroring. */
+   uint16_t vlan_id[ETH_MIRROR_MAX_VLANS];
+};
+
 /* structure for interrupt relative data */
 struct ixgbe_interrupt {
uint32_t flags;
@@ -250,8 +271,6 @@ struct ixgbe_uta_info {
uint32_t uta_shadow[IXGBE_MAX_UTA];
 };
 
-#define IXGBE_MAX_MIRROR_RULES 4  /* Maximum nb. of mirror rules. */
-
 struct ixgbe_mirror_info {
struct rte_eth_mirror_conf mr_conf[IXGBE_MAX_MIRROR_RULES];
/**< store PF mirror rules configuration*/
@@ -337,6 +356,8 @@ struct ixgbe_filter_info {
uint32_t syn_info;
/* store the rss filter info */
struct ixgbe_rte_flow_rss_conf rss_info;
+   uint8_t mirror_mask;  /* Bit mask for every used mirror filter */
+   struct ixgbe_flow_mirror_conf mirror_filters[IXGBE_MAX_MIRROR_RULES];
 };
 
 struct ixgbe_l2_tn_key {
@@ -830,4 +851,33 @@ ixgbe_ethertype_filter_remove(struct ixgbe_filter_info 
*filter_info,
return idx;
 }
 
+static inline int8_t
+ixgbe_mirror_filter_insert(struct ixgbe_filter_info *filter_info,
+  struct ixgbe_flow_mirror_conf *mirror_conf)
+{
+   int i;
+
+   for (i = 0; i < IXGBE_MAX_MIRROR_RULES; i++) {
+   if (!(filter_info->mirror_mask & (1 << i))) {
+   filter_info->mirror_mask |= 1 << i;
+   mirror_conf->rule_id = i;
+   filter_info->mirror_filters[i] = *mirror_conf;
+   return i;
+   }
+   }
+   return -1;
+}
+
+static inline int
+ixgbe_mirror_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+   if (idx >= IXGBE_MAX_MIRROR_RULES)
+   return -1;
+   filter_info->mirror_mask &= ~(1 << idx);
+   memset(&filter_info->mirror_filters[idx], 0,
+  sizeof(filter_info->mirror_filters[0]));
+   return idx;
+}
+
 #endif /* _IXGBE_ETHDEV_H_ */
-- 
2.17.1



Re: [dpdk-dev] [PATCH v2] net/iavf: fix PROT field for rss hash

2020-11-03 Thread Huang, ZhiminX
Tested-by: Huang, ZhiminX 

Regards,
HuangZhiMin


> -Original Message-
> From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Jeff Guo
> Sent: Tuesday, November 3, 2020 3:35 PM
> To: Wu, Jingjing ; Zhang, Qi Z
> ; Xing, Beilei 
> Cc: dev@dpdk.org; Guo, Jia 
> Subject: [dpdk-dev] [PATCH v2] net/iavf: fix PROT field for rss hash
> 
> Add PROT field into IPv4 and IPv6 protocol headers for rss hash.
> 
> Fixes: 91f27b2e39ab ("net/iavf: refactor RSS")
> Signed-off-by: Jeff Guo 
> ---
> v2:
> add prot into the new hdr replace of function modify
> ---
>  drivers/net/iavf/iavf_hash.c | 32 
>  1 file changed, 24 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c index
> b56152c5b8..804deec5fe 100644
> --- a/drivers/net/iavf/iavf_hash.c
> +++ b/drivers/net/iavf/iavf_hash.c
> @@ -101,11 +101,23 @@ iavf_hash_parse_pattern_action(struct
> iavf_adapter *ad,
>   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
>   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST),
> {BUFF_NOUSED} }
> 
> +#define proto_hdr_ipv4_with_prot { \
> + VIRTCHNL_PROTO_HDR_IPV4, \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT),
> {BUFF_NOUSED} }
> +
>  #define proto_hdr_ipv6 { \
>   VIRTCHNL_PROTO_HDR_IPV6, \
>   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
>   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST),
> {BUFF_NOUSED} }
> 
> +#define proto_hdr_ipv6_with_prot { \
> + VIRTCHNL_PROTO_HDR_IPV6, \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | \
> + FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT),
> {BUFF_NOUSED} }
> +
>  #define proto_hdr_udp { \
>   VIRTCHNL_PROTO_HDR_UDP, \
>   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | \ @@ -
> 151,13 +163,15 @@ struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
> 
>  struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
>   TUNNEL_LEVEL_OUTER, 5,
> - {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
> + {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
> +  proto_hdr_ipv4_with_prot,
>proto_hdr_udp}
>  };
> 
>  struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
>   TUNNEL_LEVEL_OUTER, 5,
> - {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
> + {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
> +  proto_hdr_ipv4_with_prot,
>proto_hdr_tcp}
>  };
> 
> @@ -174,13 +188,15 @@ struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
> 
>  struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
>   TUNNEL_LEVEL_OUTER, 5,
> - {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
> + {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
> +  proto_hdr_ipv6_with_prot,
>proto_hdr_udp}
>  };
> 
>  struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
>   TUNNEL_LEVEL_OUTER, 5,
> - {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
> + {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
> +  proto_hdr_ipv6_with_prot,
>proto_hdr_tcp}
>  };
> 
> @@ -195,11 +211,11 @@ struct virtchnl_proto_hdrs inner_ipv4_tmplt = {  };
> 
>  struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
> - TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_udp}
> + TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot,
> proto_hdr_udp}
>  };
> 
>  struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
> - TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_tcp}
> + TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot,
> proto_hdr_tcp}
>  };
> 
>  struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = { @@ -211,11 +227,11
> @@ struct virtchnl_proto_hdrs inner_ipv6_tmplt = {  };
> 
>  struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
> - TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_udp}
> + TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot,
> proto_hdr_udp}
>  };
> 
>  struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
> - TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_tcp}
> + TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot,
> proto_hdr_tcp}
>  };
> 
>  struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
> --
> 2.20.1



[dpdk-dev] [PATCH 0/4] code cleanup and improvements

2020-11-03 Thread Archana Muniganti
This series has code cleanup and improvements for
OCTEON TX and OCTEON TX2 crypto PMDs

Archana Muniganti (4):
  common/cpt: prepopulate word7 in sess
  common/cpt: remove temporary variable
  common/cpt: use predefined macros
  common/cpt: remove redundant structure

 drivers/common/cpt/cpt_common.h   |  20 +-
 drivers/common/cpt/cpt_hw_types.h |  10 +-
 drivers/common/cpt/cpt_mcode_defines.h|  21 +-
 drivers/common/cpt/cpt_pmd_ops_helper.c   |   5 +-
 drivers/common/cpt/cpt_ucode.h| 199 ++
 drivers/common/cpt/cpt_ucode_asym.h   |  60 ++
 .../crypto/octeontx/otx_cryptodev_hw_access.c |  10 +-
 .../crypto/octeontx/otx_cryptodev_hw_access.h |   4 +-
 drivers/crypto/octeontx/otx_cryptodev_ops.c   |  48 +++--
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  67 +++---
 drivers/crypto/octeontx2/otx2_cryptodev_sec.c |   4 +-
 drivers/crypto/octeontx2/otx2_cryptodev_sec.h |   2 +-
 drivers/crypto/octeontx2/otx2_ipsec_po_ops.h  |   2 -
 13 files changed, 183 insertions(+), 269 deletions(-)

-- 
2.22.0



[dpdk-dev] [PATCH 2/4] common/cpt: remove temporary variable

2020-11-03 Thread Archana Muniganti
Remove temporary variable used in datapath.

Signed-off-by: Archana Muniganti 
---
 drivers/common/cpt/cpt_hw_types.h | 10 ++-
 drivers/common/cpt/cpt_mcode_defines.h|  8 --
 drivers/common/cpt/cpt_ucode.h| 79 ++-
 drivers/common/cpt/cpt_ucode_asym.h   | 46 ---
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  2 +-
 5 files changed, 48 insertions(+), 97 deletions(-)

diff --git a/drivers/common/cpt/cpt_hw_types.h 
b/drivers/common/cpt/cpt_hw_types.h
index e2b127de41..a1f969eb14 100644
--- a/drivers/common/cpt/cpt_hw_types.h
+++ b/drivers/common/cpt/cpt_hw_types.h
@@ -31,7 +31,10 @@ typedef union {
uint64_t u64;
struct {
 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
-   uint16_t opcode;
+   struct {
+   uint8_t minor;
+   uint8_t major;
+   } opcode;
uint16_t param1;
uint16_t param2;
uint16_t dlen;
@@ -39,7 +42,10 @@ typedef union {
uint16_t dlen;
uint16_t param2;
uint16_t param1;
-   uint16_t opcode;
+   struct {
+   uint8_t major;
+   uint8_t minor;
+   } opcode;
 #endif
} s;
 } vq_cmd_word0_t;
diff --git a/drivers/common/cpt/cpt_mcode_defines.h 
b/drivers/common/cpt/cpt_mcode_defines.h
index 846ceb4a02..56a745f419 100644
--- a/drivers/common/cpt/cpt_mcode_defines.h
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -369,14 +369,6 @@ typedef struct{
buf_ptr_t bufs[0];
 } iov_ptr_t;
 
-typedef union opcode_info {
-   uint16_t flags;
-   struct {
-   uint8_t major;
-   uint8_t minor;
-   } s;
-} opcode_info_t;
-
 typedef struct fc_params {
/* 0th cache line */
union {
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 7d938d0c91..664a04e1a0 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -489,7 +489,6 @@ cpt_digest_gen_prep(uint32_t flags,
vq_cmd_word0_t vq_cmd_w0;
void *c_vaddr, *m_vaddr;
uint64_t c_dma, m_dma;
-   opcode_info_t opcode;
 
ctx = params->ctx_buf.vaddr;
meta_p = ¶ms->meta_buf;
@@ -524,31 +523,27 @@ cpt_digest_gen_prep(uint32_t flags,
data_len = AUTH_DLEN(d_lens);
 
/*GP op header */
-   vq_cmd_w0.u64 = 0;
+   vq_cmd_w0.s.opcode.minor = 0;
vq_cmd_w0.s.param2 = ((uint16_t)hash_type << 8);
if (ctx->hmac) {
-   opcode.s.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
+   vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = key_len;
vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
} else {
-   opcode.s.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
+   vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
vq_cmd_w0.s.dlen = data_len;
}
 
-   opcode.s.minor = 0;
-
/* Null auth only case enters the if */
if (unlikely(!hash_type && !ctx->enc_cipher)) {
-   opcode.s.major = CPT_MAJOR_OP_MISC;
+   vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_MISC;
/* Minor op is passthrough */
-   opcode.s.minor = 0x03;
+   vq_cmd_w0.s.opcode.minor = 0x03;
/* Send out completion code only */
vq_cmd_w0.s.param2 = 0x1;
}
 
-   vq_cmd_w0.s.opcode = opcode.flags;
-
/* DPTR has SG list */
in_buffer = m_vaddr;
dptr_dma = m_dma;
@@ -677,7 +672,6 @@ cpt_enc_hmac_prep(uint32_t flags,
vq_cmd_word0_t vq_cmd_w0;
void *c_vaddr;
uint64_t c_dma;
-   opcode_info_t opcode;
 
meta_p = &fc_params->meta_buf;
m_vaddr = meta_p->vaddr;
@@ -756,8 +750,8 @@ cpt_enc_hmac_prep(uint32_t flags,
}
 
/* Encryption */
-   opcode.s.major = CPT_MAJOR_OP_FC;
-   opcode.s.minor = 0;
+   vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_FC;
+   vq_cmd_w0.s.opcode.minor = 0;
 
if (hash_type == GMAC_TYPE) {
encr_offset = 0;
@@ -783,7 +777,6 @@ cpt_enc_hmac_prep(uint32_t flags,
}
 
/* GP op header */
-   vq_cmd_w0.u64 = 0;
vq_cmd_w0.s.param1 = encr_data_len;
vq_cmd_w0.s.param2 = auth_data_len;
/*
@@ -814,8 +807,6 @@ cpt_enc_hmac_prep(uint32_t flags,
 
vq_cmd_w0.s.dlen = inputlen + OFF_CTRL_LEN;
 
-   vq_cmd_w0.s.opcode = opcode.flags;
-
if (likely(iv_len)) {
uint64_t *dest = (uint64_t *)((uint8_t *)offset_vaddr
  + OFF_CTRL_LEN);
@@ -844,9 +835,7 @@ cpt_enc_hmac_prep(uint32_t flags,
m_vaddr = (uint8_t *)m_vaddr + size;
m_dma += s

[dpdk-dev] [PATCH 1/4] common/cpt: prepopulate word7 in sess

2020-11-03 Thread Archana Muniganti
CPT inst word7 is immutable data for a session.
This data can be prepopulated in the session.

Signed-off-by: Archana Muniganti 
---
 drivers/common/cpt/cpt_common.h   |   1 -
 drivers/common/cpt/cpt_mcode_defines.h|  13 ++-
 drivers/common/cpt/cpt_ucode.h| 109 +-
 .../crypto/octeontx/otx_cryptodev_hw_access.h |   4 +-
 drivers/crypto/octeontx/otx_cryptodev_ops.c   |  29 +++--
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  46 
 drivers/crypto/octeontx2/otx2_cryptodev_sec.c |   4 +-
 drivers/crypto/octeontx2/otx2_cryptodev_sec.h |   2 +-
 drivers/crypto/octeontx2/otx2_ipsec_po_ops.h  |   2 -
 9 files changed, 91 insertions(+), 119 deletions(-)

diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index 1ce28e90b7..eefe2755c1 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -69,7 +69,6 @@ struct cpt_request_info {
uint64_t ei0;
uint64_t ei1;
uint64_t ei2;
-   uint64_t ei3;
} ist;
uint8_t *rptr;
const struct otx2_cpt_qp *qp;
diff --git a/drivers/common/cpt/cpt_mcode_defines.h 
b/drivers/common/cpt/cpt_mcode_defines.h
index 0a05bd5639..846ceb4a02 100644
--- a/drivers/common/cpt/cpt_mcode_defines.h
+++ b/drivers/common/cpt/cpt_mcode_defines.h
@@ -245,8 +245,8 @@ struct cpt_sess_misc {
uint16_t is_null:1;
/** Flag for GMAC */
uint16_t is_gmac:1;
-   /** Engine group */
-   uint16_t egrp:3;
+   /** Unused field */
+   uint16_t rsvd1:3;
/** AAD length */
uint16_t aad_length;
/** MAC len in bytes */
@@ -255,14 +255,16 @@ struct cpt_sess_misc {
uint8_t iv_length;
/** Auth IV length in bytes */
uint8_t auth_iv_length;
-   /** Reserved field */
-   uint8_t rsvd1;
+   /** Unused field */
+   uint8_t rsvd2;
/** IV offset in bytes */
uint16_t iv_offset;
/** Auth IV offset in bytes */
uint16_t auth_iv_offset;
/** Salt */
uint32_t salt;
+   /** CPT inst word 7 */
+   uint64_t cpt_inst_w7;
/** Context DMA address */
phys_addr_t ctx_dma_addr;
 };
@@ -319,7 +321,7 @@ struct cpt_ctx {
mc_fc_context_t fctx;
mc_zuc_snow3g_ctx_t zs_ctx;
mc_kasumi_ctx_t k_ctx;
-   };
+   } mc_ctx;
uint8_t  auth_key[1024];
 };
 
@@ -350,6 +352,7 @@ struct cpt_asym_sess_misc {
struct rte_crypto_modex_xform mod_ctx;
struct cpt_asym_ec_ctx ec_ctx;
};
+   uint64_t cpt_inst_w7;
 };
 
 /* Buffer pointer */
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 5f28bd7591..7d938d0c91 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -77,11 +77,11 @@ cpt_mac_len_verify(struct rte_crypto_auth_xform *auth)
 }
 
 static __rte_always_inline void
-cpt_fc_salt_update(void *ctx,
+cpt_fc_salt_update(struct cpt_ctx *cpt_ctx,
   uint8_t *salt)
 {
-   struct cpt_ctx *cpt_ctx = ctx;
-   memcpy(&cpt_ctx->fctx.enc.encr_iv, salt, 4);
+   mc_fc_context_t *fctx = &cpt_ctx->mc_ctx.fctx;
+   memcpy(fctx->enc.encr_iv, salt, 4);
 }
 
 static __rte_always_inline int
@@ -190,10 +190,12 @@ static __rte_always_inline void
 cpt_fc_ciph_set_key_snow3g_uea2(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
 {
+   mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
uint32_t keyx[4];
+
cpt_ctx->snow3g = 1;
gen_key_snow3g(key, keyx);
-   memcpy(cpt_ctx->zs_ctx.ci_key, keyx, key_len);
+   memcpy(zs_ctx->ci_key, keyx, key_len);
cpt_ctx->zsk_flags = 0;
 }
 
@@ -201,9 +203,11 @@ static __rte_always_inline void
 cpt_fc_ciph_set_key_zuc_eea3(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
 {
+   mc_zuc_snow3g_ctx_t *zs_ctx = &cpt_ctx->mc_ctx.zs_ctx;
+
cpt_ctx->snow3g = 0;
-   memcpy(cpt_ctx->zs_ctx.ci_key, key, key_len);
-   memcpy(cpt_ctx->zs_ctx.zuc_const, zuc_d, 32);
+   memcpy(zs_ctx->ci_key, key, key_len);
+   memcpy(zs_ctx->zuc_const, zuc_d, 32);
cpt_ctx->zsk_flags = 0;
 }
 
@@ -211,8 +215,10 @@ static __rte_always_inline void
 cpt_fc_ciph_set_key_kasumi_f8_ecb(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
 {
+   mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
cpt_ctx->k_ecb = 1;
-   memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+   memcpy(k_ctx->ci_key, key, key_len);
cpt_ctx->zsk_flags = 0;
 }
 
@@ -220,16 +226,17 @@ static __rte_always_inline void
 cpt_fc_ciph_set_key_kasumi_f8_cbc(struct cpt_ctx *cpt_ctx, const uint8_t *key,
uint16_t key_len)
 {
-   memcpy(cpt_ctx->k_ctx.ci_key, key, key_len);
+   mc_kasumi_ctx_t *k_ctx = &cpt_ctx->mc_ctx.k_ctx;
+
+   memcpy(k_ctx->ci_key, key, 

[dpdk-dev] [PATCH 3/4] common/cpt: use predefined macros

2020-11-03 Thread Archana Muniganti
Replace the redundant ROUNDUP* macros with the predefined RTE_ALIGN_CEIL macro.

Signed-off-by: Archana Muniganti 
---
 drivers/common/cpt/cpt_common.h   | 12 
 drivers/common/cpt/cpt_pmd_ops_helper.c   |  5 +++--
 drivers/common/cpt/cpt_ucode.h| 11 +++
 drivers/common/cpt/cpt_ucode_asym.h   | 14 +++---
 drivers/crypto/octeontx/otx_cryptodev_ops.c   |  6 --
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  6 --
 6 files changed, 25 insertions(+), 29 deletions(-)

diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index eefe2755c1..f61495e458 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -19,18 +19,6 @@
 #define CPT_COUNT_THOLD 32
 #define CPT_TIMER_THOLD 0x3F
 
-#ifndef ROUNDUP4
-#define ROUNDUP4(val)  (((val) + 3) & 0xfffc)
-#endif
-
-#ifndef ROUNDUP8
-#define ROUNDUP8(val)  (((val) + 7) & 0xfff8)
-#endif
-
-#ifndef ROUNDUP16
-#define ROUNDUP16(val) (((val) + 15) & 0xfff0)
-#endif
-
 #define MOD_INC(i, l)   ((i) == (l - 1) ? (i) = 0 : (i)++)
 
 struct cpt_qp_meta_info {
diff --git a/drivers/common/cpt/cpt_pmd_ops_helper.c 
b/drivers/common/cpt/cpt_pmd_ops_helper.c
index 09b762f81e..2cf4ce 100644
--- a/drivers/common/cpt/cpt_pmd_ops_helper.c
+++ b/drivers/common/cpt/cpt_pmd_ops_helper.c
@@ -35,8 +35,9 @@ cpt_pmd_ops_helper_get_mlen_sg_mode(void)
 
len += sizeof(struct cpt_request_info);
len += CPT_OFFSET_CONTROL_BYTES + CPT_MAX_IV_LEN;
-   len += ROUNDUP8(SG_LIST_HDR_SIZE +
-   (ROUNDUP4(CPT_MAX_SG_IN_OUT_CNT) >> 2) * SG_ENTRY_SIZE);
+   len += RTE_ALIGN_CEIL((SG_LIST_HDR_SIZE +
+   (RTE_ALIGN_CEIL(CPT_MAX_SG_IN_OUT_CNT, 4) >> 2) *
+   SG_ENTRY_SIZE), 8);
len += 2 * COMPLETION_CODE_SIZE;
len += 2 * sizeof(cpt_res_s_t);
return len;
diff --git a/drivers/common/cpt/cpt_ucode.h b/drivers/common/cpt/cpt_ucode.h
index 664a04e1a0..557379ed01 100644
--- a/drivers/common/cpt/cpt_ucode.h
+++ b/drivers/common/cpt/cpt_ucode.h
@@ -528,7 +528,7 @@ cpt_digest_gen_prep(uint32_t flags,
if (ctx->hmac) {
vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HMAC | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = key_len;
-   vq_cmd_w0.s.dlen = data_len + ROUNDUP8(key_len);
+   vq_cmd_w0.s.dlen = data_len + RTE_ALIGN_CEIL(key_len, 8);
} else {
vq_cmd_w0.s.opcode.major = CPT_MAJOR_OP_HASH | CPT_DMA_MODE;
vq_cmd_w0.s.param1 = 0;
@@ -564,7 +564,8 @@ cpt_digest_gen_prep(uint32_t flags,
uint64_t k_dma = params->ctx_buf.dma_addr +
offsetof(struct cpt_ctx, auth_key);
/* Key */
-   i = fill_sg_comp(gather_comp, i, k_dma, ROUNDUP8(key_len));
+   i = fill_sg_comp(gather_comp, i, k_dma,
+RTE_ALIGN_CEIL(key_len, 8));
}
 
/* input data */
@@ -762,10 +763,12 @@ cpt_enc_hmac_prep(uint32_t flags,
enc_dlen = encr_data_len + encr_offset;
if (unlikely(encr_data_len & 0xf)) {
if ((cipher_type == DES3_CBC) || (cipher_type == DES3_ECB))
-   enc_dlen = ROUNDUP8(encr_data_len) + encr_offset;
+   enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 8) +
+   encr_offset;
else if (likely((cipher_type == AES_CBC) ||
(cipher_type == AES_ECB)))
-   enc_dlen = ROUNDUP16(encr_data_len) + encr_offset;
+   enc_dlen = RTE_ALIGN_CEIL(encr_data_len, 16) +
+   encr_offset;
}
 
if (unlikely(auth_dlen > enc_dlen)) {
diff --git a/drivers/common/cpt/cpt_ucode_asym.h 
b/drivers/common/cpt/cpt_ucode_asym.h
index 286f155849..50c6f58d3a 100644
--- a/drivers/common/cpt/cpt_ucode_asym.h
+++ b/drivers/common/cpt/cpt_ucode_asym.h
@@ -623,10 +623,10 @@ cpt_ecdsa_sign_prep(struct rte_crypto_ecdsa_op_param 
*ecdsa,
/* Truncate input length to curve prime length */
if (message_len > prime_len)
message_len = prime_len;
-   m_align = ROUNDUP8(message_len);
+   m_align = RTE_ALIGN_CEIL(message_len, 8);
 
-   p_align = ROUNDUP8(prime_len);
-   k_align = ROUNDUP8(k_len);
+   p_align = RTE_ALIGN_CEIL(prime_len, 8);
+   k_align = RTE_ALIGN_CEIL(k_len, 8);
 
/* Set write offset for order and private key */
o_offset = prime_len - order_len;
@@ -723,8 +723,8 @@ cpt_ecdsa_verify_prep(struct rte_crypto_ecdsa_op_param 
*ecdsa,
if (message_len > prime_len)
message_len = prime_len;
 
-   m_align = ROUNDUP8(message_len);
-   p_align = ROUNDUP8(prime_len);
+   m_align = RTE_ALIGN_CEIL(message_len, 8);
+   p_align = RTE_ALIGN_CEIL(prime_len, 8);
 
/* Set write offset for sign, o

[dpdk-dev] [PATCH 4/4] common/cpt: remove redundant structure

2020-11-03 Thread Archana Muniganti
Replace the structure 'rid', which has a single field, with
the field itself.

Signed-off-by: Archana Muniganti 
---
 drivers/common/cpt/cpt_common.h   |  7 +--
 drivers/crypto/octeontx/otx_cryptodev_hw_access.c | 10 +-
 drivers/crypto/octeontx/otx_cryptodev_ops.c   | 13 +++--
 drivers/crypto/octeontx2/otx2_cryptodev_ops.c | 13 ++---
 4 files changed, 19 insertions(+), 24 deletions(-)

diff --git a/drivers/common/cpt/cpt_common.h b/drivers/common/cpt/cpt_common.h
index f61495e458..7fea0ca879 100644
--- a/drivers/common/cpt/cpt_common.h
+++ b/drivers/common/cpt/cpt_common.h
@@ -27,11 +27,6 @@ struct cpt_qp_meta_info {
int lb_mlen;
 };
 
-struct rid {
-   /** Request id of a crypto operation */
-   uintptr_t rid;
-};
-
 /*
  * Pending queue structure
  *
@@ -40,7 +35,7 @@ struct pending_queue {
/** Pending requests count */
uint64_t pending_count;
/** Array of pending requests */
-   struct rid *rid_queue;
+   uintptr_t *req_queue;
/** Tail of queue to be used for enqueue */
uint16_t enq_tail;
/** Head of queue to be used for dequeue */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c 
b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
index ce546c2ffe..c6b1a5197d 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
@@ -535,7 +535,7 @@ otx_cpt_get_resource(const struct rte_cryptodev *dev, 
uint8_t group,
len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
 
/* For pending queue */
-   len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+   len += qlen * sizeof(uintptr_t);
 
/* So that instruction queues start as pg size aligned */
len = RTE_ALIGN(len, pg_sz);
@@ -570,14 +570,14 @@ otx_cpt_get_resource(const struct rte_cryptodev *dev, 
uint8_t group,
}
 
/* Pending queue setup */
-   cptvf->pqueue.rid_queue = (struct rid *)mem;
+   cptvf->pqueue.req_queue = (uintptr_t *)mem;
cptvf->pqueue.enq_tail = 0;
cptvf->pqueue.deq_head = 0;
cptvf->pqueue.pending_count = 0;
 
-   mem +=  qlen * RTE_ALIGN(sizeof(struct rid), 8);
-   len -=  qlen * RTE_ALIGN(sizeof(struct rid), 8);
-   dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+   mem +=  qlen * sizeof(uintptr_t);
+   len -=  qlen * sizeof(uintptr_t);
+   dma_addr += qlen * sizeof(uintptr_t);
 
/* Alignment wastage */
used_len = alloc_len - len;
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c 
b/drivers/crypto/octeontx/otx_cryptodev_ops.c
index 0a0c50a363..9f731f8cc9 100644
--- a/drivers/crypto/octeontx/otx_cryptodev_ops.c
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -430,7 +430,7 @@ otx_cpt_request_enqueue(struct cpt_instance *instance,
/* Default mode of software queue */
mark_cpt_inst(instance);
 
-   pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;
+   pqueue->req_queue[pqueue->enq_tail] = (uintptr_t)user_req;
 
/* We will use soft queue length here to limit requests */
MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
@@ -823,7 +823,6 @@ otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, 
uint16_t nb_ops,
struct cpt_instance *instance = (struct cpt_instance *)qptr;
struct cpt_request_info *user_req;
struct cpt_vf *cptvf = (struct cpt_vf *)instance;
-   struct rid *rid_e;
uint8_t cc[nb_ops];
int i, count, pcount;
uint8_t ret;
@@ -837,11 +836,13 @@ otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op 
**ops, uint16_t nb_ops,
count = (nb_ops > pcount) ? pcount : nb_ops;
 
for (i = 0; i < count; i++) {
-   rid_e = &pqueue->rid_queue[pqueue->deq_head];
-   user_req = (struct cpt_request_info *)(rid_e->rid);
+   user_req = (struct cpt_request_info *)
+   pqueue->req_queue[pqueue->deq_head];
 
-   if (likely((i+1) < count))
-   rte_prefetch_non_temporal((void *)rid_e[1].rid);
+   if (likely((i+1) < count)) {
+   rte_prefetch_non_temporal(
+   (void *)pqueue->req_queue[i+1]);
+   }
 
ret = check_nb_command_id(user_req, instance);
 
diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c 
b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
index fe76fe38c2..c337398242 100644
--- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
+++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c
@@ -192,7 +192,7 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, 
uint16_t qp_id,
size_div40 = (iq_len + 40 - 1) / 40 + 1;
 
/* For pending queue */
-   len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
+   len = iq_len * sizeof(uintptr_t);
 
/* Space for instruction group memory */
len += size_div40 * 16;

Re: [dpdk-dev] [PATCH 0/4] code cleanup and improvements

2020-11-03 Thread Anoob Joseph
> 
> This series has code cleanup and improvements for OCTEON TX and OCTEON
> TX2 crypto PMDs
> 
> Archana Muniganti (4):
>   common/cpt: prepopulate word7 in sess
>   common/cpt: remove temporary variable
>   common/cpt: use predefined macros
>   common/cpt: remove redundant structure
> 
>  drivers/common/cpt/cpt_common.h   |  20 +-
>  drivers/common/cpt/cpt_hw_types.h |  10 +-
>  drivers/common/cpt/cpt_mcode_defines.h|  21 +-
>  drivers/common/cpt/cpt_pmd_ops_helper.c   |   5 +-
>  drivers/common/cpt/cpt_ucode.h| 199 ++
>  drivers/common/cpt/cpt_ucode_asym.h   |  60 ++
>  .../crypto/octeontx/otx_cryptodev_hw_access.c |  10 +-
>  .../crypto/octeontx/otx_cryptodev_hw_access.h |   4 +-
>  drivers/crypto/octeontx/otx_cryptodev_ops.c   |  48 +++--
>  drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  67 +++---
>  drivers/crypto/octeontx2/otx2_cryptodev_sec.c |   4 +-
>  drivers/crypto/octeontx2/otx2_cryptodev_sec.h |   2 +-
>  drivers/crypto/octeontx2/otx2_ipsec_po_ops.h  |   2 -
>  13 files changed, 183 insertions(+), 269 deletions(-)
> 
> --
> 2.22.0

Series Acked-by: Anoob Joseph 


[dpdk-dev] [PATCH v2] common/mlx5: split relaxed ordering set for read and write

2020-11-03 Thread Tal Shnaiderman
The current DevX implementation of the relaxed ordering feature is
enabling relaxed ordering usage only if both relaxed ordering read AND
write are supported.  In that case both relaxed ordering read and write
are activated.

This commit will optimize the usage of relaxed ordering by enabling it
when the read OR write features are supported.  Each relaxed ordering
type will be activated according to its own capability bit.

This will align the DevX flow with the verbs implementation of
ibv_reg_mr when using the flag IBV_ACCESS_RELAXED_ORDERING

Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
Cc: sta...@dpdk.org

Signed-off-by: Tal Shnaiderman 

---

v2: fix compilation failure in mlx5_flow_age.c
---
 drivers/common/mlx5/mlx5_devx_cmds.c |  8 
 drivers/common/mlx5/mlx5_devx_cmds.h |  3 ++-
 drivers/net/mlx5/linux/mlx5_os.c | 13 +
 drivers/net/mlx5/mlx5.h  |  3 ++-
 drivers/net/mlx5/mlx5_flow.c |  3 ++-
 drivers/net/mlx5/mlx5_flow_age.c |  3 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_lm.c |  3 ++-
 drivers/vdpa/mlx5/mlx5_vdpa_mem.c|  3 ++-
 8 files changed, 25 insertions(+), 14 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_devx_cmds.c 
b/drivers/common/mlx5/mlx5_devx_cmds.c
index b792ce1aa3..5998c4b2ff 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.c
+++ b/drivers/common/mlx5/mlx5_devx_cmds.c
@@ -267,10 +267,10 @@ mlx5_devx_cmd_mkey_create(void *ctx,
MLX5_SET(mkc, mkc, pd, attr->pd);
MLX5_SET(mkc, mkc, mkey_7_0, attr->umem_id & 0xFF);
MLX5_SET(mkc, mkc, translations_octword_size, translation_size);
-   if (attr->relaxed_ordering == 1) {
-   MLX5_SET(mkc, mkc, relaxed_ordering_write, 0x1);
-   MLX5_SET(mkc, mkc, relaxed_ordering_read, 0x1);
-   }
+   MLX5_SET(mkc, mkc, relaxed_ordering_write,
+   attr->relaxed_ordering_write);
+   MLX5_SET(mkc, mkc, relaxed_ordering_read,
+   attr->relaxed_ordering_read);
MLX5_SET64(mkc, mkc, start_addr, attr->addr);
MLX5_SET64(mkc, mkc, len, attr->size);
mkey->obj = mlx5_glue->devx_obj_create(ctx, in, in_size_dw * 4, out,
diff --git a/drivers/common/mlx5/mlx5_devx_cmds.h 
b/drivers/common/mlx5/mlx5_devx_cmds.h
index 553b26c0ba..8d66f1dde5 100644
--- a/drivers/common/mlx5/mlx5_devx_cmds.h
+++ b/drivers/common/mlx5/mlx5_devx_cmds.h
@@ -20,7 +20,8 @@ struct mlx5_devx_mkey_attr {
uint32_t pd;
uint32_t log_entity_size;
uint32_t pg_access:1;
-   uint32_t relaxed_ordering:1;
+   uint32_t relaxed_ordering_write:1;
+   uint32_t relaxed_ordering_read:1;
struct mlx5_klm *klm_array;
int klm_num;
 };
diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index 79dc65d18e..c78d56fae3 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -1141,10 +1141,15 @@ mlx5_dev_spawn(struct rte_device *dpdk_dev,
}
 #endif /* HAVE_MLX5DV_DR_ACTION_FLOW_HIT */
/* Check relax ordering support. */
-   if (config->hca_attr.relaxed_ordering_write &&
-   config->hca_attr.relaxed_ordering_read  &&
-   !haswell_broadwell_cpu)
-   sh->cmng.relaxed_ordering = 1;
+   if (!haswell_broadwell_cpu) {
+   sh->cmng.relaxed_ordering_write =
+   config->hca_attr.relaxed_ordering_write;
+   sh->cmng.relaxed_ordering_read =
+   config->hca_attr.relaxed_ordering_read;
+   } else {
+   sh->cmng.relaxed_ordering_read = 0;
+   sh->cmng.relaxed_ordering_write = 0;
+   }
/* Check for LRO support. */
if (config->dest_tir && config->hca_attr.lro_cap &&
config->dv_flow_en) {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63d263384b..b43a8c9bf7 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -467,7 +467,8 @@ struct mlx5_flow_counter_mng {
uint8_t pending_queries;
uint16_t pool_index;
uint8_t query_thread_on;
-   bool relaxed_ordering;
+   bool relaxed_ordering_read;
+   bool relaxed_ordering_write;
bool counter_fallback; /* Use counter fallback management. */
LIST_HEAD(mem_mngs, mlx5_counter_stats_mem_mng) mem_mngs;
LIST_HEAD(stat_raws, mlx5_counter_stats_raw) free_stat_raws;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index f9420e7117..8b071a5a4d 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -6658,7 +6658,8 @@ mlx5_flow_create_counter_stat_mem_mng(struct 
mlx5_dev_ctx_shared *sh)
mkey_attr.pg_access = 0;
mkey_attr.klm_array = NULL;
mkey_attr.klm_num = 0;
-   mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
+   mkey_attr

Re: [dpdk-dev] [PATCH v3 00/16] remove mbuf timestamp

2020-11-03 Thread David Marchand
On Tue, Nov 3, 2020 at 1:14 AM Thomas Monjalon  wrote:
>
> The mbuf field timestamp was announced to be removed for three reasons:
>   - a dynamic field already exist, used for Tx only
>   - this field always used 8 bytes even if unneeded
>   - this field is in the first half (cacheline) of mbuf
>
> After this series, the dynamic field timestamp is used for both Rx and Tx
> with separate dynamic flags to distinguish when the value is meaningful
> without resetting the field during forwarding.
>
> As a consequence, 8 bytes can be re-allocated to dynamic fields
> in the first half of mbuf structure.
> It is still open to change more the mbuf layout.
>
> This mbuf layout change is important to allow adding more features
> (consuming more dynamic fields) during the next year,
> and can allow performance improvements with new usages in the first half.

For the series:
Acked-by: David Marchand 


-- 
David Marchand



Re: [dpdk-dev] [PATCH 1/8] regex/octeontx2: fix unnecessary name override

2020-11-03 Thread Thomas Monjalon
03/11/2020 09:19, David Marchand:
> On Tue, Nov 3, 2020 at 1:30 AM Thomas Monjalon  wrote:
> > > > -name = 'octeontx2_regex'
> >
> > But it is not the same?
> >
> > The name will default to "octeontx2", which is fine.
> > But the fmt_name should not take this default.
> > I believe fmt_name should be "octeontx2_regex" as I did in my patch.
> 
> fmt_name is only for maintaining config compat.
> This driver is new to 20.11.
> We can drop fmt_name too.

If we don't set fmt_name, it defaults to "name", "octeontx2" here.
What is the consequence in compat definitions?




Re: [dpdk-dev] [PATCH v2] common/mlx5: split relaxed ordering set for read and write

2020-11-03 Thread Matan Azrad



From: Tal Shnaiderman
> The current DevX implementation of the relaxed ordering feature is enabling
> relaxed ordering usage only if both relaxed ordering read AND write are
> supported.  In that case both relaxed ordering read and write are activated.
> 
> This commit will optimize the usage of relaxed ordering by enabling it when
> the read OR write features are supported.  Each relaxed ordering type will be
> activated according to its own capability bit.
> 
> This will align the DevX flow with the verbs implementation of ibv_reg_mr
> when using the flag IBV_ACCESS_RELAXED_ORDERING
> 
> Fixes: 53ac93f71ad1 ("net/mlx5: create relaxed ordering memory regions")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Tal Shnaiderman 
Acked-by: Matan Azrad 


Re: [dpdk-dev] [PATCH v3 05/16] net/dpaa2: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Hemant Agrawal
Acked-by: Hemant Agrawal 


Re: [dpdk-dev] [PATCH 1/8] regex/octeontx2: fix unnecessary name override

2020-11-03 Thread David Marchand
On Tue, Nov 3, 2020 at 10:06 AM Thomas Monjalon  wrote:
>
> 03/11/2020 09:19, David Marchand:
> > On Tue, Nov 3, 2020 at 1:30 AM Thomas Monjalon  wrote:
> > > > > -name = 'octeontx2_regex'
> > >
> > > But it is not the same?
> > >
> > > The name will default to "octeontx2", which is fine.
> > > But the fmt_name should not take this default.
> > > I believe fmt_name should be "octeontx2_regex" as I did in my patch.
> >
> > fmt_name is only for maintaining config compat.
> > This driver is new to 20.11.
> > We can drop fmt_name too.
>
> If we don't set fmt_name, it defaults to "name", "octeontx2" here.
> What is the consequence in compat definitions?

$ git reset --hard origin/main
HEAD is now at 30cf171352 app/regex: add job context
$ ninja-build -C build >/dev/null 2>&1 && ls -rt
build/drivers/*regex*octeo*.so.21.0 |tail -1; grep OCTEONTX2
build/rte_build_config.h  |grep REG
build/drivers/librte_regex_octeontx2_regex.so.21.0
#define RTE_LIBRTE_OCTEONTX2_REGEX_PMD 1
#define RTE_REGEX_OCTEONTX2_REGEX 1

$ git reset --hard origin/main
HEAD is now at 30cf171352 app/regex: add job context
$ git pw patch apply 81961
Applying: regex/octeontx2: fix driver name
$ ninja-build -C build >/dev/null 2>&1 && ls -rt
build/drivers/*regex*octeo*.so.21.0 |tail -1; grep OCTEONTX2
build/rte_build_config.h  |grep REG
build/drivers/librte_regex_octeontx2.so.21.0
#define RTE_LIBRTE_OCTEONTX2_REGEX_PMD 1
#define RTE_REGEX_OCTEONTX2 1

$ git reset --hard origin/main
HEAD is now at 30cf171352 app/regex: add job context
$ git pw patch apply --no-deps 83425
Applying: regex/octeontx2: fix unnecessary name override
$ ninja-build -C build >/dev/null 2>&1 && ls -rt
build/drivers/*regex*octeo*.so.21.0 |tail -1; grep OCTEONTX2
build/rte_build_config.h  |grep REG
build/drivers/librte_regex_octeontx2.so.21.0
#define RTE_REGEX_OCTEONTX2 1

-- 
David Marchand



Re: [dpdk-dev] [PATCH v3 02/16] mbuf: add Rx timestamp flag and helpers

2020-11-03 Thread Olivier Matz
Hi Thomas,

On Tue, Nov 03, 2020 at 01:13:53AM +0100, Thomas Monjalon wrote:
> There is already a dynamic field for timestamp,
> used only for Tx scheduling with the dedicated Tx offload flag.
> The same field can be used for Rx timestamp filled by drivers.
> 
> A new dynamic flag is defined for Rx usage.
> A new function wraps the registration of both field and Rx flag.
> The type rte_mbuf_timestamp_t is defined for the API users.
> 
> After migrating all Rx timestamp usages, it will be possible
> to remove the deprecated timestamp field.
> 
> Signed-off-by: Thomas Monjalon 
> ---
>  lib/librte_mbuf/rte_mbuf_dyn.c | 43 ++
>  lib/librte_mbuf/rte_mbuf_dyn.h | 33 ++
>  lib/librte_mbuf/version.map|  1 +
>  3 files changed, 72 insertions(+), 5 deletions(-)
> 
> diff --git a/lib/librte_mbuf/rte_mbuf_dyn.c b/lib/librte_mbuf/rte_mbuf_dyn.c
> index 538a43f695..e279b23aea 100644
> --- a/lib/librte_mbuf/rte_mbuf_dyn.c
> +++ b/lib/librte_mbuf/rte_mbuf_dyn.c
> @@ -13,6 +13,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  #include 
>  #include 
>  
> @@ -569,3 +570,45 @@ void rte_mbuf_dyn_dump(FILE *out)
>  
>   rte_mcfg_tailq_write_unlock();
>  }
> +
> +static int
> +rte_mbuf_dyn_timestamp_register(int *field_offset, uint64_t *flag,
> + const char *direction, const char *flag_name)
> +{
> + static const struct rte_mbuf_dynfield field_desc = {
> + .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
> + .size = sizeof(rte_mbuf_timestamp_t),
> + .align = __alignof__(rte_mbuf_timestamp_t),
> + };
> + struct rte_mbuf_dynflag flag_desc;
> + int offset;
> +
> + offset = rte_mbuf_dynfield_register(&field_desc);
> + if (offset < 0) {
> + RTE_LOG(ERR, MBUF,
> + "Failed to register mbuf field for timestamp\n");
> + return -1;
> + }
> + if (field_offset != NULL)
> + *field_offset = offset;
> +
> + strlcpy(flag_desc.name, flag_name, sizeof flag_desc.name);

The rest of the flag_desc structure is not initialized to 0 (the "flags"
field).

I suggest to do it at declaration:

struct rte_mbuf_dynflag flag_desc = { 0 };


> + offset = rte_mbuf_dynflag_register(&flag_desc);
> + if (offset < 0) {
> + RTE_LOG(ERR, MBUF,
> + "Failed to register mbuf flag for %s timestamp\n",
> + direction);
> + return -1;
> + }
> + if (flag != NULL)
> + *flag = RTE_BIT64(offset);
> +
> + return 0;
> +}
> +
> +int
> +rte_mbuf_dyn_rx_timestamp_register(int *field_offset, uint64_t *rx_flag)
> +{
> + return rte_mbuf_dyn_timestamp_register(field_offset, rx_flag,
> + "Rx", RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME);
> +}
> diff --git a/lib/librte_mbuf/rte_mbuf_dyn.h b/lib/librte_mbuf/rte_mbuf_dyn.h
> index 0ebac88b83..2e729ddaca 100644
> --- a/lib/librte_mbuf/rte_mbuf_dyn.h
> +++ b/lib/librte_mbuf/rte_mbuf_dyn.h
> @@ -258,13 +258,36 @@ void rte_mbuf_dyn_dump(FILE *out);
>   * timestamp. The dynamic Tx timestamp flag tells whether the field contains
>   * actual timestamp value for the packets being sent, this value can be
>   * used by PMD to schedule packet sending.
> - *
> - * After PKT_RX_TIMESTAMP flag and fixed timestamp field deprecation
> - * and obsoleting, the dedicated Rx timestamp flag is supposed to be
> - * introduced and the shared dynamic timestamp field will be used
> - * to handle the timestamps on receiving datapath as well.
>   */
>  #define RTE_MBUF_DYNFIELD_TIMESTAMP_NAME "rte_dynfield_timestamp"
> +typedef uint64_t rte_mbuf_timestamp_t;
> +
> +/**
> + * Indicate that the timestamp field in the mbuf was filled by the driver.
> + */
> +#define RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME "rte_dynflag_rx_timestamp"
> +
> +/**
> + * @warning
> + * @b EXPERIMENTAL: this API may change without prior notice.
> + *
> + * Register dynamic mbuf field and flag for Rx timestamp.
> + *
> + * @param field_offset
> + *   Pointer to the offset of the registered mbuf field, can be NULL.
> + *   The same field is shared for Rx and Tx timestamp.
> + * @param rx_flag
> + *   Pointer to the mask of the registered offload flag, can be NULL.
> + * @return
> + *   0 on success, -1 otherwise.
> + *   Possible values for rte_errno:
> + *   - EEXIST: already registered with different parameters.
> + *   - EPERM: called from a secondary process.
> + *   - ENOENT: no more field or flag available.
> + *   - ENOMEM: allocation failure.
> + */
> +__rte_experimental
> +int rte_mbuf_dyn_rx_timestamp_register(int *field_offset, uint64_t *rx_flag);
>  
>  /**
>   * When PMD sees the RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME flag set on the
> diff --git a/lib/librte_mbuf/version.map b/lib/librte_mbuf/version.map
> index a011aaead3..0b8bff 100644
> --- a/lib/librte_mbuf/version.map
> +++ b/lib/librte_mbuf/version.map
> @@ -42,6 +42,7 @@ EXPERIMENTAL {
>   rte_mbuf_dy

Re: [dpdk-dev] [RFC PATCH v3 3/6] build: automatic NUMA and cpu counts detection

2020-11-03 Thread Bruce Richardson
On Mon, Nov 02, 2020 at 07:01:44PM +, Honnappa Nagarahalli wrote:
> 
> 
> > >
> > Part of the confusion arises from the fact that originally that was the only
> > parameter set by this - and on x86 it still is. Perhaps this parameter 
> > needs to
> Just wondering, for x86, what does it mean if we set the max_num_cores and 
> max_numa_nodes based on dynamic detection for 'native' build?
> ISA still remains the same as before. But, the build might not work on 
> machines with higher number of cores and numa nodes.
> At the same time, the build also might not work on a machine with a different 
> ISA. The users need to be aware that the target machine has the same ISA and 
> same number of cores/numa nodes as the build machine.
> 
Yes, that is a fair summary.

> > be renamed to "isa-level" or "architecture-flag" or similar to reflect its
> > meaning. This would then allow a new "machine" setting, which can be
> > considered separately. The question then is how much that helps with the
> > main issue under discussion, that of cores and numa node values.
> If we rename it, we will have backward compatibility issue (i.e. 'native' 
> build on x86 will have different meaning and whoever wants the original 
> meaning, have to change to using this new name). Not sure about the 
> complexity in meson scripts.
> 

Yep, it was just a thought to see if it could help in this situation.

> 
> > 
> > > But, I think other DPDK specific parameters should also be considered.
> > > For ex: RTE_MAX_LCORE should have a particular value for 'generic' build 
> > > in
> > all the supported architectures. The value could be different for each
> > architecture, but it is fixed for the 'generic' build for a given 
> > architecture.
> > Otherwise, the 'generic' build might not run on all the machines of that
> > architecture.
> > >
> > > Similarly, for 'native' build, is there any reason not to include other 
> > > DPDK
> > parameters as part of the definition? IMO, 'native' should refer to the 
> > entire
> > build machine, not just the ISA. i.e. build on the target machine.
> > >
> > 
> > While I understand the idea here, it is somewhat complicated by the fact 
> > that
> > the meson options given in "meson_options.txt" cannot be set by meson
> > code, which means that when we change the machine flag to "native" we
> > can only use or ignore the user-provided lcores and numa nodes setting - we
> > have no way to change them and reflect those changes back to the user. :-(
> > This leads to the situation in the discussion in this thread, where we start
> > needing all sorts of magic values to indicate use of machine-type defaults 
> > or
> > detected defaults.
> I am wondering why we need to take the max_num_cores and max_numa_nodes from 
> the user? This option was not provided in the make build system. I ask this 
> question because for 'generic' this has to be a static/known configuration. 
> For cross builds, this info can come (or derived) from the cross build file.
> Was it supposed to be used in conjunction with 'native' build?
> 

Well, it was configurable in the build config files same as all other DPDK
build settings with make. When working first on meson, I felt it was a
setting the user might be likely to want to tune, which is why I put it
into the meson_options.txt and nobody suggested otherwise on review [which
is the reason why many of the current options are the way they are :-)].

From my side, I have a couple of unknowns:
1. How big a difference in terms of memory use etc. of DPDK does it make by
   having really big values for these core/numa counts? If there is not much
   difference, then there is indeed little value in having them configurable
   at all, and we should just use big defaults and be done with it.
2. If there is a noticable difference in these settings, how many users are
   going to want to actually go to the trouble of tweaking these?
3. How big an effort is it to switch to having these settings made entirely
   dynamic at runtime? Doing so would naturally make the need for these
   settings completely go away.

With all that said, I'd be ok with a number of solutions. I'm ok to have
these dropped as meson options and just have them specified in other ways,
e.g. cross-file, or from meson.build files. [For x86, I'd tend towards
having them defined in rte_config.h inside x86-specific ifdefs].
Alternatively, I'm also happy enough with the proposed scheme here of
allowing user override, with platform defaults using "0"-value and
detection using "-1".

Regards,
/Bruce


Re: [dpdk-dev] [PATCH 1/8] regex/octeontx2: fix unnecessary name override

2020-11-03 Thread David Marchand
On Tue, Nov 3, 2020 at 10:19 AM David Marchand
 wrote:
>
> On Tue, Nov 3, 2020 at 10:06 AM Thomas Monjalon  wrote:
> >
> > 03/11/2020 09:19, David Marchand:
> > > On Tue, Nov 3, 2020 at 1:30 AM Thomas Monjalon  
> > > wrote:
> > > > > > -name = 'octeontx2_regex'
> > > >
> > > > But it is not the same?
> > > >
> > > > The name will default to "octeontx2", which is fine.
> > > > But the fmt_name should not take this default.
> > > > I believe fmt_name should be "octeontx2_regex" as I did in my patch.
> > >
> > > fmt_name is only for maintaining config compat.
> > > This driver is new to 20.11.
> > > We can drop fmt_name too.
> >
> > If we don't set fmt_name, it defaults to "name", "octeontx2" here.
> > What is the consequence in compat definitions?
>

Ok, got it, the problem is when we disable the net/octeontx2 driver.
Your patch correctly sets a RTE_LIBRTE_OCTEONTX2_REGEX_PMD compat
option that is unused but that does not overwrite the
RTE_LIBRTE_OCTEONTX2_PMD compat option (which indicates the presence
of the net/octeontx2 driver).


-- 
David Marchand



[dpdk-dev] [PATCH v3] net/iavf: fix PROT field for rss hash

2020-11-03 Thread Jeff Guo
Add PROT field into IPv4 and IPv6 protocol headers for rss hash.

Fixes: 91f27b2e39ab ("net/iavf: refactor RSS")
Signed-off-by: Jeff Guo 
---
v3:
handle unexpected PROT clearing when refine l2 hdrs

v2:
add prot into the new hdr replace of function modify
---
 drivers/net/iavf/iavf_hash.c | 56 
 1 file changed, 38 insertions(+), 18 deletions(-)

diff --git a/drivers/net/iavf/iavf_hash.c b/drivers/net/iavf/iavf_hash.c
index b56152c5b8..8a5a6bb5a4 100644
--- a/drivers/net/iavf/iavf_hash.c
+++ b/drivers/net/iavf/iavf_hash.c
@@ -101,11 +101,23 @@ iavf_hash_parse_pattern_action(struct iavf_adapter *ad,
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST), {BUFF_NOUSED} }
 
+#define proto_hdr_ipv4_with_prot { \
+   VIRTCHNL_PROTO_HDR_IPV4, \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_SRC) | \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_DST) | \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV4_PROT), {BUFF_NOUSED} }
+
 #define proto_hdr_ipv6 { \
VIRTCHNL_PROTO_HDR_IPV6, \
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST), {BUFF_NOUSED} }
 
+#define proto_hdr_ipv6_with_prot { \
+   VIRTCHNL_PROTO_HDR_IPV6, \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_SRC) | \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_DST) | \
+   FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_IPV6_PROT), {BUFF_NOUSED} }
+
 #define proto_hdr_udp { \
VIRTCHNL_PROTO_HDR_UDP, \
FIELD_SELECTOR(VIRTCHNL_PROTO_HDR_UDP_SRC_PORT) | \
@@ -151,13 +163,15 @@ struct virtchnl_proto_hdrs outer_ipv4_tmplt = {
 
 struct virtchnl_proto_hdrs outer_ipv4_udp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
-   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
+   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+proto_hdr_ipv4_with_prot,
 proto_hdr_udp}
 };
 
 struct virtchnl_proto_hdrs outer_ipv4_tcp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
-   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv4,
+   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+proto_hdr_ipv4_with_prot,
 proto_hdr_tcp}
 };
 
@@ -174,13 +188,15 @@ struct virtchnl_proto_hdrs outer_ipv6_tmplt = {
 
 struct virtchnl_proto_hdrs outer_ipv6_udp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
-   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
+   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+proto_hdr_ipv6_with_prot,
 proto_hdr_udp}
 };
 
 struct virtchnl_proto_hdrs outer_ipv6_tcp_tmplt = {
TUNNEL_LEVEL_OUTER, 5,
-   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan, proto_hdr_ipv6,
+   {proto_hdr_eth, proto_hdr_svlan, proto_hdr_cvlan,
+proto_hdr_ipv6_with_prot,
 proto_hdr_tcp}
 };
 
@@ -195,11 +211,11 @@ struct virtchnl_proto_hdrs inner_ipv4_tmplt = {
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_udp_tmplt = {
-   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_udp}
+   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_udp}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_tcp_tmplt = {
-   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4, proto_hdr_tcp}
+   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv4_with_prot, proto_hdr_tcp}
 };
 
 struct virtchnl_proto_hdrs inner_ipv4_sctp_tmplt = {
@@ -211,11 +227,11 @@ struct virtchnl_proto_hdrs inner_ipv6_tmplt = {
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_udp_tmplt = {
-   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_udp}
+   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_udp}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_tcp_tmplt = {
-   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6, proto_hdr_tcp}
+   TUNNEL_LEVEL_INNER, 2, {proto_hdr_ipv6_with_prot, proto_hdr_tcp}
 };
 
 struct virtchnl_proto_hdrs inner_ipv6_sctp_tmplt = {
@@ -581,14 +597,16 @@ iavf_refine_proto_hdrs_l234(struct virtchnl_proto_hdrs 
*proto_hdrs,
 ETH_RSS_NONFRAG_IPV4_UDP |
 ETH_RSS_NONFRAG_IPV4_TCP |
 ETH_RSS_NONFRAG_IPV4_SCTP)) {
-   if (rss_type & ETH_RSS_L3_SRC_ONLY)
+   if (rss_type & ETH_RSS_L3_SRC_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_DST);
-   else if (rss_type & ETH_RSS_L3_DST_ONLY)
+   } else if (rss_type & ETH_RSS_L3_DST_ONLY) {
REFINE_PROTO_FLD(DEL, IPV4_SRC);
-   else if (rss_type &
+   } else if (rss_type &
 (ETH_RSS_L4_SRC_ONLY |
- ETH_RSS_L4_DST_ONLY))
-   hdr->field_selector = 0;
+ ETH_RSS_L4_DST_ONLY)) {
+

Re: [dpdk-dev] [PATCH v3 02/16] mbuf: add Rx timestamp flag and helpers

2020-11-03 Thread Thomas Monjalon
03/11/2020 10:33, Olivier Matz:
> On Tue, Nov 03, 2020 at 01:13:53AM +0100, Thomas Monjalon wrote:
> > +static int
> > +rte_mbuf_dyn_timestamp_register(int *field_offset, uint64_t *flag,
> > +   const char *direction, const char *flag_name)
> > +{
> > +   static const struct rte_mbuf_dynfield field_desc = {
> > +   .name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
> > +   .size = sizeof(rte_mbuf_timestamp_t),
> > +   .align = __alignof__(rte_mbuf_timestamp_t),
> > +   };
> > +   struct rte_mbuf_dynflag flag_desc;
> > +   int offset;
> > +
> > +   offset = rte_mbuf_dynfield_register(&field_desc);
> > +   if (offset < 0) {
> > +   RTE_LOG(ERR, MBUF,
> > +   "Failed to register mbuf field for timestamp\n");
> > +   return -1;
> > +   }
> > +   if (field_offset != NULL)
> > +   *field_offset = offset;
> > +
> > +   strlcpy(flag_desc.name, flag_name, sizeof flag_desc.name);
> 
> The rest of the flag_desc structure is not initialized to 0 (the "flags"
> field).
> 
> I suggest to do it at declaration:
> 
>   struct rte_mbuf_dynflag flag_desc = { 0 };

Yes I forgot, thanks for catching.





Re: [dpdk-dev] [PATCH] eal: fix incorrect API doc for power intrinsics

2020-11-03 Thread Liang, Ma
On 02 Nov 12:57, Anatoly Burakov wrote:
> Currently, the intrinsics documentation refers to `rte_cpu_get_features`
> as a check for whether these intrinsics are supported at runtime. This
> is incorrect, because actually the user should use the
> `rte_cpu_get_intrinsics_support` API to do said check. Fix the typo.
> 
> Fixes: 128021421256 ("eal: add intrinsics support check infrastructure")
> Cc: liang.j...@intel.com
> 
> Signed-off-by: Anatoly Burakov 
Acked-by: Liang Ma 
> ---
>  lib/librte_eal/include/generic/rte_power_intrinsics.h | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/lib/librte_eal/include/generic/rte_power_intrinsics.h 
> b/lib/librte_eal/include/generic/rte_power_intrinsics.h
> index 9622c7f9ce..dd520d90fa 100644
> --- a/lib/librte_eal/include/generic/rte_power_intrinsics.h
> +++ b/lib/librte_eal/include/generic/rte_power_intrinsics.h
> @@ -33,7 +33,7 @@
>   * optimized power state may be aborted.
>   *
>   * @warning It is responsibility of the user to check if this function is
> - *   supported at runtime using `rte_cpu_get_features()` API call.
> + *   supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
>   *   Failing to do so may result in an illegal CPU instruction error.
>   *
>   * @param p
> @@ -74,7 +74,7 @@ static inline void rte_power_monitor(const volatile void *p,
>   * waking up the CPU.
>   *
>   * @warning It is responsibility of the user to check if this function is
> - *   supported at runtime using `rte_cpu_get_features()` API call.
> + *   supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
>   *   Failing to do so may result in an illegal CPU instruction error.
>   *
>   * @param p
> @@ -110,7 +110,7 @@ static inline void rte_power_monitor_sync(const volatile 
> void *p,
>   * timestamp is reached.
>   *
>   * @warning It is responsibility of the user to check if this function is
> - *   supported at runtime using `rte_cpu_get_features()` API call.
> + *   supported at runtime using `rte_cpu_get_intrinsics_support()` API call.
>   *   Failing to do so may result in an illegal CPU instruction error.
>   *
>   * @param tsc_timestamp
> -- 
> 2.17.1


Re: [dpdk-dev] [pull-request] next-eventdev 20.11 RC2

2020-11-03 Thread Thomas Monjalon
02/11/2020 16:57, McDaniel, Timothy:
> From: David Marchand 
> > On Mon, Nov 2, 2020 at 3:31 PM Jerin Jacob Kollanukkaran
> >  wrote:
> > >
> > > The following changes since commit
> > 79d69c6dcf0debea38ac258d230e2f8c93e5ad12:
> > >
> > >   mbuf: remove seqn field (2020-10-31 22:14:44 +0100)
> > >
> > > are available in the Git repository at:
> > >
> > >   http://dpdk.org/git/next/dpdk-next-eventdev
> > >
> > > for you to fetch changes up to
> > e06cd1ddfeac30b4926a52a69336b4f25a9cd209:
> > >
> > >   eventdev: check input parameter for dump op (2020-11-02 19:16:51 +0530)
> > >
> > > 
> > > David Marchand (1):
> > >   eventdev: check input parameter for dump op
> > 
> > About this patch, could we squash some change in the dlb/dlb2 drivers?
> > I guess Thomas could do it while pulling if nobody objects?
> > 
> > 
> > diff --git a/drivers/event/dlb/dlb_xstats.c b/drivers/event/dlb/dlb_xstats.c
> > index 89186d506e..5f4c590307 100644
> > --- a/drivers/event/dlb/dlb_xstats.c
> > +++ b/drivers/event/dlb/dlb_xstats.c
> > @@ -921,11 +921,6 @@ dlb_eventdev_dump(struct rte_eventdev *dev, FILE *f)
> > struct dlb_hw_dev *handle;
> > int i;
> > 
> > -   if (f == NULL) {
> > -   DLB_LOG_ERR("Invalid file pointer\n");
> > -   return;
> > -   }
> > -
> > dlb = dlb_pmd_priv(dev);
> > 
> > if (dlb == NULL) {
> > diff --git a/drivers/event/dlb2/dlb2_xstats.c 
> > b/drivers/event/dlb2/dlb2_xstats.c
> > index a274779fee..8c3c3cda94 100644
> > --- a/drivers/event/dlb2/dlb2_xstats.c
> > +++ b/drivers/event/dlb2/dlb2_xstats.c
> > @@ -972,11 +972,6 @@ dlb2_eventdev_dump(struct rte_eventdev *dev, FILE
> > *f)
> > struct dlb2_hw_dev *handle;
> > int i;
> > 
> > -   if (f == NULL) {
> > -   DLB2_LOG_ERR("Invalid file pointer\n");
> > -   return;
> > -   }
> > -
> > dlb2 = dlb2_pmd_priv(dev);
> > 
> > if (dlb2 == NULL) {
> > 
> > 
> > 
> > --
> > David Marchand
> 
> I am fine with Thomas pulling these changes in.

OK will do, after moving David's patch at first position.




Re: [dpdk-dev] [kmods PATCH] windows: normalize line-endings

2020-11-03 Thread Luca Boccassi
On Tue, 2020-11-03 at 10:42 +0300, Dmitry Kozlyuk wrote:
> Hi Naty,
> 
> > Doesn't the 'input' option gets you original (unmodified) line endings on 
> > checkout?
> > If a file in the repo has CRLF endings, Unix devs will get it as-is (with 
> > CRLF), right?
> 
> The "input" option is for Unix devs only.
> 
> > Regardless, this is the best option for Unix and since we're normalizing 
> > line endings for 
> > existing files, Unix devs should get LF on checkout.
> > I will send a patch to normalize netuio as well.
> 
> AFAIK, you don't need to. Whatever you settings were when committing netuio,
> it now checks out with CRLF on Windows (core.autocrlf=true, installer
> default) and with LF on Linux (core.autocrlf=input, although unset also
> works). I'd wait for confirmation from Luca, though.

Hi,

Thank you for looking into this. Yes the netuio folder is ok as-is:

$ dos2unix -id windows/netuio/netuio.vcxproj* windows/netuio/netuio.sln
   0  windows/netuio/netuio.vcxproj
   0  windows/netuio/netuio.vcxproj.filters
   0  windows/netuio/netuio.sln

The patch looks good to me, as I really need all files to be checked in
with LF only as a hard requirement before I can upload to
Debian/Ubuntu, so:

Acked-by: Luca Boccassi 

Does the change also stop future check-ins of CRLF files?

-- 
Kind regards,
Luca Boccassi


[dpdk-dev] [PATCH 01/37] net/txgbe: add ntuple filter init and uninit

2020-11-03 Thread Jiawen Wu
Add ntuple filter init and uninit.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h |  1 +
 drivers/net/txgbe/txgbe_ethdev.c| 28 
 drivers/net/txgbe/txgbe_ethdev.h| 33 +
 3 files changed, 62 insertions(+)

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index b322a2cac..69aa8993a 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -15,6 +15,7 @@
 #define TXGBE_FRAME_SIZE_DFT   (1518) /* Default frame size, +FCS */
 #define TXGBE_NUM_POOL (64)
 #define TXGBE_PBTXSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
+#define TXGBE_MAX_FTQF_FILTERS 128
 #define TXGBE_TXPKT_SIZE_MAX   0xA /* Max Tx Packet size */
 #define TXGBE_MAX_UP   8
 #define TXGBE_MAX_QP   (128)
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 189caf2e9..08f19a2ef 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -470,6 +470,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
const struct rte_memzone *mz;
@@ -677,6 +678,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* enable support intr */
txgbe_enable_intr(eth_dev);
 
+   /* initialize filter info */
+   memset(filter_info, 0,
+  sizeof(struct txgbe_filter_info));
+
+   /* initialize 5tuple filter list */
+   TAILQ_INIT(&filter_info->fivetuple_list);
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
@@ -696,6 +704,23 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
return 0;
 }
 
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+   struct txgbe_5tuple_filter *p_5tuple;
+
+   while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+   TAILQ_REMOVE(&filter_info->fivetuple_list,
+p_5tuple,
+entries);
+   rte_free(p_5tuple);
+   }
+   memset(filter_info->fivetuple_mask, 0,
+  sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+   return 0;
+}
+
 static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
@@ -1774,6 +1799,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
 
+   /* Remove all ntuple filters of the device */
+   txgbe_ntuple_filter_uninit(dev);
+
return ret;
 }
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2c3680218..60687bc49 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -110,6 +110,36 @@ struct txgbe_vf_info {
uint16_t mac_count;
 };
 
+TAILQ_HEAD(txgbe_5tuple_filter_list, txgbe_5tuple_filter);
+
+struct txgbe_5tuple_filter_info {
+   uint32_t dst_ip;
+   uint32_t src_ip;
+   uint16_t dst_port;
+   uint16_t src_port;
+   enum txgbe_5tuple_protocol proto;/* l4 protocol. */
+   uint8_t priority;/* seven levels (001b-111b), 111b is highest,
+ * used when more than one filter matches.
+ */
+   uint8_t dst_ip_mask:1,   /* if mask is 1b, do not compare dst ip. */
+   src_ip_mask:1,   /* if mask is 1b, do not compare src ip. */
+   dst_port_mask:1, /* if mask is 1b, do not compare dst port. */
+   src_port_mask:1, /* if mask is 1b, do not compare src port. */
+   proto_mask:1;/* if mask is 1b, do not compare protocol. */
+};
+
+/* 5tuple filter structure */
+struct txgbe_5tuple_filter {
+   TAILQ_ENTRY(txgbe_5tuple_filter) entries;
+   uint16_t index;   /* the index of 5tuple filter */
+   struct txgbe_5tuple_filter_info filter_info;
+   uint16_t queue;   /* rx queue assigned to */
+};
+
+#define TXGBE_5TUPLE_ARRAY_SIZE \
+   (RTE_ALIGN(TXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
+(sizeof(uint32_t) * NBBY))
+
 struct txgbe_ethertype_filter {
uint16_t ethertype;
uint32_t etqf;
@@ -128,6 +158,9 @@ struct txgbe_filter_info {
uint8_t ethertype_mask;  /* Bit mask for every used ethertype filter */
/* store used ethertype f

[dpdk-dev] [PATCH 00/37] net: add txgbe PMD part 2

2020-11-03 Thread Jiawen Wu
Add the remaining part of txgbe PMD.
Support include flow API, traffic manager, macsec and ipsec.

Jiawen Wu (37):
  net/txgbe: add ntuple filter init and uninit
  net/txgbe: support ntuple filter add and delete
  net/txgbe: add ntuple parse rule
  net/txgbe: support ntuple filter remove operation
  net/txgbe: support ethertype filter add and delete
  net/txgbe: add ethertype parse rule
  net/txgbe: support syn filter add and delete
  net/txgbe: add syn filter parse rule
  net/txgbe: add L2 tunnel filter init and uninit
  net/txgbe: config L2 tunnel filter with e-tag
  net/txgbe: support L2 tunnel filter add and delete
  net/txgbe: add L2 tunnel filter parse rule
  net/txgbe: add FDIR filter init and uninit.
  net/txgbe: configure FDIR filter
  net/txgbe: support FDIR add and delete operations
  net/txgbe: add FDIR parse normal rule
  net/txgbe: add FDIR parse tunnel rule
  net/txgbe: add FDIR restore operation
  net/txgbe: add RSS filter parse rule
  net/txgbe: add RSS filter restore operation
  net/txgbe: add filter list init and uninit
  net/txgbe: add flow API
  net/txgbe: add flow API create function
  net/txgbe: add flow API destroy function
  net/txgbe: add flow API flush function
  net/txgbe: support UDP tunnel port add and delete
  net/txgbe: add TM configuration init and uninit
  net/txgbe: add TM capabilities get operation
  net/txgbe: support TM shaper profile add and delete
  net/txgbe: support TM node add and delete
  net/txgbe: add TM hierarchy commit
  net/txgbe: add macsec setting
  net/txgbe: add IPsec context creation
  net/txgbe: add security session create operation
  net/txgbe: support security session destroy
  net/txgbe: add security offload in Rx and Tx process
  net/txgbe: add security type in flow action

 doc/guides/nics/features/txgbe.ini  |3 +
 doc/guides/nics/txgbe.rst   |1 +
 drivers/net/txgbe/base/txgbe_hw.c   |   87 +
 drivers/net/txgbe/base/txgbe_hw.h   |1 +
 drivers/net/txgbe/base/txgbe_type.h |   65 +
 drivers/net/txgbe/meson.build   |6 +-
 drivers/net/txgbe/txgbe_ethdev.c| 1244 +++
 drivers/net/txgbe/txgbe_ethdev.h|  316 +++
 drivers/net/txgbe/txgbe_fdir.c  |  985 +
 drivers/net/txgbe/txgbe_flow.c  | 3172 +++
 drivers/net/txgbe/txgbe_ipsec.c |  734 +++
 drivers/net/txgbe/txgbe_ipsec.h |   98 +
 drivers/net/txgbe/txgbe_rxtx.c  |  192 +-
 drivers/net/txgbe/txgbe_rxtx.h  |   13 +
 drivers/net/txgbe/txgbe_tm.c| 1022 +
 15 files changed, 7936 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/txgbe/txgbe_fdir.c
 create mode 100644 drivers/net/txgbe/txgbe_flow.c
 create mode 100644 drivers/net/txgbe/txgbe_ipsec.c
 create mode 100644 drivers/net/txgbe/txgbe_ipsec.h
 create mode 100644 drivers/net/txgbe/txgbe_tm.c

-- 
2.18.4





[dpdk-dev] [PATCH 02/37] net/txgbe: support ntuple filter add and delete

2020-11-03 Thread Jiawen Wu
Support add and delete operations on ntuple filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 310 +++
 drivers/net/txgbe/txgbe_ethdev.h |   6 +
 2 files changed, 316 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 08f19a2ef..fe32194a7 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -109,6 +109,8 @@ static void txgbe_dev_interrupt_handler(void *param);
 static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+
 #define TXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
@@ -1611,6 +1613,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
/* resume enabled intr since hw reset */
txgbe_enable_intr(dev);
+   txgbe_filter_restore(dev);
 
/*
 * Update link status right before return, because it may
@@ -3693,6 +3696,293 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
 }
 
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+   if (protocol_value == IPPROTO_TCP)
+   return TXGBE_5TF_PROT_TCP;
+   else if (protocol_value == IPPROTO_UDP)
+   return TXGBE_5TF_PROT_UDP;
+   else if (protocol_value == IPPROTO_SCTP)
+   return TXGBE_5TF_PROT_SCTP;
+   else
+   return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter to HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+  struct txgbe_5tuple_filter *filter)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   int i;
+   uint32_t ftqf, sdpqf;
+   uint32_t l34timir = 0;
+   uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+   i = filter->index;
+   sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+   sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+   ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+   ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+   if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+   mask &= ~TXGBE_5TFCTL0_MSADDR;
+   if (filter->filter_info.dst_ip_mask == 0)
+   mask &= ~TXGBE_5TFCTL0_MDADDR;
+   if (filter->filter_info.src_port_mask == 0)
+   mask &= ~TXGBE_5TFCTL0_MSPORT;
+   if (filter->filter_info.dst_port_mask == 0)
+   mask &= ~TXGBE_5TFCTL0_MDPORT;
+   if (filter->filter_info.proto_mask == 0)
+   mask &= ~TXGBE_5TFCTL0_MPROTO;
+   ftqf |= mask;
+   ftqf |= TXGBE_5TFCTL0_MPOOL;
+   ftqf |= TXGBE_5TFCTL0_ENA;
+
+   wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+   wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+   wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+   wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+   l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+   wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index the filter allocates.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter assigned to.
+ *
+ * @return
+ *- On success, zero.
+ *- On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+   struct txgbe_5tuple_filter *filter)
+{
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   int i, idx, shift;
+
+   /*
+* look for an unused 5tuple filter index,
+* and insert the filter to list.
+*/
+   for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+   idx = i / (sizeof(uint32_t) * NBBY);
+   shift = i % (sizeof(uint32_t) * NBBY);
+   if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+   filter_info->fivetuple_mask[idx] |= 1 << shift;
+   filter->index = i;
+   TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+   break;
+   }
+   }
+   if (i >= TXGBE_MAX_FTQF_FILTERS) {
+   PMD_DRV_LOG(ERR, "5tuple filters are full.");
+   return -ENOSYS;
+   }
+
+   txgbe_inject_5tuple_filter(dev, filter);
+
+   return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: the pointer of the filter will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+   struct txgbe_5tuple_filter *filte

[dpdk-dev] [PATCH 05/37] net/txgbe: support ethertype filter add and delete

2020-11-03 Thread Jiawen Wu
Support add and delete operations on ethertype filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 111 +++
 drivers/net/txgbe/txgbe_ethdev.h |  19 ++
 2 files changed, 130 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 1804802e5..b479c9152 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -3983,6 +3983,77 @@ txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+   struct rte_eth_ethertype_filter *filter,
+   bool add)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   uint32_t etqf = 0;
+   uint32_t etqs = 0;
+   int ret;
+   struct txgbe_ethertype_filter ethertype_filter;
+
+   if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+   return -EINVAL;
+
+   if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+   filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+   PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+   " ethertype filter.", filter->ether_type);
+   return -EINVAL;
+   }
+
+   if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+   PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+   return -EINVAL;
+   }
+   if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+   PMD_DRV_LOG(ERR, "drop option is unsupported.");
+   return -EINVAL;
+   }
+
+   ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+   if (ret >= 0 && add) {
+   PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+   filter->ether_type);
+   return -EEXIST;
+   }
+   if (ret < 0 && !add) {
+   PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+   filter->ether_type);
+   return -ENOENT;
+   }
+
+   if (add) {
+   etqf = TXGBE_ETFLT_ENA;
+   etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+   etqs |= TXGBE_ETCLS_QPID(filter->queue);
+   etqs |= TXGBE_ETCLS_QENA;
+
+   ethertype_filter.ethertype = filter->ether_type;
+   ethertype_filter.etqf = etqf;
+   ethertype_filter.etqs = etqs;
+   ethertype_filter.conf = FALSE;
+   ret = txgbe_ethertype_filter_insert(filter_info,
+   ðertype_filter);
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR, "ethertype filters are full.");
+   return -ENOSPC;
+   }
+   } else {
+   ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+   if (ret < 0)
+   return -ENOSYS;
+   }
+   wr32(hw, TXGBE_ETFLT(ret), etqf);
+   wr32(hw, TXGBE_ETCLS(ret), etqs);
+   txgbe_flush(hw);
+
+   return 0;
+}
+
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@@ -4516,10 +4587,30 @@ txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
}
 }
 
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   int i;
+
+   for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+   if (filter_info->ethertype_mask & (1 << i)) {
+   wr32(hw, TXGBE_ETFLT(i),
+   filter_info->ethertype_filters[i].etqf);
+   wr32(hw, TXGBE_ETCLS(i),
+   filter_info->ethertype_filters[i].etqs);
+   txgbe_flush(hw);
+   }
+   }
+}
+
 static int
 txgbe_filter_restore(struct rte_eth_dev *dev)
 {
txgbe_ntuple_filter_restore(dev);
+   txgbe_ethertype_filter_restore(dev);
 
return 0;
 }
@@ -4535,6 +4626,26 @@ txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
txgbe_remove_5tuple_filter(dev, p_5tuple);
 }
 
+/* remove all the ether type filters */
+void
+txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   int i;
+
+   for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+   if (filter_info->ethertype_mask & (1 << i) &&
+   !filter_info->ethertype_filters[i].conf) {
+   (void)txgbe_ethertype_filter_remove(filter_info,
+   (uint8_t)i);
+   wr32(hw, TXGBE_ETFLT(i), 0)

[dpdk-dev] [PATCH 04/37] net/txgbe: support ntuple filter remove operation

2020-11-03 Thread Jiawen Wu
Support remove operation on ntuple filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 11 +++
 drivers/net/txgbe/txgbe_ethdev.h |  2 ++
 2 files changed, 13 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index fe32194a7..1804802e5 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4524,6 +4524,17 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
return 0;
 }
 
+/* remove all the n-tuple filters */
+void
+txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
+{
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   struct txgbe_5tuple_filter *p_5tuple;
+
+   while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
+   txgbe_remove_5tuple_filter(dev, p_5tuple);
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure  = txgbe_dev_configure,
.dev_infos_get  = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index caccf342d..c82f0c832 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -337,6 +337,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
 uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
+void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+
 int txgbe_vt_check(struct txgbe_hw *hw);
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
-- 
2.18.4





[dpdk-dev] [PATCH 03/37] net/txgbe: add ntuple parse rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for ntuple filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/meson.build  |   1 +
 drivers/net/txgbe/txgbe_flow.c | 536 +
 2 files changed, 537 insertions(+)
 create mode 100644 drivers/net/txgbe/txgbe_flow.c

diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 345dffaf6..45379175d 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
 
 sources = files(
'txgbe_ethdev.c',
+   'txgbe_flow.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
new file mode 100644
index 0..6f8be3b7f
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -0,0 +1,536 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_rxtx.h"
+
+#define TXGBE_MIN_N_TUPLE_PRIO 1
+#define TXGBE_MAX_N_TUPLE_PRIO 7
+
+/**
+ * Endless loop will never happen with below assumption
+ * 1. there is at least one no-void item(END)
+ * 2. cur is before END.
+ */
+static inline
+const struct rte_flow_item *next_no_void_pattern(
+   const struct rte_flow_item pattern[],
+   const struct rte_flow_item *cur)
+{
+   const struct rte_flow_item *next =
+   cur ? cur + 1 : &pattern[0];
+   while (1) {
+   if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
+   return next;
+   next++;
+   }
+}
+
+static inline
+const struct rte_flow_action *next_no_void_action(
+   const struct rte_flow_action actions[],
+   const struct rte_flow_action *cur)
+{
+   const struct rte_flow_action *next =
+   cur ? cur + 1 : &actions[0];
+   while (1) {
+   if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
+   return next;
+   next++;
+   }
+}
+
+/**
+ * Please be aware there's an assumption for all the parsers.
+ * rte_flow_item is using big endian, rte_flow_attr and
+ * rte_flow_action are using CPU order.
+ * Because the pattern is used to describe the packets,
+ * normally the packets should use network order.
+ */
+
+/**
+ * Parse the rule to see if it is a n-tuple rule.
+ * And get the n-tuple filter info BTW.
+ * pattern:
+ * The first not void item can be ETH or IPV4.
+ * The second not void item must be IPV4 if the first one is ETH.
+ * The third not void item must be UDP or TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEMSpecMask
+ * ETH NULLNULL
+ * IPV4src_addr 192.168.1.20   0x
+ * dst_addr 192.167.3.50   0x
+ * next_proto_id   17  0xFF
+ * UDP/TCP/src_port80  0x
+ * SCTPdst_port80  0x
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
+const struct rte_flow_item pattern[],
+const struct rte_flow_action actions[],
+struct rte_eth_ntuple_filter *filter,
+struct rte_flow_error *error)
+{
+   const struct rte_flow_item *item;
+   const struct rte_flow_action *act;
+   const struct rte_flow_item_ipv4 *ipv4_spec;
+   const struct rte_flow_item_ipv4 *ipv4_mask;
+   const struct rte_flow_item_tcp *tcp_spec;
+   const struct rte_flow_item_tcp *tcp_mask;
+   const struct rte_flow_item_udp *udp_spec;
+   const struct rte_flow_item_udp *udp_mask;
+   const struct rte_flow_item_sctp *sctp_spec;
+   const struct rte_flow_item_sctp *sctp_mask;
+   const struct rte_flow_item_eth *eth_spec;
+   const struct rte_flow_item_eth *eth_mask;
+   const struct rte_flow_item_vlan *vlan_spec;
+   const struct rte_flow_item_vlan *vlan_mask;
+   struct rte_flow_item_eth eth_null;
+   struct rte_flow_item_vlan vlan_null;
+
+   if (!pattern) {
+   rte_flow_error_set(error,
+   EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+   NULL, "NULL pattern.");
+   return -rte_errno;
+   }
+
+   if (!actions) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+  NULL, "NULL action.");
+   return -rte_errno;
+   }
+   if (!attr) {
+   rte_

[dpdk-dev] [PATCH 08/37] net/txgbe: add syn filter parse rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for syn filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_flow.c | 256 +
 1 file changed, 256 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index fc2505ddc..7110e594b 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -784,3 +784,259 @@ txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+/**
+ * Parse the rule to see if it is a TCP SYN rule.
+ * And get the TCP SYN filter info BTW.
+ * pattern:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4 or IPV6.
+ * The third not void item must be TCP.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEMSpecMask
+ * ETH NULLNULL
+ * IPV4/IPV6   NULLNULL
+ * TCP tcp_flags   0x020xFF
+ * END
+ * other members in mask and spec should be set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_syn_filter(const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   const struct rte_flow_action actions[],
+   struct rte_eth_syn_filter *filter,
+   struct rte_flow_error *error)
+{
+   const struct rte_flow_item *item;
+   const struct rte_flow_action *act;
+   const struct rte_flow_item_tcp *tcp_spec;
+   const struct rte_flow_item_tcp *tcp_mask;
+   const struct rte_flow_action_queue *act_q;
+
+   if (!pattern) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+   NULL, "NULL pattern.");
+   return -rte_errno;
+   }
+
+   if (!actions) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+   NULL, "NULL action.");
+   return -rte_errno;
+   }
+
+   if (!attr) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR,
+  NULL, "NULL attribute.");
+   return -rte_errno;
+   }
+
+
+   /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
+   item = next_no_void_pattern(pattern, NULL);
+   if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+   item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by syn filter");
+   return -rte_errno;
+   }
+   /*Not supported last point for range*/
+   if (item->last) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+   item, "Not supported last point for range");
+   return -rte_errno;
+   }
+
+   /* Skip Ethernet */
+   if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
+   /* if the item is MAC, the content should be NULL */
+   if (item->spec || item->mask) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Invalid SYN address mask");
+   return -rte_errno;
+   }
+
+   /* check if the next not void item is IPv4 or IPv6 */
+   item = next_no_void_pattern(pattern, item);
+   if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by syn filter");
+   return -rte_errno;
+   }
+   }
+
+   /* Skip IP */
+   if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+   item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
+   /* if the item is IP, the content should be NULL */
+   if (item->spec || item->mask) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Invalid SYN mask");
+   return -rte_errno;
+   }
+
+   /* check if the next not void item is TCP */
+   item = next_no_void_pattern(pattern, item);
+   if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
+   rte_flow_error_set(error, EINVAL,

[dpdk-dev] [PATCH 07/37] net/txgbe: support syn filter add and delete

2020-11-03 Thread Jiawen Wu
Support add and delete operations on syn filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 70 
 drivers/net/txgbe/txgbe_ethdev.h |  6 +++
 2 files changed, 76 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index b479c9152..e2599e429 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -3696,6 +3696,44 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
return 0;
 }
 
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+   struct rte_eth_syn_filter *filter,
+   bool add)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   uint32_t syn_info;
+   uint32_t synqf;
+
+   if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+   return -EINVAL;
+
+   syn_info = filter_info->syn_info;
+
+   if (add) {
+   if (syn_info & TXGBE_SYNCLS_ENA)
+   return -EINVAL;
+   synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+   synqf |= TXGBE_SYNCLS_ENA;
+
+   if (filter->hig_pri)
+   synqf |= TXGBE_SYNCLS_HIPRIO;
+   else
+   synqf &= ~TXGBE_SYNCLS_HIPRIO;
+   } else {
+   synqf = rd32(hw, TXGBE_SYNCLS);
+   if (!(syn_info & TXGBE_SYNCLS_ENA))
+   return -ENOENT;
+   synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+   }
+
+   filter_info->syn_info = synqf;
+   wr32(hw, TXGBE_SYNCLS, synqf);
+   txgbe_flush(hw);
+   return 0;
+}
+
 static inline enum txgbe_5tuple_protocol
 convert_protocol_type(uint8_t protocol_value)
 {
@@ -4606,11 +4644,28 @@ txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
}
 }
 
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+   uint32_t synqf;
+
+   synqf = filter_info->syn_info;
+
+   if (synqf & TXGBE_SYNCLS_ENA) {
+   wr32(hw, TXGBE_SYNCLS, synqf);
+   txgbe_flush(hw);
+   }
+}
+
 static int
 txgbe_filter_restore(struct rte_eth_dev *dev)
 {
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
+   txgbe_syn_filter_restore(dev);
 
return 0;
 }
@@ -4646,6 +4701,21 @@ txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev)
}
 }
 
+/* remove the SYN filter */
+void
+txgbe_clear_syn_filter(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+   if (filter_info->syn_info & TXGBE_SYNCLS_ENA) {
+   filter_info->syn_info = 0;
+
+   wr32(hw, TXGBE_SYNCLS, 0);
+   txgbe_flush(hw);
+   }
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure  = txgbe_dev_configure,
.dev_infos_get  = txgbe_dev_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index d89af2150..bb53dd74a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -164,6 +164,8 @@ struct txgbe_filter_info {
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[TXGBE_5TUPLE_ARRAY_SIZE];
struct txgbe_5tuple_filter_list fivetuple_list;
+   /* store the SYN filter info */
+   uint32_t syn_info;
 };
 
 /* The configuration of bandwidth */
@@ -320,6 +322,9 @@ int txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
 int txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
struct rte_eth_ethertype_filter *filter,
bool add);
+int txgbe_syn_filter_set(struct rte_eth_dev *dev,
+   struct rte_eth_syn_filter *filter,
+   bool add);
 
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
   uint8_t queue, uint8_t msix_vector);
@@ -343,6 +348,7 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, 
uint32_t orig_val);
 
 void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
 void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
 
 int txgbe_vt_check(struct txgbe_hw *hw);
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
-- 
2.18.4





[dpdk-dev] [PATCH 10/37] net/txgbe: config L2 tunnel filter with e-tag

2020-11-03 Thread Jiawen Wu
Config L2 tunnel filter with e-tag.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 63 
 1 file changed, 63 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 79f1f9535..0be894b7b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -112,6 +112,7 @@ static void txgbe_dev_interrupt_delayed_handler(void 
*param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
 static int txgbe_filter_restore(struct rte_eth_dev *dev);
+static void txgbe_l2_tunnel_conf(struct rte_eth_dev *dev);
 
 #define TXGBE_SET_HWSTRIP(h, q) do {\
uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
@@ -1675,6 +1676,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 
/* resume enabled intr since hw reset */
txgbe_enable_intr(dev);
+   txgbe_l2_tunnel_conf(dev);
txgbe_filter_restore(dev);
 
/*
@@ -4678,6 +4680,52 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
return 0;
 }
 
+/* Update e-tag ether type */
+static int
+txgbe_update_e_tag_eth_type(struct txgbe_hw *hw,
+   uint16_t ether_type)
+{
+   uint32_t etag_etype;
+
+   etag_etype = rd32(hw, TXGBE_EXTAG);
+   etag_etype &= ~TXGBE_EXTAG_ETAG_MASK;
+   etag_etype |= ether_type;
+   wr32(hw, TXGBE_EXTAG, etag_etype);
+   txgbe_flush(hw);
+
+   return 0;
+}
+
+/* Enable e-tag tunnel */
+static int
+txgbe_e_tag_enable(struct txgbe_hw *hw)
+{
+   uint32_t etag_etype;
+
+   etag_etype = rd32(hw, TXGBE_PORTCTL);
+   etag_etype |= TXGBE_PORTCTL_ETAG;
+   wr32(hw, TXGBE_PORTCTL, etag_etype);
+   txgbe_flush(hw);
+
+   return 0;
+}
+
+static int
+txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+   int ret = 0;
+   uint32_t ctrl;
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+   ctrl = rd32(hw, TXGBE_POOLCTL);
+   ctrl &= ~TXGBE_POOLCTL_MODE_MASK;
+   if (en)
+   ctrl |= TXGBE_PSRPOOL_MODE_ETAG;
+   wr32(hw, TXGBE_POOLCTL, ctrl);
+
+   return ret;
+}
+
 /* restore n-tuple filter */
 static inline void
 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -4735,6 +4783,21 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
return 0;
 }
 
+static void
+txgbe_l2_tunnel_conf(struct rte_eth_dev *dev)
+{
+   struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+   if (l2_tn_info->e_tag_en)
+   (void)txgbe_e_tag_enable(hw);
+
+   if (l2_tn_info->e_tag_fwd_en)
+   (void)txgbe_e_tag_forwarding_en_dis(dev, 1);
+
+   (void)txgbe_update_e_tag_eth_type(hw, l2_tn_info->e_tag_ether_type);
+}
+
 /* remove all the n-tuple filters */
 void
 txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev)
-- 
2.18.4





[dpdk-dev] [PATCH 13/37] net/txgbe: add FDIR filter init and uninit.

2020-11-03 Thread Jiawen Wu
Add flow director filter init and uninit operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h | 29 +
 drivers/net/txgbe/txgbe_ethdev.c| 63 +
 drivers/net/txgbe/txgbe_ethdev.h| 51 +++
 3 files changed, 143 insertions(+)

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index 69aa8993a..b9d31ab83 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -67,6 +67,35 @@ enum {
 
 #define TXGBE_ATR_HASH_MASK0x7fff
 
+/* Flow Director ATR input struct. */
+struct txgbe_atr_input {
+   /*
+* Byte layout in order, all values with MSB first:
+*
+* vm_pool  - 1 byte
+* flow_type- 1 byte
+* vlan_id  - 2 bytes
+* src_ip   - 16 bytes
+* inner_mac- 6 bytes
+* cloud_mode   - 2 bytes
+* tni_vni  - 4 bytes
+* dst_ip   - 16 bytes
+* src_port - 2 bytes
+* dst_port - 2 bytes
+* flex_bytes   - 2 bytes
+* bkt_hash - 2 bytes
+*/
+   u8 vm_pool;
+   u8 flow_type;
+   __be16 pkt_type;
+   __be32 dst_ip[4];
+   __be32 src_ip[4];
+   __be16 src_port;
+   __be16 dst_port;
+   __be16 flex_bytes;
+   __be16 bkt_hash;
+};
+
 enum txgbe_eeprom_type {
txgbe_eeprom_unknown = 0,
txgbe_eeprom_spi,
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 94fa03182..4f4e51fe1 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = {
txgbe_regs_diagnostic,
NULL};
 
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
@@ -690,6 +692,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
 
+   /* initialize flow director filter list & hash */
+   txgbe_fdir_filter_init(eth_dev);
+
/* initialize l2 tunnel filter list & hash */
txgbe_l2_tn_filter_init(eth_dev);
 
@@ -729,6 +734,26 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev 
*eth_dev)
return 0;
 }
 
+static int txgbe_fdir_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+   struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+   struct txgbe_fdir_filter *fdir_filter;
+
+   if (fdir_info->hash_map)
+   rte_free(fdir_info->hash_map);
+   if (fdir_info->hash_handle)
+   rte_hash_free(fdir_info->hash_handle);
+
+   while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
+   TAILQ_REMOVE(&fdir_info->fdir_list,
+fdir_filter,
+entries);
+   rte_free(fdir_filter);
+   }
+
+   return 0;
+}
+
 static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
 {
struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
@@ -749,6 +774,41 @@ static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev 
*eth_dev)
return 0;
 }
 
+static int txgbe_fdir_filter_init(struct rte_eth_dev *eth_dev)
+{
+   struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(eth_dev);
+   char fdir_hash_name[RTE_HASH_NAMESIZE];
+   struct rte_hash_parameters fdir_hash_params = {
+   .name = fdir_hash_name,
+   .entries = TXGBE_MAX_FDIR_FILTER_NUM,
+   .key_len = sizeof(struct txgbe_atr_input),
+   .hash_func = rte_hash_crc,
+   .hash_func_init_val = 0,
+   .socket_id = rte_socket_id(),
+   };
+
+   TAILQ_INIT(&fdir_info->fdir_list);
+   snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
+"fdir_%s", TDEV_NAME(eth_dev));
+   fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
+   if (!fdir_info->hash_handle) {
+   PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
+   return -EINVAL;
+   }
+   fdir_info->hash_map = rte_zmalloc("txgbe",
+ sizeof(struct txgbe_fdir_filter *) *
+ TXGBE_MAX_FDIR_FILTER_NUM,
+ 0);
+   if (!fdir_info->hash_map) {
+   PMD_INIT_LOG(ERR,
+"Failed to allocate memory for fdir hash map!");
+   return -ENOMEM;
+   }
+   fdir_info->mask_added = FALSE;
+
+   return 0;
+}
+
 static int txgbe_l2_

[dpdk-dev] [PATCH 12/37] net/txgbe: add L2 tunnel filter parse rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for L2 tunnel filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_flow.c | 202 +
 1 file changed, 202 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 7110e594b..8589e3328 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -15,6 +15,8 @@
 #include 
 #include 
 
+#include 
+#include 
 #include 
 #include 
 
@@ -1040,3 +1042,203 @@ txgbe_parse_syn_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+/**
+ * Parse the rule to see if it is a L2 tunnel rule.
+ * And get the L2 tunnel filter info BTW.
+ * Only support E-tag now.
+ * pattern:
+ * The first not void item can be E_TAG.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be VF or PF.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEMSpecMask
+ * E_TAG   grp 0x1 0x3
+   e_cid_base  0x309   0xFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   const struct rte_flow_action actions[],
+   struct txgbe_l2_tunnel_conf *filter,
+   struct rte_flow_error *error)
+{
+   const struct rte_flow_item *item;
+   const struct rte_flow_item_e_tag *e_tag_spec;
+   const struct rte_flow_item_e_tag *e_tag_mask;
+   const struct rte_flow_action *act;
+   const struct rte_flow_action_vf *act_vf;
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
+   if (!pattern) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+   NULL, "NULL pattern.");
+   return -rte_errno;
+   }
+
+   if (!actions) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+  NULL, "NULL action.");
+   return -rte_errno;
+   }
+
+   if (!attr) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR,
+  NULL, "NULL attribute.");
+   return -rte_errno;
+   }
+
+   /* The first not void item should be e-tag. */
+   item = next_no_void_pattern(pattern, NULL);
+   if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
+   memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by L2 tunnel filter");
+   return -rte_errno;
+   }
+
+   if (!item->spec || !item->mask) {
+   memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+   rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by L2 tunnel filter");
+   return -rte_errno;
+   }
+
+   /*Not supported last point for range*/
+   if (item->last) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+   item, "Not supported last point for range");
+   return -rte_errno;
+   }
+
+   e_tag_spec = item->spec;
+   e_tag_mask = item->mask;
+
+   /* Only care about GRP and E cid base. */
+   if (e_tag_mask->epcp_edei_in_ecid_b ||
+   e_tag_mask->in_ecid_e ||
+   e_tag_mask->ecid_e ||
+   e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
+   memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by L2 tunnel filter");
+   return -rte_errno;
+   }
+
+   filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+   /**
+* grp and e_cid_base are bit fields and only use 14 bits.
+* e-tag id is taken as little endian by HW.
+*/
+   filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
+
+   /* check if the next not void item is END */
+   item = next_no_void_pattern(pattern, item);
+   if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+   memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by L2 tunnel filter");
+   return -rte_errno;
+   }
+
+   /* parse attr */
+   /* must be input direction */
+   if (!attr->ingress) {
+   

Re: [dpdk-dev] [PATCH v7 0/4] devtools: abi breakage checks

2020-11-03 Thread Kinsella, Ray
Hi David,

Came across an issue with this.

Essentially what is happening is that an ABI dump file generated with a newer
version of libabigail
is not guaranteed to be 100% compatible with older versions.

That then adds a wrinkle: we may need to look at maintaining ABI dump
archives per distro release,
or per libabigail version, depending on how you look at it.

An alternative approach suggested by Dodji would be to just archive the binaries
somewhere instead,
and regenerate the dumps at build time. That _may_ be feasible,
but you lose some of the benefit (the build-time saving) compared to archiving the
ABI dumps.

The most sensible approach to archiving the binaries
is to use DPDK release OS packaging for this, installed to a filesystem sandbox.

So the next steps are figuring out which is the better option: between
maintaining multiple ABI dump archives, one per supported OS distro,
or looking at what needs to happen with DPDK OS packaging.

So some work still to do here. 

Thanks,

Ray K


[dpdk-dev] [PATCH 11/37] net/txgbe: support L2 tunnel filter add and delete

2020-11-03 Thread Jiawen Wu
Support L2 tunnel filter add and delete operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 230 +++
 drivers/net/txgbe/txgbe_ethdev.h |  18 +++
 2 files changed, 248 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 0be894b7b..94fa03182 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4710,6 +4710,219 @@ txgbe_e_tag_enable(struct txgbe_hw *hw)
return 0;
 }
 
+static int
+txgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+  struct txgbe_l2_tunnel_conf  *l2_tunnel)
+{
+   int ret = 0;
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   uint32_t i, rar_entries;
+   uint32_t rar_low, rar_high;
+
+   rar_entries = hw->mac.num_rar_entries;
+
+   for (i = 1; i < rar_entries; i++) {
+   wr32(hw, TXGBE_ETHADDRIDX, i);
+   rar_high = rd32(hw, TXGBE_ETHADDRH);
+   rar_low  = rd32(hw, TXGBE_ETHADDRL);
+   if ((rar_high & TXGBE_ETHADDRH_VLD) &&
+   (rar_high & TXGBE_ETHADDRH_ETAG) &&
+   (TXGBE_ETHADDRL_ETAG(rar_low) ==
+l2_tunnel->tunnel_id)) {
+   wr32(hw, TXGBE_ETHADDRL, 0);
+   wr32(hw, TXGBE_ETHADDRH, 0);
+
+   txgbe_clear_vmdq(hw, i, BIT_MASK32);
+
+   return ret;
+   }
+   }
+
+   return ret;
+}
+
+static int
+txgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+  struct txgbe_l2_tunnel_conf *l2_tunnel)
+{
+   int ret = 0;
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   uint32_t i, rar_entries;
+   uint32_t rar_low, rar_high;
+
+   /* One entry for one tunnel. Try to remove potential existing entry. */
+   txgbe_e_tag_filter_del(dev, l2_tunnel);
+
+   rar_entries = hw->mac.num_rar_entries;
+
+   for (i = 1; i < rar_entries; i++) {
+   wr32(hw, TXGBE_ETHADDRIDX, i);
+   rar_high = rd32(hw, TXGBE_ETHADDRH);
+   if (rar_high & TXGBE_ETHADDRH_VLD) {
+   continue;
+   } else {
+   txgbe_set_vmdq(hw, i, l2_tunnel->pool);
+   rar_high = TXGBE_ETHADDRH_VLD | TXGBE_ETHADDRH_ETAG;
+   rar_low = l2_tunnel->tunnel_id;
+
+   wr32(hw, TXGBE_ETHADDRL, rar_low);
+   wr32(hw, TXGBE_ETHADDRH, rar_high);
+
+   return ret;
+   }
+   }
+
+   PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+" Please remove a rule before adding a new one.");
+   return -EINVAL;
+}
+
+static inline struct txgbe_l2_tn_filter *
+txgbe_l2_tn_filter_lookup(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+   int ret;
+
+   ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+   if (ret < 0)
+   return NULL;
+
+   return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+txgbe_insert_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_filter *l2_tn_filter)
+{
+   int ret;
+
+   ret = rte_hash_add_key(l2_tn_info->hash_handle,
+  &l2_tn_filter->key);
+
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR,
+   "Failed to insert L2 tunnel filter"
+   " to hash table %d!",
+   ret);
+   return ret;
+   }
+
+   l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+   TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+   return 0;
+}
+
+static inline int
+txgbe_remove_l2_tn_filter(struct txgbe_l2_tn_info *l2_tn_info,
+ struct txgbe_l2_tn_key *key)
+{
+   int ret;
+   struct txgbe_l2_tn_filter *l2_tn_filter;
+
+   ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+   if (ret < 0) {
+   PMD_DRV_LOG(ERR,
+   "No such L2 tunnel filter to delete %d!",
+   ret);
+   return ret;
+   }
+
+   l2_tn_filter = l2_tn_info->hash_map[ret];
+   l2_tn_info->hash_map[ret] = NULL;
+
+   TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+   rte_free(l2_tn_filter);
+
+   return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+  struct txgbe_l2_tunnel_conf *l2_tunnel,
+  bool restore)
+{
+   int ret;
+   struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+   struct txgbe_l2_tn_key key;
+   struct txgbe_l2_tn_filter *node;
+
+   if (!restore) {
+   key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+   key.tn_id = l2_tunnel->tunne

[dpdk-dev] [PATCH 06/37] net/txgbe: add ethertype parse rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for ethertype filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_flow.c | 250 +
 1 file changed, 250 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 6f8be3b7f..fc2505ddc 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -534,3 +534,253 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+/**
+ * Parse the rule to see if it is a ethertype rule.
+ * And get the ethertype filter info BTW.
+ * pattern:
+ * The first not void item can be ETH.
+ * The next not void item must be END.
+ * action:
+ * The first not void action should be QUEUE.
+ * The next not void action should be END.
+ * pattern example:
+ * ITEMSpecMask
+ * ETH type0x0807  0x
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
+   const struct rte_flow_item *pattern,
+   const struct rte_flow_action *actions,
+   struct rte_eth_ethertype_filter *filter,
+   struct rte_flow_error *error)
+{
+   const struct rte_flow_item *item;
+   const struct rte_flow_action *act;
+   const struct rte_flow_item_eth *eth_spec;
+   const struct rte_flow_item_eth *eth_mask;
+   const struct rte_flow_action_queue *act_q;
+
+   if (!pattern) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+   NULL, "NULL pattern.");
+   return -rte_errno;
+   }
+
+   if (!actions) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+   NULL, "NULL action.");
+   return -rte_errno;
+   }
+
+   if (!attr) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR,
+  NULL, "NULL attribute.");
+   return -rte_errno;
+   }
+
+   item = next_no_void_pattern(pattern, NULL);
+   /* The first non-void item should be MAC. */
+   if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by ethertype filter");
+   return -rte_errno;
+   }
+
+   /*Not supported last point for range*/
+   if (item->last) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+   item, "Not supported last point for range");
+   return -rte_errno;
+   }
+
+   /* Get the MAC info. */
+   if (!item->spec || !item->mask) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by ethertype filter");
+   return -rte_errno;
+   }
+
+   eth_spec = item->spec;
+   eth_mask = item->mask;
+
+   /* Mask bits of source MAC address must be full of 0.
+* Mask bits of destination MAC address must be full
+* of 1 or full of 0.
+*/
+   if (!rte_is_zero_ether_addr(ð_mask->src) ||
+   (!rte_is_zero_ether_addr(ð_mask->dst) &&
+!rte_is_broadcast_ether_addr(ð_mask->dst))) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Invalid ether address mask");
+   return -rte_errno;
+   }
+
+   if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Invalid ethertype mask");
+   return -rte_errno;
+   }
+
+   /* If mask bits of destination MAC address
+* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
+*/
+   if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
+   filter->mac_addr = eth_spec->dst;
+   filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
+   } else {
+   filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
+   }
+   filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+   /* Check if the next non-void item is END. */
+   item = next_no_void_pattern(pattern, item);
+   if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "Not supported by ethertype filter.");
+   return -rte_errno;
+   }
+
+   /* Parse action */
+
+

[dpdk-dev] [PATCH 15/37] net/txgbe: support FDIR add and delete operations

2020-11-03 Thread Jiawen Wu
Support add and delete operations on flow director.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h |  11 +
 drivers/net/txgbe/txgbe_ethdev.h|  16 +
 drivers/net/txgbe/txgbe_fdir.c  | 472 +++-
 3 files changed, 498 insertions(+), 1 deletion(-)

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index 633692cd7..160d5253a 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -22,6 +22,7 @@
 #define TXGBE_MAX_UTA  128
 
 #define TXGBE_FDIR_INIT_DONE_POLL  10
+#define TXGBE_FDIRCMD_CMD_POLL 10
 
 #define TXGBE_ALIGN128 /* as intel did */
 
@@ -71,7 +72,17 @@ enum {
 #define TXGBE_ATR_BUCKET_HASH_KEY  0x3DAD14E2
 #define TXGBE_ATR_SIGNATURE_HASH_KEY   0x174D3614
 
+/* Software ATR input stream values and masks */
 #define TXGBE_ATR_HASH_MASK0x7fff
+#define TXGBE_ATR_L3TYPE_MASK  0x4
+#define TXGBE_ATR_L3TYPE_IPV4  0x0
+#define TXGBE_ATR_L3TYPE_IPV6  0x4
+#define TXGBE_ATR_L4TYPE_MASK  0x3
+#define TXGBE_ATR_L4TYPE_UDP   0x1
+#define TXGBE_ATR_L4TYPE_TCP   0x2
+#define TXGBE_ATR_L4TYPE_SCTP  0x3
+#define TXGBE_ATR_TUNNEL_MASK  0x10
+#define TXGBE_ATR_TUNNEL_ANY   0x10
 
 /* Flow Director ATR input struct. */
 struct txgbe_atr_input {
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 5a5d9c4d3..5e7fd0a86 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -91,6 +91,18 @@ struct txgbe_fdir_filter {
 /* list of fdir filters */
 TAILQ_HEAD(txgbe_fdir_filter_list, txgbe_fdir_filter);
 
+struct txgbe_fdir_rule {
+   struct txgbe_hw_fdir_mask mask;
+   struct txgbe_atr_input input; /* key of fdir filter */
+   bool b_spec; /* If TRUE, input, fdirflags, queue have meaning. */
+   bool b_mask; /* If TRUE, mask has meaning. */
+   enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+   uint32_t fdirflags; /* drop or forward */
+   uint32_t soft_id; /* an unique value for this rule */
+   uint8_t queue; /* assigned rx queue */
+   uint8_t flex_bytes_offset;
+};
+
 struct txgbe_hw_fdir_info {
struct txgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -436,6 +448,10 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t 
direction,
  */
 int txgbe_fdir_configure(struct rte_eth_dev *dev);
 int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct txgbe_fdir_rule *rule,
+ bool del, bool update);
+
 void txgbe_configure_pb(struct rte_eth_dev *dev);
 void txgbe_configure_port(struct rte_eth_dev *dev);
 void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index df6125d4a..d38e21e9e 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -7,7 +7,7 @@
 #include 
 #include 
 #include 
-
+#include 
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -15,6 +15,7 @@
 
 #define TXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /*default flexbytes offset in 
bytes*/
 #define TXGBE_MAX_FLX_SOURCE_OFF62
+#define TXGBE_FDIRCMD_CMD_INTERVAL_US   10
 
 #define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
uint8_t ipv6_addr[16]; \
@@ -405,3 +406,472 @@ txgbe_fdir_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+/*
+ * Note that the bkt_hash field in the txgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ *  @stream: input bitstream to compute the hash on
+ *  @key: 32-bit hash key
+ **/
+static uint32_t
+txgbe_atr_compute_hash(struct txgbe_atr_input *atr_input,
+uint32_t key)
+{
+   /*
+* The algorithm is as follows:
+*Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+*where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+*and A[n] x B[n] is bitwise AND between same length strings
+*
+*K[n] is 16 bits, defined as:
+*   for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+*   for n modulo 32 < 15, K[n] =
+* K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+*
+*S[n] is 16 bits, defined as:
+*   for n >= 15, S[n] = S[n:n - 15]
+*   for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+*
+*To simplify for programming, the algorithm is implemented
+*in software this way:
+*
+*key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+*
+*for (i = 0; i < 352; i+=32)
+*hi_hash_dword[31:0] ^= Stream[(i+31):i];
+*
+

[dpdk-dev] [PATCH 14/37] net/txgbe: configure FDIR filter

2020-11-03 Thread Jiawen Wu
Configure flow director filter with it enabled.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h |   6 +
 drivers/net/txgbe/meson.build   |   1 +
 drivers/net/txgbe/txgbe_ethdev.c|   6 +
 drivers/net/txgbe/txgbe_ethdev.h|   6 +
 drivers/net/txgbe/txgbe_fdir.c  | 407 
 5 files changed, 426 insertions(+)
 create mode 100644 drivers/net/txgbe/txgbe_fdir.c

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index b9d31ab83..633692cd7 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -21,6 +21,8 @@
 #define TXGBE_MAX_QP   (128)
 #define TXGBE_MAX_UTA  128
 
+#define TXGBE_FDIR_INIT_DONE_POLL  10
+
 #define TXGBE_ALIGN128 /* as intel did */
 
 #include "txgbe_status.h"
@@ -65,6 +67,10 @@ enum {
 #define TXGBE_PHYSICAL_LAYER_10BASE_T  0x08000
 #define TXGBE_PHYSICAL_LAYER_2500BASE_KX   0x1
 
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY  0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY   0x174D3614
+
 #define TXGBE_ATR_HASH_MASK0x7fff
 
 /* Flow Director ATR input struct. */
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 45379175d..bb1683631 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -6,6 +6,7 @@ objs = [base_objs]
 
 sources = files(
'txgbe_ethdev.c',
+   'txgbe_fdir.c',
'txgbe_flow.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 4f4e51fe1..df2efe80f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -1633,6 +1633,12 @@ txgbe_dev_start(struct rte_eth_dev *dev)
txgbe_configure_port(dev);
txgbe_configure_dcb(dev);
 
+   if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+   err = txgbe_fdir_configure(dev);
+   if (err)
+   goto error;
+   }
+
/* Restore vf rate limit */
if (vfinfo != NULL) {
for (vf = 0; vf < pci_dev->max_vfs; vf++)
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 059b0164b..5a5d9c4d3 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /* need update link, bit flag */
 #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -430,6 +431,11 @@ txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
   uint8_t queue, uint8_t msix_vector);
 
+/*
+ * Flow director function prototypes
+ */
+int txgbe_fdir_configure(struct rte_eth_dev *dev);
+int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
 void txgbe_configure_pb(struct rte_eth_dev *dev);
 void txgbe_configure_port(struct rte_eth_dev *dev);
 void txgbe_configure_dcb(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
new file mode 100644
index 0..df6125d4a
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -0,0 +1,407 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+#include "txgbe_logs.h"
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+
+#define TXGBE_DEFAULT_FLEXBYTES_OFFSET  12 /*default flexbytes offset in 
bytes*/
+#define TXGBE_MAX_FLX_SOURCE_OFF62
+
+#define IPV6_ADDR_TO_MASK(ipaddr, ipv6m) do { \
+   uint8_t ipv6_addr[16]; \
+   uint8_t i; \
+   rte_memcpy(ipv6_addr, (ipaddr), sizeof(ipv6_addr));\
+   (ipv6m) = 0; \
+   for (i = 0; i < sizeof(ipv6_addr); i++) { \
+   if (ipv6_addr[i] == UINT8_MAX) \
+   (ipv6m) |= 1 << i; \
+   else if (ipv6_addr[i] != 0) { \
+   PMD_DRV_LOG(ERR, " invalid IPv6 address mask."); \
+   return -EINVAL; \
+   } \
+   } \
+} while (0)
+
+#define IPV6_MASK_TO_ADDR(ipv6m, ipaddr) do { \
+   uint8_t ipv6_addr[16]; \
+   uint8_t i; \
+   for (i = 0; i < sizeof(ipv6_addr); i++) { \
+   if ((ipv6m) & (1 << i)) \
+   ipv6_addr[i] = UINT8_MAX; \
+   else \
+   ipv6_addr[i] = 0; \
+   } \
+   rte_memcpy((ipaddr), ipv6_addr, sizeof(ipv6_addr));\
+} while (0)
+
+/**
+ *  Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ **/
+static int
+txgbe_fdir_enable(struct txgbe_hw *hw, uint32_t fdirctrl)
+{
+   int i;
+
+   PMD_INIT_FUNC_TRACE();
+
+   /* Prime the keys for hashing */
+   wr32(hw, TXGBE_FDIRBKTHKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+   

[dpdk-dev] [PATCH 17/37] net/txgbe: add FDIR parse tunnel rule

2020-11-03 Thread Jiawen Wu
Add support to parse tunnel flow for fdir filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h |   8 +
 drivers/net/txgbe/txgbe_flow.c  | 290 
 2 files changed, 298 insertions(+)

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index a73f66d39..22efcef78 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -92,6 +92,14 @@ enum txgbe_atr_flow_type {
TXGBE_ATR_FLOW_TYPE_UDPV6   = 0x5,
TXGBE_ATR_FLOW_TYPE_TCPV6   = 0x6,
TXGBE_ATR_FLOW_TYPE_SCTPV6  = 0x7,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4   = 0x10,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4  = 0x11,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4  = 0x12,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6   = 0x14,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6  = 0x15,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6  = 0x16,
+   TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
 };
 
 /* Flow Director ATR input struct. */
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index ba1be9f12..b7d0e08a9 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2064,6 +2064,291 @@ txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev 
__rte_unused,
return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
 }
 
+/**
+ * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
+ * And get the flow director filter info BTW.
+ * VxLAN PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * NVGRE PATTERN:
+ * The first not void item must be ETH.
+ * The second not void item must be IPV4/ IPV6.
+ * The third not void item must be NVGRE.
+ * The next not void item must be END.
+ * ACTION:
+ * The first not void action should be QUEUE or DROP.
+ * The second not void optional action should be MARK,
+ * mark_id is a uint32_t number.
+ * The next not void action should be END.
+ * VxLAN pattern example:
+ * ITEMSpecMask
+ * ETH NULLNULL
+ * IPV4/IPV6   NULLNULL
+ * UDP NULLNULL
+ * VxLAN   vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
+ * MAC VLANtci 0x2016  0xEFFF
+ * END
+ * NEGRV pattern example:
+ * ITEMSpecMask
+ * ETH NULLNULL
+ * IPV4/IPV6   NULLNULL
+ * NVGRE   protocol0x6558  0x
+ * tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
+ * MAC VLANtci 0x2016  0xEFFF
+ * END
+ * other members in mask and spec should set to 0x00.
+ * item->last should be NULL.
+ */
+static int
+txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
+  const struct rte_flow_item pattern[],
+  const struct rte_flow_action actions[],
+  struct txgbe_fdir_rule *rule,
+  struct rte_flow_error *error)
+{
+   const struct rte_flow_item *item;
+   const struct rte_flow_item_eth *eth_mask;
+   uint32_t j;
+
+   if (!pattern) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
+  NULL, "NULL pattern.");
+   return -rte_errno;
+   }
+
+   if (!actions) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
+  NULL, "NULL action.");
+   return -rte_errno;
+   }
+
+   if (!attr) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ATTR,
+  NULL, "NULL attribute.");
+   return -rte_errno;
+   }
+
+   /**
+* Some fields may not be provided. Set spec to 0 and mask to default
+* value. So, we need not do anything for the not provided fields later.
+*/
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
+   rule->mask.vlan_tci_mask = 0;
+
+   /**
+* The first not void item should be
+* MAC or IPv4 or IPv6 or UDP or VxLAN.
+*/
+   item = next_no_void_pattern(pattern, NULL);
+   if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
+   item->type != RTE_FLOW_ITEM_TYPE_UDP &&
+   item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
+   item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
+   memset(rul

[dpdk-dev] [PATCH 09/37] net/txgbe: add L2 tunnel filter init and uninit

2020-11-03 Thread Jiawen Wu
Add L2 tunnel filter init and uninit.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 65 
 drivers/net/txgbe/txgbe_ethdev.h | 32 
 2 files changed, 97 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index e2599e429..79f1f9535 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -88,6 +88,8 @@ static const struct reg_info *txgbe_regs_others[] = {
txgbe_regs_diagnostic,
NULL};
 
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev);
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev);
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static int txgbe_dev_close(struct rte_eth_dev *dev);
@@ -687,6 +689,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
 
+   /* initialize l2 tunnel filter list & hash */
+   txgbe_l2_tn_filter_init(eth_dev);
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
@@ -723,6 +728,63 @@ static int txgbe_ntuple_filter_uninit(struct rte_eth_dev 
*eth_dev)
return 0;
 }
 
+static int txgbe_l2_tn_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+   struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+   struct txgbe_l2_tn_filter *l2_tn_filter;
+
+   if (l2_tn_info->hash_map)
+   rte_free(l2_tn_info->hash_map);
+   if (l2_tn_info->hash_handle)
+   rte_hash_free(l2_tn_info->hash_handle);
+
+   while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+   TAILQ_REMOVE(&l2_tn_info->l2_tn_list,
+l2_tn_filter,
+entries);
+   rte_free(l2_tn_filter);
+   }
+
+   return 0;
+}
+
+static int txgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
+{
+   struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(eth_dev);
+   char l2_tn_hash_name[RTE_HASH_NAMESIZE];
+   struct rte_hash_parameters l2_tn_hash_params = {
+   .name = l2_tn_hash_name,
+   .entries = TXGBE_MAX_L2_TN_FILTER_NUM,
+   .key_len = sizeof(struct txgbe_l2_tn_key),
+   .hash_func = rte_hash_crc,
+   .hash_func_init_val = 0,
+   .socket_id = rte_socket_id(),
+   };
+
+   TAILQ_INIT(&l2_tn_info->l2_tn_list);
+   snprintf(l2_tn_hash_name, RTE_HASH_NAMESIZE,
+"l2_tn_%s", TDEV_NAME(eth_dev));
+   l2_tn_info->hash_handle = rte_hash_create(&l2_tn_hash_params);
+   if (!l2_tn_info->hash_handle) {
+   PMD_INIT_LOG(ERR, "Failed to create L2 TN hash table!");
+   return -EINVAL;
+   }
+   l2_tn_info->hash_map = rte_zmalloc("txgbe",
+  sizeof(struct txgbe_l2_tn_filter *) *
+  TXGBE_MAX_L2_TN_FILTER_NUM,
+  0);
+   if (!l2_tn_info->hash_map) {
+   PMD_INIT_LOG(ERR,
+   "Failed to allocate memory for L2 TN hash map!");
+   return -ENOMEM;
+   }
+   l2_tn_info->e_tag_en = FALSE;
+   l2_tn_info->e_tag_fwd_en = FALSE;
+   l2_tn_info->e_tag_ether_type = RTE_ETHER_TYPE_ETAG;
+
+   return 0;
+}
+
 static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
@@ -1802,6 +1864,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
 
+   /* remove all the L2 tunnel filters & hash */
+   txgbe_l2_tn_filter_uninit(dev);
+
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index bb53dd74a..bd4ddab5a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -9,7 +9,10 @@
 
 #include "base/txgbe.h"
 #include "txgbe_ptypes.h"
+#include 
 #include 
+#include 
+#include 
 
 /* need update link, bit flag */
 #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -56,6 +59,8 @@
 #define TXGBE_MISC_VEC_ID   RTE_INTR_VEC_ZERO_OFFSET
 #define TXGBE_RX_VEC_START  RTE_INTR_VEC_RXTX_OFFSET
 
+#define TXGBE_MAX_L2_TN_FILTER_NUM  128
+
 /* structure for interrupt relative data */
 struct txgbe_interrupt {
uint32_t flags;
@@ -168,6 +173,28 @@ struct txgbe_filter_info {
uint32_t syn_info;
 };
 
+struct txgbe_l2_tn_key {
+   enum rte_eth_tunnel_type  l2_tn_type;
+   uint32_t  tn_id;
+};
+
+struct txgbe_l2_tn_filter {
+   TAIL

[dpdk-dev] [PATCH 18/37] net/txgbe: add FDIR restore operation

2020-11-03 Thread Jiawen Wu
Add restore operation on FDIR filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c |  1 +
 drivers/net/txgbe/txgbe_ethdev.h |  2 ++
 drivers/net/txgbe/txgbe_fdir.c   | 34 
 3 files changed, 37 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index df2efe80f..a17d7b190 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5077,6 +5077,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
txgbe_ntuple_filter_restore(dev);
txgbe_ethertype_filter_restore(dev);
txgbe_syn_filter_restore(dev);
+   txgbe_fdir_filter_restore(dev);
txgbe_l2_tn_filter_restore(dev);
 
return 0;
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 5e7fd0a86..88c7d8191 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -469,6 +469,8 @@ int txgbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
 uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
 
+void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+
 void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
 void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
 void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index d38e21e9e..2faf7fd84 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -875,3 +875,37 @@ txgbe_fdir_filter_program(struct rte_eth_dev *dev,
return err;
 }
 
+/* restore flow director filter */
+void
+txgbe_fdir_filter_restore(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+   struct txgbe_fdir_filter *node;
+   bool is_perfect = FALSE;
+   enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
+
+   if (fdir_mode >= RTE_FDIR_MODE_PERFECT &&
+   fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL)
+   is_perfect = TRUE;
+
+   if (is_perfect) {
+   TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+   (void)fdir_write_perfect_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash,
+ fdir_mode);
+   }
+   } else {
+   TAILQ_FOREACH(node, &fdir_info->fdir_list, entries) {
+   (void)fdir_add_signature_filter(hw,
+ &node->input,
+ node->queue,
+ node->fdirflags,
+ node->fdirhash);
+   }
+   }
+}
+
-- 
2.18.4





[dpdk-dev] [PATCH 20/37] net/txgbe: add RSS filter restore operation

2020-11-03 Thread Jiawen Wu
Add restore operation on RSS filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 12 +
 drivers/net/txgbe/txgbe_ethdev.h |  7 +++
 drivers/net/txgbe/txgbe_rxtx.c   | 76 
 3 files changed, 95 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index a17d7b190..5ed96479e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5071,6 +5071,17 @@ txgbe_l2_tn_filter_restore(struct rte_eth_dev *dev)
}
 }
 
+/* restore rss filter */
+static inline void
+txgbe_rss_filter_restore(struct rte_eth_dev *dev)
+{
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+   if (filter_info->rss_info.conf.queue_num)
+   txgbe_config_rss_filter(dev,
+   &filter_info->rss_info, TRUE);
+}
+
 static int
 txgbe_filter_restore(struct rte_eth_dev *dev)
 {
@@ -5079,6 +5090,7 @@ txgbe_filter_restore(struct rte_eth_dev *dev)
txgbe_syn_filter_restore(dev);
txgbe_fdir_filter_restore(dev);
txgbe_l2_tn_filter_restore(dev);
+   txgbe_rss_filter_restore(dev);
 
return 0;
 }
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 924c57faf..df7a14506 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -237,6 +237,8 @@ struct txgbe_filter_info {
struct txgbe_5tuple_filter_list fivetuple_list;
/* store the SYN filter info */
uint32_t syn_info;
+   /* store the rss filter info */
+   struct txgbe_rte_flow_rss_conf rss_info;
 };
 
 struct txgbe_l2_tn_key {
@@ -488,6 +490,11 @@ int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, 
uint16_t queue_idx,
   uint16_t tx_rate);
 int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
const struct rte_flow_action_rss *in);
+int txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with);
+int txgbe_config_rss_filter(struct rte_eth_dev *dev,
+   struct txgbe_rte_flow_rss_conf *conf, bool add);
+
 static inline int
 txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
  uint16_t ethertype)
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index ab812dbff..babda3a79 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -4653,3 +4653,79 @@ txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
return 0;
 }
 
+int
+txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
+ const struct rte_flow_action_rss *with)
+{
+   return (comp->func == with->func &&
+   comp->level == with->level &&
+   comp->types == with->types &&
+   comp->key_len == with->key_len &&
+   comp->queue_num == with->queue_num &&
+   !memcmp(comp->key, with->key, with->key_len) &&
+   !memcmp(comp->queue, with->queue,
+   sizeof(*with->queue) * with->queue_num));
+}
+
+int
+txgbe_config_rss_filter(struct rte_eth_dev *dev,
+   struct txgbe_rte_flow_rss_conf *conf, bool add)
+{
+   struct txgbe_hw *hw;
+   uint32_t reta;
+   uint16_t i;
+   uint16_t j;
+   struct rte_eth_rss_conf rss_conf = {
+   .rss_key = conf->conf.key_len ?
+   (void *)(uintptr_t)conf->conf.key : NULL,
+   .rss_key_len = conf->conf.key_len,
+   .rss_hf = conf->conf.types,
+   };
+   struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+
+   PMD_INIT_FUNC_TRACE();
+   hw = TXGBE_DEV_HW(dev);
+
+   if (!add) {
+   if (txgbe_action_rss_same(&filter_info->rss_info.conf,
+ &conf->conf)) {
+   txgbe_rss_disable(dev);
+   memset(&filter_info->rss_info, 0,
+   sizeof(struct txgbe_rte_flow_rss_conf));
+   return 0;
+   }
+   return -EINVAL;
+   }
+
+   if (filter_info->rss_info.conf.queue_num)
+   return -EINVAL;
+   /* Fill in redirection table
+* The byte-swap is needed because NIC registers are in
+* little-endian order.
+*/
+   reta = 0;
+   for (i = 0, j = 0; i < ETH_RSS_RETA_SIZE_128; i++, j++) {
+   if (j == conf->conf.queue_num)
+   j = 0;
+   reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
+   if ((i & 3) == 3)
+   wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+   }
+
+   /* Configure the RSS key and the RSS protocols used to compute
+* the RSS hash of input packets.
+*/
+   if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
+  

[dpdk-dev] [PATCH 16/37] net/txgbe: add FDIR parse normal rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for fdir filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_type.h |  10 +
 drivers/net/txgbe/txgbe_flow.c  | 856 
 2 files changed, 866 insertions(+)

diff --git a/drivers/net/txgbe/base/txgbe_type.h 
b/drivers/net/txgbe/base/txgbe_type.h
index 160d5253a..a73f66d39 100644
--- a/drivers/net/txgbe/base/txgbe_type.h
+++ b/drivers/net/txgbe/base/txgbe_type.h
@@ -83,6 +83,16 @@ enum {
 #define TXGBE_ATR_L4TYPE_SCTP  0x3
 #define TXGBE_ATR_TUNNEL_MASK  0x10
 #define TXGBE_ATR_TUNNEL_ANY   0x10
+enum txgbe_atr_flow_type {
+   TXGBE_ATR_FLOW_TYPE_IPV4= 0x0,
+   TXGBE_ATR_FLOW_TYPE_UDPV4   = 0x1,
+   TXGBE_ATR_FLOW_TYPE_TCPV4   = 0x2,
+   TXGBE_ATR_FLOW_TYPE_SCTPV4  = 0x3,
+   TXGBE_ATR_FLOW_TYPE_IPV6= 0x4,
+   TXGBE_ATR_FLOW_TYPE_UDPV6   = 0x5,
+   TXGBE_ATR_FLOW_TYPE_TCPV6   = 0x6,
+   TXGBE_ATR_FLOW_TYPE_SCTPV6  = 0x7,
+};
 
 /* Flow Director ATR input struct. */
 struct txgbe_atr_input {
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 8589e3328..ba1be9f12 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -27,6 +27,7 @@
 
 #define TXGBE_MIN_N_TUPLE_PRIO 1
 #define TXGBE_MAX_N_TUPLE_PRIO 7
+#define TXGBE_MAX_FLX_SOURCE_OFF 62
 
 /**
  * Endless loop will never happen with below assumption
@@ -1242,3 +1243,858 @@ txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
return ret;
 }
 
+/* Parse to get the attr and action info of flow director rule. */
+static int
+txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct txgbe_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+   const struct rte_flow_action *act;
+   const struct rte_flow_action_queue *act_q;
+   const struct rte_flow_action_mark *mark;
+
+   /* parse attr */
+   /* must be input direction */
+   if (!attr->ingress) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+   attr, "Only support ingress.");
+   return -rte_errno;
+   }
+
+   /* not supported */
+   if (attr->egress) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+   attr, "Not support egress.");
+   return -rte_errno;
+   }
+
+   /* not supported */
+   if (attr->transfer) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+   attr, "No support for transfer.");
+   return -rte_errno;
+   }
+
+   /* not supported */
+   if (attr->priority) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+   attr, "Not support priority.");
+   return -rte_errno;
+   }
+
+   /* check if the first not void action is QUEUE or DROP. */
+   act = next_no_void_action(actions, NULL);
+   if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
+   act->type != RTE_FLOW_ACTION_TYPE_DROP) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act, "Not supported action.");
+   return -rte_errno;
+   }
+
+   if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
+   act_q = (const struct rte_flow_action_queue *)act->conf;
+   rule->queue = act_q->index;
+   } else { /* drop */
+   /* signature mode does not support drop action. */
+   if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act, "Not supported action.");
+   return -rte_errno;
+   }
+   rule->fdirflags = TXGBE_FDIRPICMD_DROP;
+   }
+
+   /* check if the next not void item is MARK */
+   act = next_no_void_action(actions, act);
+   if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
+   act->type != RTE_FLOW_ACTION_TYPE_END) {
+   memset(rule, 0, sizeof(struct txgbe_fdir_rule));
+   rte_flow_error_set(error, EINV

[dpdk-dev] [PATCH 19/37] net/txgbe: add RSS filter parse rule

2020-11-03 Thread Jiawen Wu
Add support to parse flow for RSS filter.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.h |   8 +++
 drivers/net/txgbe/txgbe_flow.c   | 114 +++
 drivers/net/txgbe/txgbe_rxtx.c   |  20 ++
 3 files changed, 142 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 88c7d8191..924c57faf 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -121,6 +121,12 @@ struct txgbe_hw_fdir_info {
bool mask_added; /* If already got mask from consistent filter */
 };
 
+struct txgbe_rte_flow_rss_conf {
+   struct rte_flow_action_rss conf; /**< RSS parameters. */
+   uint8_t key[TXGBE_HKEY_MAX_INDEX * sizeof(uint32_t)]; /* Hash key. */
+   uint16_t queue[TXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */
+};
+
 /* structure for interrupt relative data */
 struct txgbe_interrupt {
uint32_t flags;
@@ -480,6 +486,8 @@ int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, 
uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
   uint16_t tx_rate);
+int txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
+   const struct rte_flow_action_rss *in);
 static inline int
 txgbe_ethertype_filter_lookup(struct txgbe_filter_info *filter_info,
  uint16_t ethertype)
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index b7d0e08a9..0d90ef33a 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2388,3 +2388,117 @@ txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
return ret;
 }
 
+static int
+txgbe_parse_rss_filter(struct rte_eth_dev *dev,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_action actions[],
+   struct txgbe_rte_flow_rss_conf *rss_conf,
+   struct rte_flow_error *error)
+{
+   const struct rte_flow_action *act;
+   const struct rte_flow_action_rss *rss;
+   uint16_t n;
+
+   /**
+* rss only supports forwarding,
+* check if the first not void action is RSS.
+*/
+   act = next_no_void_action(actions, NULL);
+   if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+   memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act, "Not supported action.");
+   return -rte_errno;
+   }
+
+   rss = (const struct rte_flow_action_rss *)act->conf;
+
+   if (!rss || !rss->queue_num) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act,
+  "no valid queues");
+   return -rte_errno;
+   }
+
+   for (n = 0; n < rss->queue_num; n++) {
+   if (rss->queue[n] >= dev->data->nb_rx_queues) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_ACTION,
+  act,
+  "queue id > max number of queues");
+   return -rte_errno;
+   }
+   }
+
+   if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+   return rte_flow_error_set
+   (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+"non-default RSS hash functions are not supported");
+   if (rss->level)
+   return rte_flow_error_set
+   (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+"a nonzero RSS encapsulation level is not supported");
+   if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+   return rte_flow_error_set
+   (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+"RSS hash key must be exactly 40 bytes");
+   if (rss->queue_num > RTE_DIM(rss_conf->queue))
+   return rte_flow_error_set
+   (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+"too many queues for RSS context");
+   if (txgbe_rss_conf_init(rss_conf, rss))
+   return rte_flow_error_set
+   (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+"RSS context initialization failure");
+
+   /* check if the next not void item is END */
+   act = next_no_void_action(actions, act);
+   if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+   memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act, "Not 

[dpdk-dev] [PATCH 22/37] net/txgbe: add flow API

2020-11-03 Thread Jiawen Wu
Add flow API with generic flow operations, validate first.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/txgbe.ini |  1 +
 doc/guides/nics/txgbe.rst  |  1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 25 
 drivers/net/txgbe/txgbe_ethdev.h   |  2 +
 drivers/net/txgbe/txgbe_flow.c | 61 ++
 5 files changed, 90 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini 
b/doc/guides/nics/features/txgbe.ini
index 7c457fede..9db2ccde0 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -26,6 +26,7 @@ SR-IOV   = Y
 DCB  = Y
 VLAN filter  = Y
 Flow control = Y
+Flow API = Y
 Rate limitation  = Y
 Traffic mirroring= Y
 CRC offload  = P
diff --git a/doc/guides/nics/txgbe.rst b/doc/guides/nics/txgbe.rst
index cd293698b..5a7299964 100644
--- a/doc/guides/nics/txgbe.rst
+++ b/doc/guides/nics/txgbe.rst
@@ -29,6 +29,7 @@ Features
 - IEEE 1588
 - FW version
 - LRO
+- Generic flow API
 
 Prerequisites
 -
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 45a785fec..cc061e0d6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -4234,6 +4234,30 @@ txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+static int
+txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+enum rte_filter_type filter_type,
+enum rte_filter_op filter_op,
+void *arg)
+{
+   int ret = 0;
+
+   switch (filter_type) {
+   case RTE_ETH_FILTER_GENERIC:
+   if (filter_op != RTE_ETH_FILTER_GET)
+   return -EINVAL;
+   *(const void **)arg = &txgbe_flow_ops;
+   break;
+   default:
+   PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+   filter_type);
+   ret = -EINVAL;
+   break;
+   }
+
+   return ret;
+}
+
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
u8 **mc_addr_ptr, u32 *vmdq)
@@ -5218,6 +5242,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.reta_query = txgbe_dev_rss_reta_query,
.rss_hash_update= txgbe_dev_rss_hash_update,
.rss_hash_conf_get  = txgbe_dev_rss_hash_conf_get,
+   .filter_ctrl= txgbe_dev_filter_ctrl,
.set_mc_addr_list   = txgbe_dev_set_mc_addr_list,
.rxq_info_get   = txgbe_rxq_info_get,
.txq_info_get   = txgbe_txq_info_get,
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index e12324404..2b5c0e35d 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -487,6 +487,8 @@ uint32_t txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, 
uint32_t orig_val);
 
 void txgbe_fdir_filter_restore(struct rte_eth_dev *dev);
 
+extern const struct rte_flow_ops txgbe_flow_ops;
+
 void txgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
 void txgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
 void txgbe_clear_syn_filter(struct rte_eth_dev *dev);
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 77e025863..884a8545f 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2629,3 +2629,64 @@ txgbe_filterlist_flush(void)
}
 }
 
+/**
+ * Check if the flow rule is supported by txgbe.
+ * It only checks the format. Don't guarantee the rule can be programmed into
+ * the HW, because there may not be enough room for the rule.
+ */
+static int
+txgbe_flow_validate(struct rte_eth_dev *dev,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   const struct rte_flow_action actions[],
+   struct rte_flow_error *error)
+{
+   struct rte_eth_ntuple_filter ntuple_filter;
+   struct rte_eth_ethertype_filter ethertype_filter;
+   struct rte_eth_syn_filter syn_filter;
+   struct txgbe_l2_tunnel_conf l2_tn_filter;
+   struct txgbe_fdir_rule fdir_rule;
+   struct txgbe_rte_flow_rss_conf rss_conf;
+   int ret;
+
+   memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+   ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+   actions, &ntuple_filter, error);
+   if (!ret)
+   return 0;
+
+   memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
+   ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
+   actions, &ethertype_filter, error);
+   if (!ret)
+   return 0;
+
+   memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
+   ret = txgbe_parse_syn_filter(dev, attr, pattern,
+ 

[dpdk-dev] [PATCH 21/37] net/txgbe: add filter list init and uninit

2020-11-03 Thread Jiawen Wu
Add filter list init and uninit.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c |   6 ++
 drivers/net/txgbe/txgbe_ethdev.h |   8 ++
 drivers/net/txgbe/txgbe_flow.c   | 127 +++
 3 files changed, 141 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 5ed96479e..45a785fec 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -698,6 +698,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* initialize l2 tunnel filter list & hash */
txgbe_l2_tn_filter_init(eth_dev);
 
+   /* initialize flow filter lists */
+   txgbe_filterlist_init();
+
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
@@ -1941,6 +1944,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* Remove all ntuple filters of the device */
txgbe_ntuple_filter_uninit(dev);
 
+   /* clear all the filters list */
+   txgbe_filterlist_flush();
+
return ret;
 }
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index df7a14506..e12324404 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -263,6 +263,11 @@ struct txgbe_l2_tn_info {
uint16_t e_tag_ether_type; /* ether type for e-tag */
 };
 
+struct rte_flow {
+   enum rte_filter_type filter_type;
+   void *rule;
+};
+
 /* The configuration of bandwidth */
 struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
@@ -448,6 +453,9 @@ txgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 int
 txgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
   struct txgbe_l2_tunnel_conf *l2_tunnel);
+void txgbe_filterlist_init(void);
+void txgbe_filterlist_flush(void);
+
 void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
   uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 0d90ef33a..77e025863 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -16,6 +16,7 @@
 #include 
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -29,6 +30,58 @@
 #define TXGBE_MAX_N_TUPLE_PRIO 7
 #define TXGBE_MAX_FLX_SOURCE_OFF 62
 
+/* ntuple filter list structure */
+struct txgbe_ntuple_filter_ele {
+   TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
+   struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct txgbe_ethertype_filter_ele {
+   TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
+   struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct txgbe_eth_syn_filter_ele {
+   TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
+   struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct txgbe_fdir_rule_ele {
+   TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
+   struct txgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct txgbe_eth_l2_tunnel_conf_ele {
+   TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
+   struct txgbe_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct txgbe_rss_conf_ele {
+   TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
+   struct txgbe_rte_flow_rss_conf filter_info;
+};
+/* txgbe_flow memory list structure */
+struct txgbe_flow_mem {
+   TAILQ_ENTRY(txgbe_flow_mem) entries;
+   struct rte_flow *flow;
+};
+
+TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
+TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
+TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
+TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
+TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
+TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
+
+static struct txgbe_ntuple_filter_list filter_ntuple_list;
+static struct txgbe_ethertype_filter_list filter_ethertype_list;
+static struct txgbe_syn_filter_list filter_syn_list;
+static struct txgbe_fdir_rule_filter_list filter_fdir_list;
+static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct txgbe_rss_filter_list filter_rss_list;
+static struct txgbe_flow_mem_list txgbe_flow_list;
+
 /**
  * Endless loop will never happen with below assumption
  * 1. there is at least one no-void item(END)
@@ -2502,3 +2555,77 @@ txgbe_parse_rss_filter(struct rte_eth_dev *dev,
return 0;
 }
 
+void
+txgbe_filterlist_init(void)
+{
+   TAILQ_INIT(&filter_ntuple_list);
+   TAILQ_INIT(&filter_ethertype_list);
+   TAILQ_INIT(&filter_syn_list);
+   TAILQ_INIT(&filter_fdir_list);
+   TAILQ_INIT(&filter_l2_tunnel_list);
+   TAILQ_INIT(&filter_rss_list);
+   TAILQ_INIT(&txgbe_flow_list);
+}
+
+void
+txgbe_filterlis

[dpdk-dev] [PATCH 24/37] net/txgbe: add flow API destroy function

2020-11-03 Thread Jiawen Wu
Add support to destroy operation for flow API.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_flow.c | 128 +
 1 file changed, 128 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 4141352bf..8d5362175 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2942,8 +2942,136 @@ txgbe_flow_validate(struct rte_eth_dev *dev,
return ret;
 }
 
+/* Destroy a flow rule on txgbe. */
+static int
+txgbe_flow_destroy(struct rte_eth_dev *dev,
+   struct rte_flow *flow,
+   struct rte_flow_error *error)
+{
+   int ret;
+   struct rte_flow *pmd_flow = flow;
+   enum rte_filter_type filter_type = pmd_flow->filter_type;
+   struct rte_eth_ntuple_filter ntuple_filter;
+   struct rte_eth_ethertype_filter ethertype_filter;
+   struct rte_eth_syn_filter syn_filter;
+   struct txgbe_fdir_rule fdir_rule;
+   struct txgbe_l2_tunnel_conf l2_tn_filter;
+   struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+   struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+   struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+   struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+   struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+   struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+   struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+   struct txgbe_rss_conf_ele *rss_filter_ptr;
+
+   switch (filter_type) {
+   case RTE_ETH_FILTER_NTUPLE:
+   ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
+   pmd_flow->rule;
+   rte_memcpy(&ntuple_filter,
+   &ntuple_filter_ptr->filter_info,
+   sizeof(struct rte_eth_ntuple_filter));
+   ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
+   if (!ret) {
+   TAILQ_REMOVE(&filter_ntuple_list,
+   ntuple_filter_ptr, entries);
+   rte_free(ntuple_filter_ptr);
+   }
+   break;
+   case RTE_ETH_FILTER_ETHERTYPE:
+   ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
+   pmd_flow->rule;
+   rte_memcpy(&ethertype_filter,
+   &ethertype_filter_ptr->filter_info,
+   sizeof(struct rte_eth_ethertype_filter));
+   ret = txgbe_add_del_ethertype_filter(dev,
+   &ethertype_filter, FALSE);
+   if (!ret) {
+   TAILQ_REMOVE(&filter_ethertype_list,
+   ethertype_filter_ptr, entries);
+   rte_free(ethertype_filter_ptr);
+   }
+   break;
+   case RTE_ETH_FILTER_SYN:
+   syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
+   pmd_flow->rule;
+   rte_memcpy(&syn_filter,
+   &syn_filter_ptr->filter_info,
+   sizeof(struct rte_eth_syn_filter));
+   ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
+   if (!ret) {
+   TAILQ_REMOVE(&filter_syn_list,
+   syn_filter_ptr, entries);
+   rte_free(syn_filter_ptr);
+   }
+   break;
+   case RTE_ETH_FILTER_FDIR:
+   fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
+   rte_memcpy(&fdir_rule,
+   &fdir_rule_ptr->filter_info,
+   sizeof(struct txgbe_fdir_rule));
+   ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
+   if (!ret) {
+   TAILQ_REMOVE(&filter_fdir_list,
+   fdir_rule_ptr, entries);
+   rte_free(fdir_rule_ptr);
+   if (TAILQ_EMPTY(&filter_fdir_list))
+   fdir_info->mask_added = false;
+   }
+   break;
+   case RTE_ETH_FILTER_L2_TUNNEL:
+   l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
+   pmd_flow->rule;
+   rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
+   sizeof(struct txgbe_l2_tunnel_conf));
+   ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
+   if (!ret) {
+   TAILQ_REMOVE(&filter_l2_tunnel_list,
+   l2_tn_filter_ptr, entries);
+   rte_free(l2_tn_filter_ptr);
+   }
+   break;
+   case RTE_ETH_FILTER_HASH:
+   rss_filter_ptr = (struct txgbe_rss_conf_ele *)
+   pmd_flow->rule;
+   ret = txgbe_config_rss_filter(dev,
+   

[dpdk-dev] [PATCH 25/37] net/txgbe: add flow API flush function

2020-11-03 Thread Jiawen Wu
Add support to flush operation for flow API.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/base/txgbe_hw.c | 87 +++
 drivers/net/txgbe/base/txgbe_hw.h |  1 +
 drivers/net/txgbe/txgbe_ethdev.c  | 21 
 drivers/net/txgbe/txgbe_ethdev.h  |  2 +
 drivers/net/txgbe/txgbe_fdir.c| 47 +
 drivers/net/txgbe/txgbe_flow.c| 43 +++
 6 files changed, 201 insertions(+)

diff --git a/drivers/net/txgbe/base/txgbe_hw.c 
b/drivers/net/txgbe/base/txgbe_hw.c
index 5ee13b0f8..dc419d7d4 100644
--- a/drivers/net/txgbe/base/txgbe_hw.c
+++ b/drivers/net/txgbe/base/txgbe_hw.c
@@ -3649,6 +3649,93 @@ s32 txgbe_reset_hw(struct txgbe_hw *hw)
return status;
 }
 
+/**
+ * txgbe_fdir_check_cmd_complete - poll to check whether FDIRPICMD is complete
+ * @hw: pointer to hardware structure
+ * @fdircmd: current value of FDIRCMD register
+ */
+static s32 txgbe_fdir_check_cmd_complete(struct txgbe_hw *hw, u32 *fdircmd)
+{
+   int i;
+
+   for (i = 0; i < TXGBE_FDIRCMD_CMD_POLL; i++) {
+   *fdircmd = rd32(hw, TXGBE_FDIRPICMD);
+   if (!(*fdircmd & TXGBE_FDIRPICMD_OP_MASK))
+   return 0;
+   usec_delay(10);
+   }
+
+   return TXGBE_ERR_FDIR_CMD_INCOMPLETE;
+}
+
+/**
+ *  txgbe_reinit_fdir_tables - Reinitialize Flow Director tables.
+ *  @hw: pointer to hardware structure
+ **/
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw)
+{
+   s32 err;
+   int i;
+   u32 fdirctrl = rd32(hw, TXGBE_FDIRCTL);
+   u32 fdircmd;
+   fdirctrl &= ~TXGBE_FDIRCTL_INITDONE;
+
+   DEBUGFUNC("txgbe_reinit_fdir_tables");
+
+   /*
+* Before starting reinitialization process,
+* FDIRPICMD.OP must be zero.
+*/
+   err = txgbe_fdir_check_cmd_complete(hw, &fdircmd);
+   if (err) {
+   DEBUGOUT("Flow Director previous command did not complete, 
aborting table re-initialization.\n");
+   return err;
+   }
+
+   wr32(hw, TXGBE_FDIRFREE, 0);
+   txgbe_flush(hw);
+   /*
+* adapters flow director init flow cannot be restarted,
+* Workaround silicon errata by performing the following steps
+* before re-writing the FDIRCTL control register with the same value.
+* - write 1 to bit 8 of FDIRPICMD register &
+* - write 0 to bit 8 of FDIRPICMD register
+*/
+   wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, TXGBE_FDIRPICMD_CLR);
+   txgbe_flush(hw);
+   wr32m(hw, TXGBE_FDIRPICMD, TXGBE_FDIRPICMD_CLR, 0);
+   txgbe_flush(hw);
+   /*
+* Clear FDIR Hash register to clear any leftover hashes
+* waiting to be programmed.
+*/
+   wr32(hw, TXGBE_FDIRPIHASH, 0x00);
+   txgbe_flush(hw);
+
+   wr32(hw, TXGBE_FDIRCTL, fdirctrl);
+   txgbe_flush(hw);
+
+   /* Poll init-done after we write FDIRCTL register */
+   for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+   if (rd32m(hw, TXGBE_FDIRCTL, TXGBE_FDIRCTL_INITDONE))
+   break;
+   msec_delay(1);
+   }
+   if (i >= TXGBE_FDIR_INIT_DONE_POLL) {
+   DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+   return TXGBE_ERR_FDIR_REINIT_FAILED;
+   }
+
+   /* Clear FDIR statistics registers (read to clear) */
+   rd32(hw, TXGBE_FDIRUSED);
+   rd32(hw, TXGBE_FDIRFAIL);
+   rd32(hw, TXGBE_FDIRMATCH);
+   rd32(hw, TXGBE_FDIRMISS);
+   rd32(hw, TXGBE_FDIRLEN);
+
+   return 0;
+}
+
 /**
  *  txgbe_start_hw_raptor - Prepare hardware for Tx/Rx
  *  @hw: pointer to hardware structure
diff --git a/drivers/net/txgbe/base/txgbe_hw.h 
b/drivers/net/txgbe/base/txgbe_hw.h
index 09298ea0c..a7473e7e5 100644
--- a/drivers/net/txgbe/base/txgbe_hw.h
+++ b/drivers/net/txgbe/base/txgbe_hw.h
@@ -108,5 +108,6 @@ s32 txgbe_init_phy_raptor(struct txgbe_hw *hw);
 s32 txgbe_enable_rx_dma_raptor(struct txgbe_hw *hw, u32 regval);
 s32 txgbe_prot_autoc_read_raptor(struct txgbe_hw *hw, bool *locked, u64 
*value);
 s32 txgbe_prot_autoc_write_raptor(struct txgbe_hw *hw, bool locked, u64 value);
+s32 txgbe_reinit_fdir_tables(struct txgbe_hw *hw);
 bool txgbe_verify_lesm_fw_enabled_raptor(struct txgbe_hw *hw);
 #endif /* _TXGBE_HW_H_ */
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index cc061e0d6..83c078a2a 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5186,6 +5186,27 @@ txgbe_clear_syn_filter(struct rte_eth_dev *dev)
}
 }
 
+/* remove all the L2 tunnel filters */
+int
+txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
+{
+   struct txgbe_l2_tn_info *l2_tn_info = TXGBE_DEV_L2_TN(dev);
+   struct txgbe_l2_tn_filter *l2_tn_filter;
+   struct txgbe_l2_tunnel_conf l2_tn_conf;
+   int ret = 0;
+
+   while ((l2_tn_filter = TAILQ_FIRST(&l2_tn_info->l2_tn_list))) {
+   l2_tn_conf.l2_tunnel_type = l2_

[dpdk-dev] [PATCH 23/37] net/txgbe: add flow API create function

2020-11-03 Thread Jiawen Wu
Add support for the flow API create operation.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.h |   2 +
 drivers/net/txgbe/txgbe_fdir.c   |  27 
 drivers/net/txgbe/txgbe_flow.c   | 257 +++
 3 files changed, 286 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2b5c0e35d..f439a201b 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -464,6 +464,8 @@ void txgbe_set_ivar_map(struct txgbe_hw *hw, int8_t 
direction,
  */
 int txgbe_fdir_configure(struct rte_eth_dev *dev);
 int txgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+   uint16_t offset);
 int txgbe_fdir_filter_program(struct rte_eth_dev *dev,
  struct txgbe_fdir_rule *rule,
  bool del, bool update);
diff --git a/drivers/net/txgbe/txgbe_fdir.c b/drivers/net/txgbe/txgbe_fdir.c
index 2faf7fd84..2342cf681 100644
--- a/drivers/net/txgbe/txgbe_fdir.c
+++ b/drivers/net/txgbe/txgbe_fdir.c
@@ -270,6 +270,33 @@ txgbe_fdir_store_input_mask(struct rte_eth_dev *dev)
return 0;
 }
 
+int
+txgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+   uint16_t offset)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   int i;
+
+   for (i = 0; i < 64; i++) {
+   uint32_t flexreg, flex;
+   flexreg = rd32(hw, TXGBE_FDIRFLEXCFG(i / 4));
+   flex = TXGBE_FDIRFLEXCFG_BASE_MAC;
+   flex |= TXGBE_FDIRFLEXCFG_OFST(offset / 2);
+   flexreg &= ~(TXGBE_FDIRFLEXCFG_ALL(~0UL, i % 4));
+   flexreg |= TXGBE_FDIRFLEXCFG_ALL(flex, i % 4);
+   wr32(hw, TXGBE_FDIRFLEXCFG(i / 4), flexreg);
+   }
+
+   txgbe_flush(hw);
+   for (i = 0; i < TXGBE_FDIR_INIT_DONE_POLL; i++) {
+   if (rd32(hw, TXGBE_FDIRCTL) &
+   TXGBE_FDIRCTL_INITDONE)
+   break;
+   msec_delay(1);
+   }
+   return 0;
+}
+
 /*
  * txgbe_check_fdir_flex_conf -check if the flex payload and mask configuration
  * arguments are valid
diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index 884a8545f..4141352bf 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -2629,6 +2629,262 @@ txgbe_filterlist_flush(void)
}
 }
 
+/**
+ * Create or destroy a flow rule.
+ * Theoretically one rule can match more than one filter.
+ * We will let it use the filter which it hits first.
+ * So, the sequence matters.
+ */
+static struct rte_flow *
+txgbe_flow_create(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+   int ret;
+   struct rte_eth_ntuple_filter ntuple_filter;
+   struct rte_eth_ethertype_filter ethertype_filter;
+   struct rte_eth_syn_filter syn_filter;
+   struct txgbe_fdir_rule fdir_rule;
+   struct txgbe_l2_tunnel_conf l2_tn_filter;
+   struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
+   struct txgbe_rte_flow_rss_conf rss_conf;
+   struct rte_flow *flow = NULL;
+   struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
+   struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
+   struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
+   struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
+   struct txgbe_fdir_rule_ele *fdir_rule_ptr;
+   struct txgbe_rss_conf_ele *rss_filter_ptr;
+   struct txgbe_flow_mem *txgbe_flow_mem_ptr;
+   uint8_t first_mask = FALSE;
+
+   flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
+   if (!flow) {
+   PMD_DRV_LOG(ERR, "failed to allocate memory");
+   return (struct rte_flow *)flow;
+   }
+   txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
+   sizeof(struct txgbe_flow_mem), 0);
+   if (!txgbe_flow_mem_ptr) {
+   PMD_DRV_LOG(ERR, "failed to allocate memory");
+   rte_free(flow);
+   return NULL;
+   }
+   txgbe_flow_mem_ptr->flow = flow;
+   TAILQ_INSERT_TAIL(&txgbe_flow_list,
+   txgbe_flow_mem_ptr, entries);
+
+   memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
+   ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
+   actions, &ntuple_filter, error);
+
+   if (!ret) {
+   ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
+   if (!ret) {
+   ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
+   sizeof(struct txgbe_ntuple_filter_ele), 0);
+   if (!ntuple_filter_ptr)

[dpdk-dev] [PATCH 27/37] net/txgbe: add TM configuration init and uninit

2020-11-03 Thread Jiawen Wu
Add traffic manager configuration init and uninit operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/meson.build|  1 +
 drivers/net/txgbe/txgbe_ethdev.c | 16 +
 drivers/net/txgbe/txgbe_ethdev.h | 60 
 drivers/net/txgbe/txgbe_tm.c | 57 ++
 4 files changed, 134 insertions(+)
 create mode 100644 drivers/net/txgbe/txgbe_tm.c

diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index bb1683631..352baad8b 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -11,6 +11,7 @@ sources = files(
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
+   'txgbe_tm.c',
 )
 
 deps += ['hash']
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 337ebf2e0..2b73abae6 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -704,6 +704,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* initialize bandwidth configuration info */
memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
+   /* initialize Traffic Manager configuration */
+   txgbe_tm_conf_init(eth_dev);
+
return 0;
 }
 
@@ -1545,6 +1548,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
int status;
uint16_t vf, idx;
uint32_t *link_speeds;
+   struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
PMD_INIT_FUNC_TRACE();
 
@@ -1748,6 +1752,11 @@ txgbe_dev_start(struct rte_eth_dev *dev)
txgbe_l2_tunnel_conf(dev);
txgbe_filter_restore(dev);
 
+   if (tm_conf->root && !tm_conf->committed)
+   PMD_DRV_LOG(WARNING,
+   "please call hierarchy_commit() "
+   "before starting the port");
+
/*
 * Update link status right before return, because it may
 * start link configuration process in a separate thread.
@@ -1780,6 +1789,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
int vf;
+   struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
 
if (hw->adapter_stopped)
return 0;
@@ -1832,6 +1842,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
intr_handle->intr_vec = NULL;
}
 
+   /* reset hierarchy commit */
+   tm_conf->committed = false;
+
adapter->rss_reta_updated = 0;
wr32m(hw, TXGBE_LEDCTL, 0x, TXGBE_LEDCTL_SEL_MASK);
 
@@ -1947,6 +1960,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* clear all the filters list */
txgbe_filterlist_flush();
 
+   /* Remove all Traffic Manager configuration */
+   txgbe_tm_conf_uninit(dev);
+
return ret;
 }
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1df74ab9b..0f1a39918 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 
 /* need update link, bit flag */
 #define TXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
@@ -273,6 +274,60 @@ struct txgbe_bw_conf {
uint8_t tc_num; /* Number of TCs. */
 };
 
+/* Struct to store Traffic Manager shaper profile. */
+struct txgbe_tm_shaper_profile {
+   TAILQ_ENTRY(txgbe_tm_shaper_profile) node;
+   uint32_t shaper_profile_id;
+   uint32_t reference_count;
+   struct rte_tm_shaper_params profile;
+};
+
+TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
+
+/* Struct to store Traffic Manager node configuration. */
+struct txgbe_tm_node {
+   TAILQ_ENTRY(txgbe_tm_node) node;
+   uint32_t id;
+   uint32_t priority;
+   uint32_t weight;
+   uint32_t reference_count;
+   uint16_t no;
+   struct txgbe_tm_node *parent;
+   struct txgbe_tm_shaper_profile *shaper_profile;
+   struct rte_tm_node_params params;
+};
+
+TAILQ_HEAD(txgbe_tm_node_list, txgbe_tm_node);
+
+/* The configuration of Traffic Manager */
+struct txgbe_tm_conf {
+   struct txgbe_shaper_profile_list shaper_profile_list;
+   struct txgbe_tm_node *root; /* root node - port */
+   struct txgbe_tm_node_list tc_list; /* node list for all the TCs */
+   struct txgbe_tm_node_list queue_list; /* node list for all the queues */
+   /**
+* The number of added TC nodes.
+* It should be no more than the TC number of this port.
+*/
+   uint32_t nb_tc_node;
+   /**
+* The number of added queue nodes.
+* It should be no more than the queue number of this port.
+*/
+   uint32_t nb_queue_node;
+   /**
+* This flag is used to check if APP can change the TM node
+* configuration.
+* When it's true, means the configuration is applied to HW,
+* APP should not change the configu

[dpdk-dev] [PATCH 26/37] net/txgbe: support UDP tunnel port add and delete

2020-11-03 Thread Jiawen Wu
Support UDP tunnel port add and delete operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c | 105 +++
 1 file changed, 105 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 83c078a2a..337ebf2e0 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5038,6 +5038,109 @@ txgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, 
bool en)
return ret;
 }
 
+/* Add UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   int ret = 0;
+
+   if (udp_tunnel == NULL)
+   return -EINVAL;
+
+   switch (udp_tunnel->prot_type) {
+   case RTE_TUNNEL_TYPE_VXLAN:
+   if (udp_tunnel->udp_port == 0) {
+   PMD_DRV_LOG(ERR, "Add VxLAN port 0 is not allowed.");
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_VXLANPORT, udp_tunnel->udp_port);
+   wr32(hw, TXGBE_VXLANPORTGPE, udp_tunnel->udp_port);
+   break;
+   case RTE_TUNNEL_TYPE_GENEVE:
+   if (udp_tunnel->udp_port == 0) {
+   PMD_DRV_LOG(ERR, "Add Geneve port 0 is not allowed.");
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_GENEVEPORT, udp_tunnel->udp_port);
+   break;
+   case RTE_TUNNEL_TYPE_TEREDO:
+   if (udp_tunnel->udp_port == 0) {
+   PMD_DRV_LOG(ERR, "Add Teredo port 0 is not allowed.");
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_TEREDOPORT, udp_tunnel->udp_port);
+   break;
+   default:
+   PMD_DRV_LOG(ERR, "Invalid tunnel type");
+   ret = -EINVAL;
+   break;
+   }
+
+   txgbe_flush(hw);
+
+   return ret;
+}
+
+/* Remove UDP tunneling port */
+static int
+txgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
+ struct rte_eth_udp_tunnel *udp_tunnel)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   int ret = 0;
+   uint16_t cur_port;
+
+   if (udp_tunnel == NULL)
+   return -EINVAL;
+
+   switch (udp_tunnel->prot_type) {
+   case RTE_TUNNEL_TYPE_VXLAN:
+   cur_port = (uint16_t)rd32(hw, TXGBE_VXLANPORT);
+   if (cur_port != udp_tunnel->udp_port) {
+   PMD_DRV_LOG(ERR, "Port %u does not exist.",
+   udp_tunnel->udp_port);
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_VXLANPORT, 0);
+   wr32(hw, TXGBE_VXLANPORTGPE, 0);
+   break;
+   case RTE_TUNNEL_TYPE_GENEVE:
+   cur_port = (uint16_t)rd32(hw, TXGBE_GENEVEPORT);
+   if (cur_port != udp_tunnel->udp_port) {
+   PMD_DRV_LOG(ERR, "Port %u does not exist.",
+   udp_tunnel->udp_port);
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_GENEVEPORT, 0);
+   break;
+   case RTE_TUNNEL_TYPE_TEREDO:
+   cur_port = (uint16_t)rd32(hw, TXGBE_TEREDOPORT);
+   if (cur_port != udp_tunnel->udp_port) {
+   PMD_DRV_LOG(ERR, "Port %u does not exist.",
+   udp_tunnel->udp_port);
+   ret = -EINVAL;
+   break;
+   }
+   wr32(hw, TXGBE_TEREDOPORT, 0);
+   break;
+   default:
+   PMD_DRV_LOG(ERR, "Invalid tunnel type");
+   ret = -EINVAL;
+   break;
+   }
+
+   txgbe_flush(hw);
+
+   return ret;
+}
+
 /* restore n-tuple filter */
 static inline void
 txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -5281,6 +5384,8 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.timesync_adjust_time   = txgbe_timesync_adjust_time,
.timesync_read_time = txgbe_timesync_read_time,
.timesync_write_time= txgbe_timesync_write_time,
+   .udp_tunnel_port_add= txgbe_dev_udp_tunnel_port_add,
+   .udp_tunnel_port_del= txgbe_dev_udp_tunnel_port_del,
.tx_done_cleanup= txgbe_dev_tx_done_cleanup,
 };
 
-- 
2.18.4





[dpdk-dev] [PATCH 28/37] net/txgbe: add TM capabilities get operation

2020-11-03 Thread Jiawen Wu
Add support to get traffic manager capabilities.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.c |   1 +
 drivers/net/txgbe/txgbe_ethdev.h |   9 +
 drivers/net/txgbe/txgbe_tm.c | 278 +++
 3 files changed, 288 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 2b73abae6..80ea1038c 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -5402,6 +5402,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
.timesync_write_time= txgbe_timesync_write_time,
.udp_tunnel_port_add= txgbe_dev_udp_tunnel_port_add,
.udp_tunnel_port_del= txgbe_dev_udp_tunnel_port_del,
+   .tm_ops_get = txgbe_tm_ops_get,
.tx_done_cleanup= txgbe_dev_tx_done_cleanup,
 };
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 0f1a39918..cf377809e 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -284,6 +284,14 @@ struct txgbe_tm_shaper_profile {
 
 TAILQ_HEAD(txgbe_shaper_profile_list, txgbe_tm_shaper_profile);
 
+/* node type of Traffic Manager */
+enum txgbe_tm_node_type {
+   TXGBE_TM_NODE_TYPE_PORT,
+   TXGBE_TM_NODE_TYPE_TC,
+   TXGBE_TM_NODE_TYPE_QUEUE,
+   TXGBE_TM_NODE_TYPE_MAX,
+};
+
 /* Struct to store Traffic Manager node configuration. */
 struct txgbe_tm_node {
TAILQ_ENTRY(txgbe_tm_node) node;
@@ -558,6 +566,7 @@ int txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
 int txgbe_vt_check(struct txgbe_hw *hw);
 int txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
uint16_t tx_rate, uint64_t q_msk);
+int txgbe_tm_ops_get(struct rte_eth_dev *dev, void *ops);
 void txgbe_tm_conf_init(struct rte_eth_dev *dev);
 void txgbe_tm_conf_uninit(struct rte_eth_dev *dev);
 int txgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx,
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 78f426964..545590ba2 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -6,6 +6,36 @@
 
 #include "txgbe_ethdev.h"
 
+static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+struct rte_tm_capabilities *cap,
+struct rte_tm_error *error);
+static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
+   uint32_t level_id,
+   struct rte_tm_level_capabilities *cap,
+   struct rte_tm_error *error);
+static int txgbe_node_capabilities_get(struct rte_eth_dev *dev,
+  uint32_t node_id,
+  struct rte_tm_node_capabilities *cap,
+  struct rte_tm_error *error);
+
+const struct rte_tm_ops txgbe_tm_ops = {
+   .capabilities_get = txgbe_tm_capabilities_get,
+   .level_capabilities_get = txgbe_level_capabilities_get,
+   .node_capabilities_get = txgbe_node_capabilities_get,
+};
+
+int
+txgbe_tm_ops_get(struct rte_eth_dev *dev __rte_unused,
+void *arg)
+{
+   if (!arg)
+   return -EINVAL;
+
+   *(const void **)arg = &txgbe_tm_ops;
+
+   return 0;
+}
+
 void
 txgbe_tm_conf_init(struct rte_eth_dev *dev)
 {
@@ -55,3 +85,251 @@ txgbe_tm_conf_uninit(struct rte_eth_dev *dev)
}
 }
 
+static inline uint8_t
+txgbe_tc_nb_get(struct rte_eth_dev *dev)
+{
+   struct rte_eth_conf *eth_conf;
+   uint8_t nb_tcs = 0;
+
+   eth_conf = &dev->data->dev_conf;
+   if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+   nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+   } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+   if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+   ETH_32_POOLS)
+   nb_tcs = ETH_4_TCS;
+   else
+   nb_tcs = ETH_8_TCS;
+   } else {
+   nb_tcs = 1;
+   }
+
+   return nb_tcs;
+}
+
+static int
+txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
+ struct rte_tm_capabilities *cap,
+ struct rte_tm_error *error)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   uint8_t tc_nb = txgbe_tc_nb_get(dev);
+
+   if (!cap || !error)
+   return -EINVAL;
+
+   if (tc_nb > hw->mac.max_tx_queues)
+   return -EINVAL;
+
+   error->type = RTE_TM_ERROR_TYPE_NONE;
+
+   /* set all the parameters to 0 first. */
+   memset(cap, 0, sizeof(struct rte_tm_capabilities));
+
+   /**
+* here is the max capability not the current configuration.
+*/
+   /* port + TCs + queues */
+   cap->n_nodes_max = 1 + TXGBE_DCB_TC_MAX +
+

[dpdk-dev] [PATCH 31/37] net/txgbe: add TM hierarchy commit

2020-11-03 Thread Jiawen Wu
Add traffic manager hierarchy commit.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_tm.c | 70 
 1 file changed, 70 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 6dd593e54..b8edd78bf 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -33,6 +33,9 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev 
*dev,
   uint32_t node_id,
   struct rte_tm_node_capabilities *cap,
   struct rte_tm_error *error);
+static int txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+ int clear_on_fail,
+ struct rte_tm_error *error);
 
 const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
@@ -43,6 +46,7 @@ const struct rte_tm_ops txgbe_tm_ops = {
.node_type_get = txgbe_node_type_get,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
+   .hierarchy_commit = txgbe_hierarchy_commit,
 };
 
 int
@@ -950,3 +954,69 @@ txgbe_node_capabilities_get(struct rte_eth_dev *dev,
return 0;
 }
 
+static int
+txgbe_hierarchy_commit(struct rte_eth_dev *dev,
+  int clear_on_fail,
+  struct rte_tm_error *error)
+{
+   struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+   struct txgbe_tm_node *tm_node;
+   uint64_t bw;
+   int ret;
+
+   if (!error)
+   return -EINVAL;
+
+   /* check the setting */
+   if (!tm_conf->root)
+   goto done;
+
+   /* not support port max bandwidth yet */
+   if (tm_conf->root->shaper_profile &&
+   tm_conf->root->shaper_profile->profile.peak.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "no port max bandwidth";
+   goto fail_clear;
+   }
+
+   /* HW not support TC max bandwidth */
+   TAILQ_FOREACH(tm_node, &tm_conf->tc_list, node) {
+   if (tm_node->shaper_profile &&
+   tm_node->shaper_profile->profile.peak.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message = "no TC max bandwidth";
+   goto fail_clear;
+   }
+   }
+
+   /* queue max bandwidth */
+   TAILQ_FOREACH(tm_node, &tm_conf->queue_list, node) {
+   if (tm_node->shaper_profile)
+   bw = tm_node->shaper_profile->profile.peak.rate;
+   else
+   bw = 0;
+   if (bw) {
+   /* interpret Bps to Mbps */
+   bw = bw * 8 / 1000 / 1000;
+   ret = txgbe_set_queue_rate_limit(dev, tm_node->no, bw);
+   if (ret) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE;
+   error->message =
+   "failed to set queue max bandwidth";
+   goto fail_clear;
+   }
+   }
+   }
+
+done:
+   tm_conf->committed = true;
+   return 0;
+
+fail_clear:
+   /* clear all the traffic manager configuration */
+   if (clear_on_fail) {
+   txgbe_tm_conf_uninit(dev);
+   txgbe_tm_conf_init(dev);
+   }
+   return -EINVAL;
+}
-- 
2.18.4





[dpdk-dev] [PATCH 30/37] net/txgbe: support TM node add and delete

2020-11-03 Thread Jiawen Wu
Support traffic manager node add and delete operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.h |   1 +
 drivers/net/txgbe/txgbe_tm.c | 488 +++
 2 files changed, 489 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index cf377809e..1d90c6a06 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -14,6 +14,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 
 /* need update link, bit flag */
diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 8adb03825..6dd593e54 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -16,6 +16,15 @@ static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
 static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
uint32_t shaper_profile_id,
struct rte_tm_error *error);
+static int txgbe_node_add(struct rte_eth_dev *dev, uint32_t node_id,
+ uint32_t parent_node_id, uint32_t priority,
+ uint32_t weight, uint32_t level_id,
+ struct rte_tm_node_params *params,
+ struct rte_tm_error *error);
+static int txgbe_node_delete(struct rte_eth_dev *dev, uint32_t node_id,
+struct rte_tm_error *error);
+static int txgbe_node_type_get(struct rte_eth_dev *dev, uint32_t node_id,
+  int *is_leaf, struct rte_tm_error *error);
 static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
@@ -29,6 +38,9 @@ const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
.shaper_profile_add = txgbe_shaper_profile_add,
.shaper_profile_delete = txgbe_shaper_profile_del,
+   .node_add = txgbe_node_add,
+   .node_delete = txgbe_node_delete,
+   .node_type_get = txgbe_node_type_get,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
 };
@@ -332,6 +344,482 @@ txgbe_tm_node_search(struct rte_eth_dev *dev, uint32_t 
node_id,
return NULL;
 }
 
+static void
+txgbe_queue_base_nb_get(struct rte_eth_dev *dev, uint16_t tc_node_no,
+   uint16_t *base, uint16_t *nb)
+{
+   uint8_t nb_tcs = txgbe_tc_nb_get(dev);
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+   uint16_t vf_num = pci_dev->max_vfs;
+
+   *base = 0;
+   *nb = 0;
+
+   /* VT on */
+   if (vf_num) {
+   /* no DCB */
+   if (nb_tcs == 1) {
+   if (vf_num >= ETH_32_POOLS) {
+   *nb = 2;
+   *base = vf_num * 2;
+   } else if (vf_num >= ETH_16_POOLS) {
+   *nb = 4;
+   *base = vf_num * 4;
+   } else {
+   *nb = 8;
+   *base = vf_num * 8;
+   }
+   } else {
+   /* DCB */
+   *nb = 1;
+   *base = vf_num * nb_tcs + tc_node_no;
+   }
+   } else {
+   /* VT off */
+   if (nb_tcs == ETH_8_TCS) {
+   switch (tc_node_no) {
+   case 0:
+   *base = 0;
+   *nb = 32;
+   break;
+   case 1:
+   *base = 32;
+   *nb = 32;
+   break;
+   case 2:
+   *base = 64;
+   *nb = 16;
+   break;
+   case 3:
+   *base = 80;
+   *nb = 16;
+   break;
+   case 4:
+   *base = 96;
+   *nb = 8;
+   break;
+   case 5:
+   *base = 104;
+   *nb = 8;
+   break;
+   case 6:
+   *base = 112;
+   *nb = 8;
+   break;
+   case 7:
+   *base = 120;
+   *nb = 8;
+   break;
+   default:
+   return;
+   }
+   } else {
+   switch 

[dpdk-dev] [PATCH 29/37] net/txgbe: support TM shaper profile add and delete

2020-11-03 Thread Jiawen Wu
Support traffic manager profile add and delete operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_tm.c | 129 +++
 1 file changed, 129 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_tm.c b/drivers/net/txgbe/txgbe_tm.c
index 545590ba2..8adb03825 100644
--- a/drivers/net/txgbe/txgbe_tm.c
+++ b/drivers/net/txgbe/txgbe_tm.c
@@ -9,6 +9,13 @@
 static int txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
 struct rte_tm_capabilities *cap,
 struct rte_tm_error *error);
+static int txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id,
+   struct rte_tm_shaper_params *profile,
+   struct rte_tm_error *error);
+static int txgbe_shaper_profile_del(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id,
+   struct rte_tm_error *error);
 static int txgbe_level_capabilities_get(struct rte_eth_dev *dev,
uint32_t level_id,
struct rte_tm_level_capabilities *cap,
@@ -20,6 +27,8 @@ static int txgbe_node_capabilities_get(struct rte_eth_dev 
*dev,
 
 const struct rte_tm_ops txgbe_tm_ops = {
.capabilities_get = txgbe_tm_capabilities_get,
+   .shaper_profile_add = txgbe_shaper_profile_add,
+   .shaper_profile_delete = txgbe_shaper_profile_del,
.level_capabilities_get = txgbe_level_capabilities_get,
.node_capabilities_get = txgbe_node_capabilities_get,
 };
@@ -174,6 +183,126 @@ txgbe_tm_capabilities_get(struct rte_eth_dev *dev,
return 0;
 }
 
+static inline struct txgbe_tm_shaper_profile *
+txgbe_shaper_profile_search(struct rte_eth_dev *dev,
+   uint32_t shaper_profile_id)
+{
+   struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+   struct txgbe_shaper_profile_list *shaper_profile_list =
+   &tm_conf->shaper_profile_list;
+   struct txgbe_tm_shaper_profile *shaper_profile;
+
+   TAILQ_FOREACH(shaper_profile, shaper_profile_list, node) {
+   if (shaper_profile_id == shaper_profile->shaper_profile_id)
+   return shaper_profile;
+   }
+
+   return NULL;
+}
+
+static int
+txgbe_shaper_profile_param_check(struct rte_tm_shaper_params *profile,
+struct rte_tm_error *error)
+{
+   /* min rate not supported */
+   if (profile->committed.rate) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_RATE;
+   error->message = "committed rate not supported";
+   return -EINVAL;
+   }
+   /* min bucket size not supported */
+   if (profile->committed.size) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_COMMITTED_SIZE;
+   error->message = "committed bucket size not supported";
+   return -EINVAL;
+   }
+   /* max bucket size not supported */
+   if (profile->peak.size) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PEAK_SIZE;
+   error->message = "peak bucket size not supported";
+   return -EINVAL;
+   }
+   /* length adjustment not supported */
+   if (profile->pkt_length_adjust) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_PKT_ADJUST_LEN;
+   error->message = "packet length adjustment not supported";
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+static int
+txgbe_shaper_profile_add(struct rte_eth_dev *dev,
+uint32_t shaper_profile_id,
+struct rte_tm_shaper_params *profile,
+struct rte_tm_error *error)
+{
+   struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+   struct txgbe_tm_shaper_profile *shaper_profile;
+   int ret;
+
+   if (!profile || !error)
+   return -EINVAL;
+
+   ret = txgbe_shaper_profile_param_check(profile, error);
+   if (ret)
+   return ret;
+
+   shaper_profile = txgbe_shaper_profile_search(dev, shaper_profile_id);
+
+   if (shaper_profile) {
+   error->type = RTE_TM_ERROR_TYPE_SHAPER_PROFILE_ID;
+   error->message = "profile ID exist";
+   return -EINVAL;
+   }
+
+   shaper_profile = rte_zmalloc("txgbe_tm_shaper_profile",
+sizeof(struct txgbe_tm_shaper_profile),
+0);
+   if (!shaper_profile)
+   return -ENOMEM;
+   shaper_profile->shaper_profile_id = shaper_profile_id;
+   rte_memcpy(&shaper_profile->profile, profile,
+sizeof(struct rte_tm_shaper_params));
+   TAILQ_INSERT_TAIL(&tm_conf->shaper_profile_list,
+ shaper_profile, 

[dpdk-dev] [PATCH 34/37] net/txgbe: add security session create operation

2020-11-03 Thread Jiawen Wu
Add support to configure a security session.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ethdev.h |   6 +
 drivers/net/txgbe/txgbe_ipsec.c  | 250 +++
 drivers/net/txgbe/txgbe_ipsec.h  |  66 
 3 files changed, 322 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 1e7fa1f87..eb68e12b2 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -367,6 +367,9 @@ struct txgbe_adapter {
struct txgbe_filter_infofilter;
struct txgbe_l2_tn_info l2_tn;
struct txgbe_bw_confbw_conf;
+#ifdef RTE_LIB_SECURITY
+   struct txgbe_ipsec  ipsec;
+#endif
bool rx_bulk_alloc_allowed;
struct rte_timecounter  systime_tc;
struct rte_timecounter  rx_tstamp_tc;
@@ -428,6 +431,9 @@ struct txgbe_adapter {
 #define TXGBE_DEV_TM_CONF(dev) \
(&((struct txgbe_adapter *)(dev)->data->dev_private)->tm_conf)
 
+#define TXGBE_DEV_IPSEC(dev) \
+   (&((struct txgbe_adapter *)(dev)->data->dev_private)->ipsec)
+
 /*
  * RX/TX function prototypes
  */
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index b21bba237..7501e25af 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -13,6 +13,255 @@
 #include "txgbe_ethdev.h"
 #include "txgbe_ipsec.h"
 
+#define CMP_IP(a, b) (\
+   (a).ipv6[0] == (b).ipv6[0] && \
+   (a).ipv6[1] == (b).ipv6[1] && \
+   (a).ipv6[2] == (b).ipv6[2] && \
+   (a).ipv6[3] == (b).ipv6[3])
+
+static int
+txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
+{
+   struct rte_eth_dev *dev = ic_session->dev;
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+   uint32_t reg_val;
+   int sa_index = -1;
+
+   if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+   int i, ip_index = -1;
+   uint8_t *key;
+
+   /* Find a match in the IP table*/
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   if (CMP_IP(priv->rx_ip_tbl[i].ip,
+  ic_session->dst_ip)) {
+   ip_index = i;
+   break;
+   }
+   }
+   /* If no match, find a free entry in the IP table*/
+   if (ip_index < 0) {
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   if (priv->rx_ip_tbl[i].ref_count == 0) {
+   ip_index = i;
+   break;
+   }
+   }
+   }
+
+   /* Fail if no match and no free entries*/
+   if (ip_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "No free entry left in the Rx IP table\n");
+   return -1;
+   }
+
+   /* Find a free entry in the SA table*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   if (priv->rx_sa_tbl[i].used == 0) {
+   sa_index = i;
+   break;
+   }
+   }
+   /* Fail if no free entries*/
+   if (sa_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "No free entry left in the Rx SA table\n");
+   return -1;
+   }
+
+   priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+   ic_session->dst_ip.ipv6[0];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+   ic_session->dst_ip.ipv6[1];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+   ic_session->dst_ip.ipv6[2];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+   ic_session->dst_ip.ipv6[3];
+   priv->rx_ip_tbl[ip_index].ref_count++;
+
+   priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
+   priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+   priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+   if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION)
+   priv->rx_sa_tbl[sa_index].mode |=
+   (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+   if (ic_session->dst_ip.type == IPv6) {
+   priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+   priv->rx_ip_tbl[ip_index].ip.type = IPv6;
+   } else if (ic_session->dst_ip.type == IPv4) {
+   priv->rx_ip_tbl[ip_index].ip.type = IPv4;
+   }
+   priv->rx_sa_tbl[sa_index].used = 1;
+
+   /* write IP table entry*/
+  

[dpdk-dev] [PATCH 32/37] net/txgbe: add macsec setting

2020-11-03 Thread Jiawen Wu
Add macsec register enable and setting reset operations.
Add macsec offload support.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/txgbe.ini |  1 +
 drivers/net/txgbe/txgbe_ethdev.c   | 87 ++
 drivers/net/txgbe/txgbe_ethdev.h   | 17 ++
 drivers/net/txgbe/txgbe_rxtx.c |  3 ++
 4 files changed, 108 insertions(+)

diff --git a/doc/guides/nics/features/txgbe.ini 
b/doc/guides/nics/features/txgbe.ini
index 9db2ccde0..d6705f7aa 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -34,6 +34,7 @@ VLAN offload = P
 QinQ offload = P
 L3 checksum offload  = P
 L4 checksum offload  = P
+MACsec offload   = P
 Inner L3 checksum= P
 Inner L4 checksum= P
 Packet type parsing  = Y
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index 80ea1038c..c92a4aa5f 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -487,6 +487,8 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
 
PMD_INIT_FUNC_TRACE();
 
+   txgbe_dev_macsec_setting_reset(eth_dev);
+
eth_dev->dev_ops = &txgbe_eth_dev_ops;
eth_dev->rx_queue_count   = txgbe_dev_rx_queue_count;
eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
@@ -1549,6 +1551,8 @@ txgbe_dev_start(struct rte_eth_dev *dev)
uint16_t vf, idx;
uint32_t *link_speeds;
struct txgbe_tm_conf *tm_conf = TXGBE_DEV_TM_CONF(dev);
+   struct txgbe_macsec_setting *macsec_setting =
+   TXGBE_DEV_MACSEC_SETTING(dev);
 
PMD_INIT_FUNC_TRACE();
 
@@ -1763,6 +1767,10 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 */
txgbe_dev_link_update(dev, 0);
 
+   /* setup the macsec ctrl register */
+   if (macsec_setting->offload_en)
+   txgbe_dev_macsec_register_enable(dev, macsec_setting);
+
wr32m(hw, TXGBE_LEDCTL, 0x, TXGBE_LEDCTL_ORD_MASK);
 
txgbe_read_stats_registers(hw, hw_stats);
@@ -5326,6 +5334,85 @@ txgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
return 0;
 }
 
+void
+txgbe_dev_macsec_setting_reset(struct rte_eth_dev *dev)
+{
+   struct txgbe_macsec_setting *macsec = TXGBE_DEV_MACSEC_SETTING(dev);
+
+   macsec->offload_en = 0;
+   macsec->encrypt_en = 0;
+   macsec->replayprotect_en = 0;
+}
+
+void
+txgbe_dev_macsec_register_enable(struct rte_eth_dev *dev,
+   struct txgbe_macsec_setting *macsec_setting)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   uint32_t ctrl;
+   uint8_t en = macsec_setting->encrypt_en;
+   uint8_t rp = macsec_setting->replayprotect_en;
+
+   /**
+* Workaround:
+* As no txgbe_disable_sec_rx_path equivalent is
+* implemented for tx in the base code, and we are
+* not allowed to modify the base code in DPDK, so
+* just call the hand-written one directly for now.
+* The hardware support has been checked by
+* txgbe_disable_sec_rx_path().
+*/
+   txgbe_disable_sec_tx_path(hw);
+
+   /* Enable Ethernet CRC (required by MACsec offload) */
+   ctrl = rd32(hw, TXGBE_SECRXCTL);
+   ctrl |= TXGBE_SECRXCTL_CRCSTRIP;
+   wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+   /* Enable the TX and RX crypto engines */
+   ctrl = rd32(hw, TXGBE_SECTXCTL);
+   ctrl &= ~TXGBE_SECTXCTL_XDSA;
+   wr32(hw, TXGBE_SECTXCTL, ctrl);
+
+   ctrl = rd32(hw, TXGBE_SECRXCTL);
+   ctrl &= ~TXGBE_SECRXCTL_XDSA;
+   wr32(hw, TXGBE_SECRXCTL, ctrl);
+
+   ctrl = rd32(hw, TXGBE_SECTXIFG);
+   ctrl &= ~TXGBE_SECTXIFG_MIN_MASK;
+   ctrl |= TXGBE_SECTXIFG_MIN(0x3);
+   wr32(hw, TXGBE_SECTXIFG, ctrl);
+
+   /* Enable SA lookup */
+   ctrl = rd32(hw, TXGBE_LSECTXCTL);
+   ctrl &= ~TXGBE_LSECTXCTL_MODE_MASK;
+   ctrl |= en ? TXGBE_LSECTXCTL_MODE_AENC : TXGBE_LSECTXCTL_MODE_AUTH;
+   ctrl &= ~TXGBE_LSECTXCTL_PNTRH_MASK;
+   ctrl |= TXGBE_LSECTXCTL_PNTRH(TXGBE_MACSEC_PNTHRSH);
+   wr32(hw, TXGBE_LSECTXCTL, ctrl);
+
+   ctrl = rd32(hw, TXGBE_LSECRXCTL);
+   ctrl &= ~TXGBE_LSECRXCTL_MODE_MASK;
+   ctrl |= TXGBE_LSECRXCTL_MODE_STRICT;
+   ctrl &= ~TXGBE_LSECRXCTL_POSTHDR;
+   if (rp)
+   ctrl |= TXGBE_LSECRXCTL_REPLAY;
+   else
+   ctrl &= ~TXGBE_LSECRXCTL_REPLAY;
+   wr32(hw, TXGBE_LSECRXCTL, ctrl);
+
+   /* Start the data paths */
+   txgbe_enable_sec_rx_path(hw);
+   /**
+* Workaround:
+* As no txgbe_enable_sec_rx_path equivalent is
+* implemented for tx in the base code, and we are
+* not allowed to modify the base code in DPDK, so
+* just call the hand-written one directly for now.
+*/
+   txgbe_enable_sec_tx_path(hw);
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
.dev_configure  = txgbe_d

[dpdk-dev] [PATCH 33/37] net/txgbe: add IPsec context creation

2020-11-03 Thread Jiawen Wu
Initialize security context, and add support to get
security capabilities.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/txgbe.ini |   1 +
 drivers/net/txgbe/meson.build  |   3 +-
 drivers/net/txgbe/txgbe_ethdev.c   |  13 +++
 drivers/net/txgbe/txgbe_ethdev.h   |   3 +
 drivers/net/txgbe/txgbe_ipsec.c| 181 +
 drivers/net/txgbe/txgbe_ipsec.h|  13 +++
 6 files changed, 213 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/txgbe/txgbe_ipsec.c
 create mode 100644 drivers/net/txgbe/txgbe_ipsec.h

diff --git a/doc/guides/nics/features/txgbe.ini 
b/doc/guides/nics/features/txgbe.ini
index d6705f7aa..54daa382a 100644
--- a/doc/guides/nics/features/txgbe.ini
+++ b/doc/guides/nics/features/txgbe.ini
@@ -29,6 +29,7 @@ Flow control = Y
 Flow API = Y
 Rate limitation  = Y
 Traffic mirroring= Y
+Inline crypto= Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/drivers/net/txgbe/meson.build b/drivers/net/txgbe/meson.build
index 352baad8b..f6a51a998 100644
--- a/drivers/net/txgbe/meson.build
+++ b/drivers/net/txgbe/meson.build
@@ -8,13 +8,14 @@ sources = files(
'txgbe_ethdev.c',
'txgbe_fdir.c',
'txgbe_flow.c',
+   'txgbe_ipsec.c',
'txgbe_ptypes.c',
'txgbe_pf.c',
'txgbe_rxtx.c',
'txgbe_tm.c',
 )
 
-deps += ['hash']
+deps += ['hash', 'security']
 
 includes += include_directories('base')
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index c92a4aa5f..c0582e264 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -16,6 +16,9 @@
 #include 
 #include 
 #include 
+#ifdef RTE_LIB_SECURITY
+#include 
+#endif
 
 #include "txgbe_logs.h"
 #include "base/txgbe.h"
@@ -549,6 +552,12 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* Unlock any pending hardware semaphore */
txgbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIB_SECURITY
+   /* Initialize security_ctx only for primary process*/
+   if (txgbe_ipsec_ctx_create(eth_dev))
+   return -ENOMEM;
+#endif
+
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
txgbe_dcb_init(hw, dcb_config);
@@ -1971,6 +1980,10 @@ txgbe_dev_close(struct rte_eth_dev *dev)
/* Remove all Traffic Manager configuration */
txgbe_tm_conf_uninit(dev);
 
+#ifdef RTE_LIB_SECURITY
+   rte_free(dev->security_ctx);
+#endif
+
return ret;
 }
 
diff --git a/drivers/net/txgbe/txgbe_ethdev.h b/drivers/net/txgbe/txgbe_ethdev.h
index 2d15e1ac3..1e7fa1f87 100644
--- a/drivers/net/txgbe/txgbe_ethdev.h
+++ b/drivers/net/txgbe/txgbe_ethdev.h
@@ -9,6 +9,9 @@
 
 #include "base/txgbe.h"
 #include "txgbe_ptypes.h"
+#ifdef RTE_LIB_SECURITY
+#include "txgbe_ipsec.h"
+#endif
 #include 
 #include 
 #include 
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
new file mode 100644
index 0..b21bba237
--- /dev/null
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2020
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "base/txgbe.h"
+#include "txgbe_ethdev.h"
+#include "txgbe_ipsec.h"
+
+static const struct rte_security_capability *
+txgbe_crypto_capabilities_get(void *device __rte_unused)
+{
+   static const struct rte_cryptodev_capabilities
+   aes_gcm_gmac_crypto_capabilities[] = {
+   {   /* AES GMAC (128-bit) */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+   .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+   {.auth = {
+   .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+   .block_size = 16,
+   .key_size = {
+   .min = 16,
+   .max = 16,
+   .increment = 0
+   },
+   .digest_size = {
+   .min = 16,
+   .max = 16,
+   .increment = 0
+   },
+   .iv_size = {
+   .min = 12,
+   .max = 12,
+   .increment = 0
+   }
+   }, }
+   }, }
+   },
+   {   /* AES GCM (128-bit) */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRI

[dpdk-dev] [PATCH 37/37] net/txgbe: add security type in flow action

2020-11-03 Thread Jiawen Wu
Add security type in flow action.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_flow.c  | 52 +
 drivers/net/txgbe/txgbe_ipsec.c | 30 +++
 drivers/net/txgbe/txgbe_ipsec.h |  3 ++
 3 files changed, 85 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_flow.c b/drivers/net/txgbe/txgbe_flow.c
index b5f4073e2..5ca89d619 100644
--- a/drivers/net/txgbe/txgbe_flow.c
+++ b/drivers/net/txgbe/txgbe_flow.c
@@ -145,6 +145,9 @@ const struct rte_flow_action *next_no_void_action(
  * END
  * other members in mask and spec should set to 0x00.
  * item->last should be NULL.
+ *
+ * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
+ *
  */
 static int
 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
@@ -193,6 +196,43 @@ cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
 
+#ifdef RTE_LIB_SECURITY
+   /**
+*  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
+*/
+   act = next_no_void_action(actions, NULL);
+   if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
+   const void *conf = act->conf;
+   /* check if the next not void item is END */
+   act = next_no_void_action(actions, act);
+   if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+   memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ACTION,
+   act, "Not supported action.");
+   return -rte_errno;
+   }
+
+   /* get the IP pattern*/
+   item = next_no_void_pattern(pattern, NULL);
+   while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+   item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
+   if (item->last ||
+   item->type == RTE_FLOW_ITEM_TYPE_END) {
+   rte_flow_error_set(error, EINVAL,
+   RTE_FLOW_ERROR_TYPE_ITEM,
+   item, "IP pattern missing.");
+   return -rte_errno;
+   }
+   item = next_no_void_pattern(pattern, item);
+   }
+
+   filter->proto = IPPROTO_ESP;
+   return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
+   item->type == RTE_FLOW_ITEM_TYPE_IPV6);
+   }
+#endif
+
/* the first not void item can be MAC or IPv4 */
item = next_no_void_pattern(pattern, NULL);
 
@@ -563,6 +603,12 @@ txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
if (ret)
return ret;
 
+#ifdef RTE_LIB_SECURITY
+   /* ESP flow not really a flow */
+   if (filter->proto == IPPROTO_ESP)
+   return 0;
+#endif
+
/* txgbe doesn't support tcp flags */
if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
@@ -2690,6 +2736,12 @@ txgbe_flow_create(struct rte_eth_dev *dev,
ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
 
+#ifdef RTE_LIB_SECURITY
+   /* ESP flow not really a flow*/
+   if (ntuple_filter.proto == IPPROTO_ESP)
+   return flow;
+#endif
+
if (!ret) {
ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
if (!ret) {
diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index f8c54f3d4..d0543ce0b 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -658,6 +658,36 @@ txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
return 0;
 }
 
+int
+txgbe_crypto_add_ingress_sa_from_flow(const void *sess,
+ const void *ip_spec,
+ uint8_t is_ipv6)
+{
+   struct txgbe_crypto_session *ic_session =
+   get_sec_session_private_data(sess);
+
+   if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+   if (is_ipv6) {
+   const struct rte_flow_item_ipv6 *ipv6 = ip_spec;
+   ic_session->src_ip.type = IPv6;
+   ic_session->dst_ip.type = IPv6;
+   rte_memcpy(ic_session->src_ip.ipv6,
+  ipv6->hdr.src_addr, 16);
+   rte_memcpy(ic_session->dst_ip.ipv6,
+  ipv6->hdr.dst_addr, 16);
+   } else {
+   const struct rte_flow_item_ipv4 *ipv4 = ip_spec;
+   ic_session->src_ip.type = IPv4;
+   

[dpdk-dev] [PATCH 36/37] net/txgbe: add security offload in Rx and Tx process

2020-11-03 Thread Jiawen Wu
Add security offload in Rx and Tx process.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ipsec.c | 106 
 drivers/net/txgbe/txgbe_ipsec.h |   1 +
 drivers/net/txgbe/txgbe_rxtx.c  |  93 +++-
 drivers/net/txgbe/txgbe_rxtx.h  |  13 
 4 files changed, 211 insertions(+), 2 deletions(-)

diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 0bdd1c061..f8c54f3d4 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -19,6 +19,55 @@
(a).ipv6[2] == (b).ipv6[2] && \
(a).ipv6[3] == (b).ipv6[3])
 
+static void
+txgbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+   int i = 0;
+
+   /* clear Rx IP table*/
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   uint16_t index = i << 3;
+   uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+   TXGBE_IPSRXIDX_TB_IP | index;
+   wr32(hw, TXGBE_IPSRXADDR(0), 0);
+   wr32(hw, TXGBE_IPSRXADDR(1), 0);
+   wr32(hw, TXGBE_IPSRXADDR(2), 0);
+   wr32(hw, TXGBE_IPSRXADDR(3), 0);
+   wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+   }
+
+   /* clear Rx SPI and Rx/Tx SA tables*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   uint32_t index = i << 3;
+   uint32_t reg_val = TXGBE_IPSRXIDX_WRITE |
+   TXGBE_IPSRXIDX_TB_SPI | index;
+   wr32(hw, TXGBE_IPSRXSPI, 0);
+   wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+   wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+   reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_KEY | index;
+   wr32(hw, TXGBE_IPSRXKEY(0), 0);
+   wr32(hw, TXGBE_IPSRXKEY(1), 0);
+   wr32(hw, TXGBE_IPSRXKEY(2), 0);
+   wr32(hw, TXGBE_IPSRXKEY(3), 0);
+   wr32(hw, TXGBE_IPSRXSALT, 0);
+   wr32(hw, TXGBE_IPSRXMODE, 0);
+   wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+   reg_val = TXGBE_IPSTXIDX_WRITE | index;
+   wr32(hw, TXGBE_IPSTXKEY(0), 0);
+   wr32(hw, TXGBE_IPSTXKEY(1), 0);
+   wr32(hw, TXGBE_IPSTXKEY(2), 0);
+   wr32(hw, TXGBE_IPSTXKEY(3), 0);
+   wr32(hw, TXGBE_IPSTXSALT, 0);
+   wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+   }
+
+   memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+   memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+   memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
 static int
 txgbe_crypto_add_sa(struct txgbe_crypto_session *ic_session)
 {
@@ -552,6 +601,63 @@ txgbe_crypto_capabilities_get(void *device __rte_unused)
return txgbe_security_capabilities;
 }
 
+int
+txgbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   uint32_t reg;
+   uint64_t rx_offloads;
+   uint64_t tx_offloads;
+
+   rx_offloads = dev->data->dev_conf.rxmode.offloads;
+   tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+   /* sanity checks */
+   if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+   PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+   return -1;
+   }
+   if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+   PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+   return -1;
+   }
+
+   /* Set TXGBE_SECTXBUFFAF to 0x14 as required in the datasheet*/
+   wr32(hw, TXGBE_SECTXBUFAF, 0x14);
+
+   /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+* hang will occur with heavy traffic.
+*/
+   reg = rd32(hw, TXGBE_SECTXIFG);
+   reg = (reg & ~TXGBE_SECTXIFG_MIN_MASK) | TXGBE_SECTXIFG_MIN(0x3);
+   wr32(hw, TXGBE_SECTXIFG, reg);
+
+   reg = rd32(hw, TXGBE_SECRXCTL);
+   reg |= TXGBE_SECRXCTL_CRCSTRIP;
+   wr32(hw, TXGBE_SECRXCTL, reg);
+
+   if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+   wr32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA, 0);
+   reg = rd32m(hw, TXGBE_SECRXCTL, TXGBE_SECRXCTL_ODSA);
+   if (reg != 0) {
+   PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+   return -1;
+   }
+   }
+   if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+   wr32(hw, TXGBE_SECTXCTL, TXGBE_SECTXCTL_STFWD);
+   reg = rd32(hw, TXGBE_SECTXCTL);
+   if (reg != TXGBE_SECTXCTL_STFWD) {
+   PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+   return -1;
+   }
+   }
+
+   txgbe_crypto_clear_ipsec_tables(dev);
+
+   return 0;
+}

[dpdk-dev] [PATCH 35/37] net/txgbe: support security session destroy

2020-11-03 Thread Jiawen Wu
Add support to clear a security session's private data,
get the size of a security session,
and update the mbuf with provided metadata.

Signed-off-by: Jiawen Wu 
---
 drivers/net/txgbe/txgbe_ipsec.c | 167 
 drivers/net/txgbe/txgbe_ipsec.h |  15 +++
 2 files changed, 182 insertions(+)

diff --git a/drivers/net/txgbe/txgbe_ipsec.c b/drivers/net/txgbe/txgbe_ipsec.c
index 7501e25af..0bdd1c061 100644
--- a/drivers/net/txgbe/txgbe_ipsec.c
+++ b/drivers/net/txgbe/txgbe_ipsec.c
@@ -199,6 +199,106 @@ txgbe_crypto_add_sa(struct txgbe_crypto_session 
*ic_session)
return 0;
 }
 
+static int
+txgbe_crypto_remove_sa(struct rte_eth_dev *dev,
+  struct txgbe_crypto_session *ic_session)
+{
+   struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+   struct txgbe_ipsec *priv = TXGBE_DEV_IPSEC(dev);
+   uint32_t reg_val;
+   int sa_index = -1;
+
+   if (ic_session->op == TXGBE_OP_AUTHENTICATED_DECRYPTION) {
+   int i, ip_index = -1;
+
+   /* Find a match in the IP table*/
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   if (CMP_IP(priv->rx_ip_tbl[i].ip, ic_session->dst_ip)) {
+   ip_index = i;
+   break;
+   }
+   }
+
+   /* Fail if no match*/
+   if (ip_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "Entry not found in the Rx IP table\n");
+   return -1;
+   }
+
+   /* Find a free entry in the SA table*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   if (priv->rx_sa_tbl[i].spi ==
+ rte_cpu_to_be_32(ic_session->spi)) {
+   sa_index = i;
+   break;
+   }
+   }
+   /* Fail if no match*/
+   if (sa_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "Entry not found in the Rx SA table\n");
+   return -1;
+   }
+
+   /* Disable and clear Rx SPI and key table table entryes*/
+   reg_val = TXGBE_IPSRXIDX_WRITE |
+   TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
+   wr32(hw, TXGBE_IPSRXSPI, 0);
+   wr32(hw, TXGBE_IPSRXADDRIDX, 0);
+   wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+   reg_val = TXGBE_IPSRXIDX_WRITE |
+   TXGBE_IPSRXIDX_TB_KEY | (sa_index << 3);
+   wr32(hw, TXGBE_IPSRXKEY(0), 0);
+   wr32(hw, TXGBE_IPSRXKEY(1), 0);
+   wr32(hw, TXGBE_IPSRXKEY(2), 0);
+   wr32(hw, TXGBE_IPSRXKEY(3), 0);
+   wr32(hw, TXGBE_IPSRXSALT, 0);
+   wr32(hw, TXGBE_IPSRXMODE, 0);
+   wr32w(hw, TXGBE_IPSRXIDX, reg_val, TXGBE_IPSRXIDX_WRITE, 1000);
+   priv->rx_sa_tbl[sa_index].used = 0;
+
+   /* If last used then clear the IP table entry*/
+   priv->rx_ip_tbl[ip_index].ref_count--;
+   if (priv->rx_ip_tbl[ip_index].ref_count == 0) {
+   reg_val = TXGBE_IPSRXIDX_WRITE | TXGBE_IPSRXIDX_TB_IP |
+   (ip_index << 3);
+   wr32(hw, TXGBE_IPSRXADDR(0), 0);
+   wr32(hw, TXGBE_IPSRXADDR(1), 0);
+   wr32(hw, TXGBE_IPSRXADDR(2), 0);
+   wr32(hw, TXGBE_IPSRXADDR(3), 0);
+   }
+   } else { /* session->dir == RTE_CRYPTO_OUTBOUND */
+   int i;
+
+   /* Find a match in the SA table*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   if (priv->tx_sa_tbl[i].spi ==
+   rte_cpu_to_be_32(ic_session->spi)) {
+   sa_index = i;
+   break;
+   }
+   }
+   /* Fail if no match entries*/
+   if (sa_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "Entry not found in the Tx SA table\n");
+   return -1;
+   }
+   reg_val = TXGBE_IPSRXIDX_WRITE | (sa_index << 3);
+   wr32(hw, TXGBE_IPSTXKEY(0), 0);
+   wr32(hw, TXGBE_IPSTXKEY(1), 0);
+   wr32(hw, TXGBE_IPSTXKEY(2), 0);
+   wr32(hw, TXGBE_IPSTXKEY(3), 0);
+   wr32(hw, TXGBE_IPSTXSALT, 0);
+   wr32w(hw, TXGBE_IPSTXIDX, reg_val, TXGBE_IPSTXIDX_WRITE, 1000);
+
+   priv->tx_sa_tbl[sa_index].used = 0;
+   }
+
+   return 0;
+}
+
 static int
 txgbe_crypto_create_session(void *device,
struct rte_security_session_conf *conf,
@@ -262,6 +362,70 @@ txgbe_crypto_create_session(void *d

Re: [dpdk-dev] [PATCH v3] build: add pkg-config validation

2020-11-03 Thread Bruce Richardson
On Mon, Nov 02, 2020 at 09:34:26PM +0200, Gregory Etelson wrote:
> DPDK relies on pkg-config(1) to provide correct parameters for
> compiler and linker used in application build.
> Inaccurate build parameters, produced by pkg-config from DPDK .pc
> files could fail application build or cause unpredicted results
> during application runtime.
> 
> This patch validates host pkg-config utility and notifies about
> known issues.
> 
> Signed-off-by: Gregory Etelson 

All looks reasonably ok to me. Some suggestions inline below which might
shorten and simplify the script a bit.

Acked-by: Bruce Richardson 

> ---
>  buildtools/pkg-config/meson.build   | 11 ++
>  buildtools/pkg-config/pkgconfig-validate.sh | 43 +
>  doc/guides/linux_gsg/sys_reqs.rst   |  5 +++
>  3 files changed, 59 insertions(+)
>  create mode 100755 buildtools/pkg-config/pkgconfig-validate.sh
> 
> diff --git a/buildtools/pkg-config/meson.build 
> b/buildtools/pkg-config/meson.build
> index 5f19304289..4f907d7638 100644
> --- a/buildtools/pkg-config/meson.build
> +++ b/buildtools/pkg-config/meson.build
> @@ -53,3 +53,14 @@ This is required for a number of static inline functions 
> in the public headers.'
>  # For static linking with dependencies as shared libraries,
>  # the internal static libraries must be flagged explicitly.
>  run_command(py3, 'set-static-linker-flags.py', check: true)
> +
> +pkgconf = find_program('pkg-config', 'pkgconf', required: false)
> +if (pkgconf.found())
> + cmd = run_command('./pkgconfig-validate.sh', pkgconf.path(),
> +check:false)
> + if cmd.returncode() != 0
> + version = run_command(pkgconf, '--version')
> + warning('invalid pkg-config version @0@'.format(
> + version.stdout().strip()))
> + endif
> +endif
> diff --git a/buildtools/pkg-config/pkgconfig-validate.sh 
> b/buildtools/pkg-config/pkgconfig-validate.sh
> new file mode 100755
> index 00..4b3bd2a9e3
> --- /dev/null
> +++ b/buildtools/pkg-config/pkgconfig-validate.sh
> @@ -0,0 +1,43 @@
> +#! /bin/sh
> +# SPDX-License-Identifier: BSD-3-Clause
> +
> +# Statically linked private DPDK objects of form
> +# -l:file.a must be positionned between --whole-archive … --no-whole-archive
> +# linker parameters.
> +# Old pkg-config versions misplace --no-whole-archive parameter and put it
> +# next to --whole-archive.
> +test1_static_libs_order () {
> + PKG_CONFIG_PATH="$PKG_CONFIG_PATH" \
> + "$PKGCONF" --libs --static libdpdk | \
> + grep -q 'whole-archive.*l:lib.*no-whole-archive'
> + if test "$?" -ne 0 ; then
> + echo "WARNING: invalid static libraries order"
> + ret=1

Why not just set "ret=$?" before the condition check? Save having to
pre-init ret to 0 and having it as a global variable.

Also, since the meson.build file has the error printout, you can consider
dropping the warning text too, in which case you can have the function just
return the return-code from grep itself.

> + fi
> + return $ret
> +}
> +
> +if [ "$#" -ne 1 ]; then
> + echo "$0: no pkg-config parameter"
> + exit 1
> +fi
> +PKGCONF="$1"
> +
> +$PKGCONF --exists libdpdk
> +if [ $? -ne 0 ]; then
> + # pkgconf could not locate libdpdk.pc from existing PKG_CONFIG_PATH
> + # check meson template instead

Why bother checking first? Since all we care about is the pkg-config
behaviour, we can just always add on the path to PKG_CONFIG_PATH and
guarantee that way a dpdk file will be found.

> + pc_file=$(find "$MESON_BUILD_ROOT" -type f -name 'libdpdk.pc' -quit)
> + if [ ! -f "$pc_file" ]; then
> + echo "$0: cannot locate libdpdk.pc"
> + exit 1
> + fi
> + pc_dir=$(dirname "$pc_file")
> + PKG_CONFIG_PATH="${PKG_CONFIG_PATH}:$pc_dir"
> +fi
> +
> +ret=0
> +test1_static_libs_order
> +if [ $ret -ne 0 ]; then
> + exit $ret
> +fi

Rather than branching, why not just call "exit $ret"?

Given that the return code from the script will be the result of the last
command run, and if we are ok to dropping the print of the warning, I think
the test function can be dropped and the last line of the script just made
the call to pkg-config and grep.

> diff --git a/doc/guides/linux_gsg/sys_reqs.rst 
> b/doc/guides/linux_gsg/sys_reqs.rst
> index 6ecdc04aa9..b67da05e13 100644
> --- a/doc/guides/linux_gsg/sys_reqs.rst
> +++ b/doc/guides/linux_gsg/sys_reqs.rst
> @@ -60,6 +60,11 @@ Compilation of the DPDK
>  
>  *   Linux kernel headers or sources required to build kernel modules.
>  
> +
> +**Known Issues:**
> +
> +*   pkg-config v0.27 supplied with RHEL-7 does not process correctly 
> libdpdk.pc Libs.private section.
> +
>  .. note::
>  
> Please ensure that the latest patches are applied to third party libraries
> -- 
> 2.28.0
> 


Re: [dpdk-dev] [PATCH v3 08/16] net/nfb: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Olivier Matz
On Tue, Nov 03, 2020 at 01:13:59AM +0100, Thomas Monjalon wrote:
> The mbuf timestamp is moved to a dynamic field
> in order to allow removal of the deprecated static field.
> The related mbuf flag is also replaced.
> 
> Signed-off-by: Thomas Monjalon 
> ---
>  drivers/net/nfb/nfb_rx.c | 15 ++-
>  drivers/net/nfb/nfb_rx.h | 18 ++
>  2 files changed, 28 insertions(+), 5 deletions(-)

<...>

> index cf3899b2fb..e548226e0f 100644
> --- a/drivers/net/nfb/nfb_rx.h
> +++ b/drivers/net/nfb/nfb_rx.h
> @@ -15,6 +15,16 @@
>  
>  #define NFB_TIMESTAMP_FLAG (1 << 0)
>  
> +extern uint64_t nfb_timestamp_rx_dynflag;
> +extern int nfb_timestamp_dynfield_offset;
> +
> +static inline rte_mbuf_timestamp_t *
> +nfb_timestamp_dynfield(struct rte_mbuf *mbuf)
> +{
> + return RTE_MBUF_DYNFIELD(mbuf,
> + nfb_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
> +}
> +
>  struct ndp_rx_queue {
>   struct nfb_device *nfb;  /* nfb dev structure */
>   struct ndp_queue *queue; /* rx queue */
> @@ -191,15 +201,15 @@ nfb_eth_ndp_rx(void *queue,
>  
>   if (timestamping_enabled) {
>   /* nanoseconds */
> - mbuf->timestamp =
> + *nfb_timestamp_dynfield(mbuf) =
>   rte_le_to_cpu_32(*((uint32_t *)
>   (packets[i].header + 4)));
> - mbuf->timestamp <<= 32;
> + *nfb_timestamp_dynfield(mbuf) <<= 32;
>   /* seconds */
> - mbuf->timestamp |=
> + *nfb_timestamp_dynfield(mbuf) |=
>   rte_le_to_cpu_32(*((uint32_t *)
>   (packets[i].header + 8)));
> - mbuf->ol_flags |= PKT_RX_TIMESTAMP;
> + mbuf->ol_flags |= nfb_timestamp_rx_dynflag;
>   }
>  
>   bufs[num_rx++] = mbuf;

I think it would be better with a local variable.


Re: [dpdk-dev] performance degradation with fpic

2020-11-03 Thread Thomas Monjalon
02/11/2020 16:00, Bruce Richardson:
> On Mon, Nov 02, 2020 at 10:40:54AM +, Ali Alnubani wrote:
> > Hi Bruce,
> > 
> > I was able to pin this down on drivers/net/mlx5/mlx5_rxtx.c. Removing -fPIC 
> > from its ninja recipe in build.ninja resolves the issue (had to prevent 
> > creating shared libs in this case).
> > What do you suggest I do? Can we have per-pmd customized compilation flags?
> > 
> > Regards,
> > Ali
> > 
> There are multiple possible ways to achieve this, but below are some ideas:
> 
> 1. Take the changes for supporting function versioning and duplicate them
> from lib/meson.build to drivers/meson.build. Since function versioning
> support already requires everything to be built twice, we could set it to
> not use -fpic for the static libs in that case. Then mark mlx5 as using
> function versioning. This is a bit hackish though, so
> 
> 2. The "objs" parameter from each sub-directory is not widely used, so we
> could split this easily enough into objs-shared and objs-static, and allow
> the subdirectory build file, in this case mlx5/meson.ninja, to build any c
> files manually to pass them back. This is more flexible, and also means
> that you can limit the files which are to be built twice to only the single
> file, rather than marking the whole driver as needing rebuild.

Can it be done only in the driver?
No general meson change for this option?

> I'm sure there are other approaches too. However, I agree with Luca's
> comment that first approach should probably be to see if you can track down
> exactly why this one file is having problems. Could any of the slowdown be
> due to the fact that you use a common lib from your driver? Are there
> cross-driver calls in the fast-path that are suffering a penalty?

Of course the performance will be analyzed in the long run.
However, such analyzis is more convenient if meson is flexible enough
to allow customization of the build.
And in general, I think it is good to have meson flexible
to allow any kind of driver build customization.




Re: [dpdk-dev] [PATCH v3 11/16] app/testpmd: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Olivier Matz
On Tue, Nov 03, 2020 at 01:14:02AM +0100, Thomas Monjalon wrote:
> The mbuf timestamp is moved to a dynamic field
> in order to allow removal of the deprecated static field.
> The related mbuf flag is also replaced.
> 
> Signed-off-by: Thomas Monjalon 
> ---
>  app/test-pmd/util.c | 39 +--
>  1 file changed, 37 insertions(+), 2 deletions(-)
> 
> diff --git a/app/test-pmd/util.c b/app/test-pmd/util.c
> index 781a813759..eebb5166ad 100644
> --- a/app/test-pmd/util.c
> +++ b/app/test-pmd/util.c
> @@ -5,6 +5,7 @@
>  
>  #include 
>  
> +#include 
>  #include 
>  #include 
>  #include 
> @@ -22,6 +23,40 @@ print_ether_addr(const char *what, const struct 
> rte_ether_addr *eth_addr)
>   printf("%s%s", what, buf);
>  }
>  
> +static inline bool
> +is_timestamp_enabled(const struct rte_mbuf *mbuf)
> +{
> + static uint64_t timestamp_rx_dynflag;
> +
> + int timestamp_rx_dynflag_offset;

unneeded blank line

> +
> + if (timestamp_rx_dynflag == 0) {
> + timestamp_rx_dynflag_offset = rte_mbuf_dynflag_lookup(
> + RTE_MBUF_DYNFLAG_RX_TIMESTAMP_NAME, NULL);
> + if (timestamp_rx_dynflag_offset < 0)
> + return false;
> + timestamp_rx_dynflag = RTE_BIT64(timestamp_rx_dynflag_offset);
> + }
> +
> + return (mbuf->ol_flags & timestamp_rx_dynflag) != 0;
> +}
> +
> +static inline rte_mbuf_timestamp_t
> +get_timestamp(const struct rte_mbuf *mbuf)
> +{
> + static int timestamp_dynfield_offset = -1;
> +
> + if (timestamp_dynfield_offset < 0) {
> + timestamp_dynfield_offset = rte_mbuf_dynfield_lookup(
> + RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
> + if (timestamp_dynfield_offset < 0)
> + return 0;
> + }
> +
> + return *RTE_MBUF_DYNFIELD(mbuf,
> + timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
> +}
> +
>  static inline void
>  dump_pkt_burst(uint16_t port_id, uint16_t queue, struct rte_mbuf *pkts[],
> uint16_t nb_pkts, int is_rx)
> @@ -107,8 +142,8 @@ dump_pkt_burst(uint16_t port_id, uint16_t queue, struct 
> rte_mbuf *pkts[],
>   printf("hash=0x%x ID=0x%x ",
>  mb->hash.fdir.hash, mb->hash.fdir.id);
>   }
> - if (ol_flags & PKT_RX_TIMESTAMP)
> - printf(" - timestamp %"PRIu64" ", mb->timestamp);
> + if (is_timestamp_enabled(mb))
> + printf(" - timestamp %"PRIu64" ", get_timestamp(mb));
>   if (ol_flags & PKT_RX_QINQ)
>   printf(" - QinQ VLAN tci=0x%x, VLAN tci outer=0x%x",
>  mb->vlan_tci, mb->vlan_tci_outer);
> -- 
> 2.28.0
> 


Re: [dpdk-dev] [PATCH 3/8] doc: fix driver names in crypto devices guide

2020-11-03 Thread Bruce Richardson
On Mon, Nov 02, 2020 at 09:02:26PM +0100, David Marchand wrote:
> On Mon, Nov 2, 2020 at 9:00 PM David Marchand  
> wrote:
> > We still have:
> > doc/guides/cryptodevs/mvsam.rst:The MVSAM CRYPTO PMD
> > (**librte_crypto_mvsam_pmd**) provides poll mode crypto driver
> 
> Ah.. and:
> doc/guides/cryptodevs/snow3g.rst:The SNOW3G PMD
> (**librte_snow3g_zuc**) provides poll mode crypto driver support for
> 
> Grepping librte_ is a better check than pmd.
> 
I'll fix these missing 2 in v2.

/Bruce


Re: [dpdk-dev] [PATCH 1/8] regex/octeontx2: fix unnecessary name override

2020-11-03 Thread Bruce Richardson
On Tue, Nov 03, 2020 at 10:45:37AM +0100, David Marchand wrote:
> On Tue, Nov 3, 2020 at 10:19 AM David Marchand
>  wrote:
> >
> > On Tue, Nov 3, 2020 at 10:06 AM Thomas Monjalon  wrote:
> > >
> > > 03/11/2020 09:19, David Marchand:
> > > > On Tue, Nov 3, 2020 at 1:30 AM Thomas Monjalon  
> > > > wrote:
> > > > > > > -name = 'octeontx2_regex'
> > > > >
> > > > > But it is not the same?
> > > > >
> > > > > The name will default to "octeontx2", which is fine.
> > > > > But the fmt_name should not take this default.
> > > > > I believe fmt_name should be "octeontx2_regex" as I did in my patch.
> > > >
> > > > fmt_name is only for maintaining config compat.
> > > > This driver is new to 20.11.
> > > > We can drop fmt_name too.
> > >
> > > If we don't set fmt_name, it defaults to "name", "octeontx2" here.
> > > What is the consequence in compat definitions?
> >
> 
> Ok, got it, the problem is when we disable the net/octeontx2 driver.
> Your patch correctly sets a RTE_LIBRTE_OCTEONTX2_REGEX_PMD compat
> option that is unused but that does not overwrite the
> RTE_LIBRTE_OCTEONTX2_PMD compat option (which indicates the presence
> of the net/octeontx2 driver).
> 
Yes, I forgot about compatibility macro settings. Given the context of this
patchset, I was instead just looking at the library filesnames.

I'll drop this patch from v2, and you can take Thomas' instead.

/Bruce


[dpdk-dev] [PATCH] net/mlx5: fix aging queue doorbell ringing

2020-11-03 Thread Dekel Peled
Recent patch introduced a new SQ for ASO flow hit management.
This SQ uses two WQEBB's for each WQE.
The SQ producer index is 16 bits wide.

The enqueue loop posts new WQEs to the ASO SQ, using WQE index for
the SQ management.
This 16 bits index multiplied by 2 was wrongly used also for SQ
doorbell ringing.
The multiplication caused the SW index overlapping to be out of sync
with the hardware index, causing it to get stuck.

This patch separates the WQE index management from the doorbell index
management.
So, for each WQE index incrementation by 1, the doorbell index is
incremented by 2.

Fixes: 18c88cf29c29 ("net/mlx5: support flow hit action for aging")

Signed-off-by: Dekel Peled 
Acked-by: Matan Azrad 
---
 drivers/common/mlx5/mlx5_prm.h   | 21 +--
 drivers/net/mlx5/mlx5.h  |  3 ++-
 drivers/net/mlx5/mlx5_flow_age.c | 36 ++--
 3 files changed, 37 insertions(+), 23 deletions(-)

diff --git a/drivers/common/mlx5/mlx5_prm.h b/drivers/common/mlx5/mlx5_prm.h
index 10f9b18d1b..58d180486e 100644
--- a/drivers/common/mlx5/mlx5_prm.h
+++ b/drivers/common/mlx5/mlx5_prm.h
@@ -293,6 +293,15 @@ struct mlx5_wqe_cseg {
uint32_t misc;
 } __rte_packed __rte_aligned(MLX5_WSEG_SIZE);
 
+/*
+ * WQE CSEG opcode field size is 32 bits, divided:
+ * Bits 31:24 OPC_MOD
+ * Bits 23:8 wqe_index
+ * Bits 7:0 OPCODE
+ */
+#define WQE_CSEG_OPC_MOD_OFFSET 24
+#define WQE_CSEG_WQE_INDEX_OFFSET   8
+
 /* Header of data segment. Minimal size Data Segment */
 struct mlx5_wqe_dseg {
uint32_t bcount;
@@ -2359,12 +2368,12 @@ struct mlx5_ifc_create_flow_hit_aso_in_bits {
struct mlx5_ifc_flow_hit_aso_bits flow_hit_aso;
 };
 
-enum mlx5_access_aso_op_mod {
-   ASO_OP_MOD_IPSEC = 0x0,
-   ASO_OP_MOD_CONNECTION_TRACKING = 0x1,
-   ASO_OP_MOD_POLICER = 0x2,
-   ASO_OP_MOD_RACE_AVOIDANCE = 0x3,
-   ASO_OP_MOD_FLOW_HIT = 0x4,
+enum mlx5_access_aso_opc_mod {
+   ASO_OPC_MOD_IPSEC = 0x0,
+   ASO_OPC_MOD_CONNECTION_TRACKING = 0x1,
+   ASO_OPC_MOD_POLICER = 0x2,
+   ASO_OPC_MOD_RACE_AVOIDANCE = 0x3,
+   ASO_OPC_MOD_FLOW_HIT = 0x4,
 };
 
 #define ASO_CSEG_DATA_MASK_MODE_OFFSET 30
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63d263384b..83beee3610 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -516,7 +516,8 @@ struct mlx5_aso_sq {
volatile uint64_t *uar_addr;
struct mlx5_aso_devx_mr mr;
uint16_t pi;
-   uint16_t ci;
+   uint32_t head;
+   uint32_t tail;
uint32_t sqn;
struct mlx5_aso_sq_elem elts[1 << MLX5_ASO_QUEUE_LOG_DESC];
uint16_t next; /* Pool index of the next pool to query. */
diff --git a/drivers/net/mlx5/mlx5_flow_age.c b/drivers/net/mlx5/mlx5_flow_age.c
index 0b7fa46e2a..829094d9cf 100644
--- a/drivers/net/mlx5/mlx5_flow_age.c
+++ b/drivers/net/mlx5/mlx5_flow_age.c
@@ -321,8 +321,9 @@ mlx5_aso_sq_create(void *ctx, struct mlx5_aso_sq *sq, int 
socket,
rte_errno  = ENOMEM;
goto error;
}
-   sq->ci = 0;
sq->pi = 0;
+   sq->head = 0;
+   sq->tail = 0;
sq->sqn = sq->sq->id;
sq->db_rec = RTE_PTR_ADD(sq->umem_buf, (uintptr_t)(wq_attr->dbr_addr));
sq->uar_addr = (volatile uint64_t *)((uint8_t *)sq->uar_obj->base_addr +
@@ -382,20 +383,20 @@ mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, 
uint16_t n)
uint16_t size = 1 << sq->log_desc_n;
uint16_t mask = size - 1;
uint16_t max;
-   uint16_t start_pi = sq->pi;
+   uint16_t start_head = sq->head;
 
-   max = RTE_MIN(size - (uint16_t)(sq->pi - sq->ci), n - sq->next);
+   max = RTE_MIN(size - (uint16_t)(sq->head - sq->tail), n - sq->next);
if (unlikely(!max))
return 0;
-   sq->elts[start_pi & mask].burst_size = max;
+   sq->elts[start_head & mask].burst_size = max;
do {
-   wqe = &sq->wqes[sq->pi & mask];
-   rte_prefetch0(&sq->wqes[(sq->pi + 1) & mask]);
+   wqe = &sq->wqes[sq->head & mask];
+   rte_prefetch0(&sq->wqes[(sq->head + 1) & mask]);
/* Fill next WQE. */
rte_spinlock_lock(&mng->resize_sl);
pool = mng->pools[sq->next];
rte_spinlock_unlock(&mng->resize_sl);
-   sq->elts[sq->pi & mask].pool = pool;
+   sq->elts[sq->head & mask].pool = pool;
wqe->general_cseg.misc =
rte_cpu_to_be_32(((struct mlx5_devx_obj *)
 (pool->flow_hit_aso_obj))->id);
@@ -403,20 +404,23 @@ mlx5_aso_sq_enqueue_burst(struct mlx5_aso_age_mng *mng, 
uint16_t n)
 MLX5_COMP_MODE_OFFSET);
wqe->general_cseg.opcode = rte_cpu_to_be_32
(MLX5_OPCODE_ACCESS_ASO |
- 

Re: [dpdk-dev] [pull-request] next-eventdev 20.11 RC2

2020-11-03 Thread Thomas Monjalon
02/11/2020 15:30, Jerin Jacob Kollanukkaran:
>   http://dpdk.org/git/next/dpdk-next-eventdev

Pulled with discussed change, thanks





Re: [dpdk-dev] performance degradation with fpic

2020-11-03 Thread Luca Boccassi
On Tue, 2020-11-03 at 11:18 +0100, Thomas Monjalon wrote:
> 02/11/2020 16:00, Bruce Richardson:
> > On Mon, Nov 02, 2020 at 10:40:54AM +, Ali Alnubani wrote:
> > > Hi Bruce,
> > > 
> > > I was able to pin this down on drivers/net/mlx5/mlx5_rxtx.c. Removing 
> > > -fPIC from its ninja recipe in build.ninja resolves the issue (had to 
> > > prevent creating shared libs in this case).
> > > What do you suggest I do? Can we have per-pmd customized compilation 
> > > flags?
> > > 
> > > Regards,
> > > Ali
> > > 
> > There are multiple possible ways to achieve this, but below are some ideas:
> > 
> > 1. Take the changes for supporting function versioning and duplicate them
> > from lib/meson.build to drivers/meson.build. Since function versioning
> > support already requires everything to be built twice, we could set it to
> > not use -fpic for the static libs in that case. Then mark mlx5 as using
> > function versioning. This is a bit hackish though, so
> > 
> > 2. The "objs" parameter from each sub-directory is not widely used, so we
> > could split this easily enough into objs-shared and objs-static, and allow
> > the subdirectory build file, in this case mlx5/meson.ninja, to build any c
> > files manually to pass them back. This is more flexible, and also means
> > that you can limit the files which are to be built twice to only the single
> > file, rather than marking the whole driver as needing rebuild.
> 
> Can it be done only in the driver?
> No general meson change for this option?
> 
> > I'm sure there are other approaches too. However, I agree with Luca's
> > comment that first approach should probably be to see if you can track down
> > exactly why this one file is having problems. Could any of the slowdown be
> > due to the fact that you use a common lib from your driver? Are there
> > cross-driver calls in the fast-path that are suffering a penalty?
> 
> Of course the performance will be analyzed in the long run.
> However, such analyzis is more convenient if meson is flexible enough
> to allow customization of the build.
> And in general, I think it is good to have meson flexible
> to allow any kind of driver build customization.

The problem is with the specific case, not with general customizations.
IIRC all libraries must have fpic to build a relocatable executable -
you cannot mix and match. Missing this feature means no address layout
randomization, which is really bad especially for a network
application.

-- 
Kind regards,
Luca Boccassi


Re: [dpdk-dev] [PATCH] doc: announce end of support for some Broadcom devices

2020-11-03 Thread Kevin Traynor
Hi Ajit,

On 26/10/2020 21:46, Ajit Khaparde wrote:
> Devices belonging to BCM573xx and BCM5740x family will not be supported
> from the 21.02 release.
> 
> Signed-off-by: Ajit Khaparde 
> ---
>  doc/guides/rel_notes/deprecation.rst | 6 ++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/doc/guides/rel_notes/deprecation.rst 
> b/doc/guides/rel_notes/deprecation.rst
> index 2e082499b8..f1fce4210d 100644
> --- a/doc/guides/rel_notes/deprecation.rst
> +++ b/doc/guides/rel_notes/deprecation.rst
> @@ -166,3 +166,9 @@ Deprecation Notices
>``make``. Given environments are too much variables for such a simple 
> script,
>it will be removed in DPDK 20.11.
>Some useful parts may be converted into specific scripts.
> +
> +* Broadcom bnxt PMD: NetXtreme devices belonging to the ``BCM573xx and
> +  BCM5740x`` families will no longer be supported as of DPDK 21.02.
> +  Specifically the support for the following Broadcom PCI ids will be removed
> +  from the release: ``0x16c8, 0x16c9, 0x16ca, 0x16ce, 0x16cf, 0x16df,``
> +  ``0x16d0, 0x16d1, 0x16d2, 0x16d4, 0x16d5, 0x16e7, 0x16e8, 0x16e9``.
> 

It might be worth adding to the bnxt.rst section about these NICs, as
there is no hint that they are deprecated for a user reading that.

Where I'm not clear is about 20.11. It will be an LTS, will they be
supported for the lifetime of 20.11 on the 20.11 branch?

Kevin.



Re: [dpdk-dev] [PATCH] regex/octeontx2: fix driver name

2020-11-03 Thread Thomas Monjalon
23/10/2020 19:41, Thomas Monjalon:
> Following the recent alignment of all driver names,
> this new driver get unaligned:
>   librte_regex_octeontx2_regex.so
> 
> The 'fmt_name' must be "octeontx2_regex", and if not provided,
> is taken from the 'name' variable.
> But the variable 'name' should not be overwritten,
> to keep the automatic value from the directory name.
> 
> The library name will be composed of the class directory
> and the driver directory name:
>   librte_regex_octeontx2.so
> 
> Reported-by: Christian Ehrhardt 
> Signed-off-by: Thomas Monjalon 
> Acked-by: David Marchand 

Applied




Re: [dpdk-dev] [PATCH v3 09/16] net/octeontx2: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Harman Kalra
On Tue, Nov 03, 2020 at 01:14:00AM +0100, Thomas Monjalon wrote:
> The mbuf timestamp is moved to a dynamic field
> in order to allow removal of the deprecated static field.
> The related mbuf flag is also replaced.
> 
> The dynamic offset and flag are stored in struct otx2_timesync_info
> to favor cache locality.
> 
> Signed-off-by: Thomas Monjalon 
Hi Thomas,

   With the following changes, ptpclient and testpmd (ieee1588 mode) are
   crashing for us. I am debugging the issue and will update soon.
  --
   Steps to reproduce:
   1. Testpmd:
  ./dpdk-testpmd -c 0x01 -n 4 -w 0002:05:00.0 -- -i
  --port-topology=loop
  testpmd> set fwd ieee1588
  testpmd> set port 0 ptype_mask 0xf
  testpmd> start

  I am sending ptp packets using scapy from the peer:
  >>> p = Ether(src='98:03:9b:67:b0:d0', dst='FA:62:0C:27:AD:BC',
  >>> type=35063)/Raw(load='\x00\x02')
  >>> sendp (p, iface="p5p2")

  I am observing seg fault even for 1 packet.

2. ./ptpclient -l 1 -n 4 -w 0002:05:00.0 -- -p 0xf-- on board
   ptp4l -E -2 -H -i p5p2  -m -q -p /dev/ptp4 ... on peer

Thanks
Harman

> ---
>  drivers/net/octeontx2/otx2_ethdev.c | 10 ++
>  drivers/net/octeontx2/otx2_rx.h | 19 ---
>  2 files changed, 26 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/net/octeontx2/otx2_ethdev.c 
> b/drivers/net/octeontx2/otx2_ethdev.c
> index cfb733a4b5..f6962be9b2 100644
> --- a/drivers/net/octeontx2/otx2_ethdev.c
> +++ b/drivers/net/octeontx2/otx2_ethdev.c
> @@ -2219,6 +2219,16 @@ otx2_nix_dev_start(struct rte_eth_dev *eth_dev)
>   else
>   otx2_nix_timesync_disable(eth_dev);
>  
> + if (dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F) {
> + rc = rte_mbuf_dyn_rx_timestamp_register(
> + &dev->tstamp.tstamp_dynfield_offset,
> + &dev->tstamp.rx_tstamp_dynflag);
> + if (rc != 0) {
> + otx2_err("Failed to register Rx timestamp field/flag");
> + return -rte_errno;
> + }
> + }
> +
>   /* Update VF about data off shifted by 8 bytes if PTP already
>* enabled in PF owning this VF
>*/
> diff --git a/drivers/net/octeontx2/otx2_rx.h b/drivers/net/octeontx2/otx2_rx.h
> index 61a5c436dd..926f614a4e 100644
> --- a/drivers/net/octeontx2/otx2_rx.h
> +++ b/drivers/net/octeontx2/otx2_rx.h
> @@ -49,6 +49,8 @@ struct otx2_timesync_info {
>   uint64_trx_tstamp;
>   rte_iova_t  tx_tstamp_iova;
>   uint64_t*tx_tstamp;
> + uint64_trx_tstamp_dynflag;
> + int tstamp_dynfield_offset;
>   uint8_t tx_ready;
>   uint8_t rx_ready;
>  } __rte_cache_aligned;
> @@ -63,6 +65,14 @@ union mbuf_initializer {
>   uint64_t value;
>  };
>  
> +static inline rte_mbuf_timestamp_t *
> +otx2_timestamp_dynfield(struct rte_mbuf *mbuf,
> + struct otx2_timesync_info *info)
> +{
> + return RTE_MBUF_DYNFIELD(mbuf,
> + info->tstamp_dynfield_offset, rte_mbuf_timestamp_t *);
> +}
> +
>  static __rte_always_inline void
>  otx2_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
>   struct otx2_timesync_info *tstamp, const uint16_t flag,
> @@ -77,15 +87,18 @@ otx2_nix_mbuf_to_tstamp(struct rte_mbuf *mbuf,
>   /* Reading the rx timestamp inserted by CGX, viz at
>* starting of the packet data.
>*/
> - mbuf->timestamp = rte_be_to_cpu_64(*tstamp_ptr);
> + *otx2_timestamp_dynfield(mbuf, tstamp) =
> + rte_be_to_cpu_64(*tstamp_ptr);
>   /* PKT_RX_IEEE1588_TMST flag needs to be set only in case
>* PTP packets are received.
>*/
>   if (mbuf->packet_type == RTE_PTYPE_L2_ETHER_TIMESYNC) {
> - tstamp->rx_tstamp = mbuf->timestamp;
> + tstamp->rx_tstamp =
> + *otx2_timestamp_dynfield(mbuf, tstamp);
>   tstamp->rx_ready = 1;
>   mbuf->ol_flags |= PKT_RX_IEEE1588_PTP |
> - PKT_RX_IEEE1588_TMST | PKT_RX_TIMESTAMP;
> + PKT_RX_IEEE1588_TMST |
> + tstamp->rx_tstamp_dynflag;
>   }
>   }
>  }
> -- 
> 2.28.0
> 


Re: [dpdk-dev] [PATCH v4 2/6] build: refactor Arm build

2020-11-03 Thread Juraj Linkeš


> -Original Message-
> From: Honnappa Nagarahalli 
> Sent: Monday, November 2, 2020 8:32 PM
> To: Juraj Linkeš ; bruce.richard...@intel.com;
> Ruifeng Wang ; Phil Yang ;
> vcchu...@amazon.com; Dharmik Thakkar ;
> jerinjac...@gmail.com; hemant.agra...@nxp.com
> Cc: dev@dpdk.org; nd ; Honnappa Nagarahalli
> ; nd 
> Subject: RE: [PATCH v4 2/6] build: refactor Arm build
> 
> 
> 
> > > > >
> > > > > > diff --git a/config/arm/meson.build b/config/arm/meson.build
> > > > > > index
> > > > > > 491842cad..6c31ab167 100644
> > > > > > --- a/config/arm/meson.build
> > > > > > +++ b/config/arm/meson.build
> > > > > > @@ -3,12 +3,12 @@
> > > > > >  # Copyright(c) 2017 Cavium, Inc  # Copyright(c) 2020
> > > > > > PANTHEON.tech s.r.o.
> > > > > >
> > > > > > -# for checking defines we need to use the correct compiler
> > > > > > flags -march_opt = '-march=@0@'.format(machine)
> > > > > > -
> > > > > > +# set arm_force_native_march if you want to use machine args
> > > > > > +below # instead of discovered values; only works when doing
> > > > > > +an actual native build
> > > > > >  arm_force_native_march = false -arm_force_generic_march =
> > > > > > (machine == 'generic')
> > > > > > +native_machine_args = ['-march=native', '-mtune=native']
> > > > > >
> > > > >
> > > > > [...]
> > > > >
> > > > > > -
> > > > > > -machine_args_default = [
> > > > > > -   ['default', ['-march=armv8-a+crc', '-moutline-atomics']],
> > > > > > -   ['native', ['-march=native']],
> > > > > > -   ['0xd03', ['-mcpu=cortex-a53']],
> > > > > > -   ['0xd04', ['-mcpu=cortex-a35']],
> > > > > > -   ['0xd07', ['-mcpu=cortex-a57']],
> > > > > > -   ['0xd08', ['-mcpu=cortex-a72']],
> > > > > > -   ['0xd09', ['-mcpu=cortex-a73']],
> > > > > > -   ['0xd0a', ['-mcpu=cortex-a75']],
> > > > > > -   ['0xd0b', ['-mcpu=cortex-a76']],
> > > > > > -   ['0xd0c', ['-march=armv8.2-a+crc+crypto', '-mcpu=neoverse-
> > n1'],
> > > > > > flags_n1sdp_extra]]
> > > > > > -
> > > > > > -machine_args_cavium = [
> > > > > > -   ['default', ['-march=armv8-a+crc+crypto','-mcpu=thunderx']],
> > > > > > -   ['native', ['-march=native']],
> > > > > > -   ['0xa1', ['-mcpu=thunderxt88'], flags_thunderx_extra],
> > > > > > -   ['0xa2', ['-mcpu=thunderxt81'], flags_thunderx_extra],
> > > > > > -   ['0xa3', ['-mcpu=thunderxt83'], flags_thunderx_extra],
> > > > > > -   ['0xaf', ['-march=armv8.1-a+crc+crypto','-
> > mcpu=thunderx2t99'],
> > > > > > flags_thunderx2_extra],
> > > > > > -   ['0xb2', ['-march=armv8.2-a+crc+crypto+lse','-
> > mcpu=octeontx2'],
> > > > > > flags_octeontx2_extra]]
> > > > > > -
> > > > > > -machine_args_emag = [
> > > > > > -   ['default', ['-march=armv8-a+crc+crypto', '-mtune=emag']],
> > > > > > -   ['native', ['-march=native']]]
> > > > > > +   ['RTE_USE_C11_MEM_MODEL', true] ] # arm config
> > (implementer
> > > > > > +0x41) is the default config pn_config_default
> > > > > What does it mean by 'default' here? I am somewhat confused
> > > > > between
> > > > 'default'
> > > > > and 'generic'. We should look to remove 'default' as much as
> > > > > possible and stick with 'generic'.
> > > > >
> > > >
> > > > This default means what default means, no special meaning, that is
> > > > if there isn't a more specific configuration, default to this one.
> > > > It's possible that generic is better, but now that I think about
> > > > it, using something else than default or generic might be the best
> > > > to avoid confusion. Possibly just part_number_arm, which will make
> > > > it in line with the
> > > other var names.
> > > Agree, better to call it 'part_number_arm'.
> > >
> > > >
> > > > > > += {
> > > > > > +   'generic': [['-march=armv8-a+crc', '-moutline-atomics']],
> > > > > I like that we have taken out 'native' from this list. Would it
> > > > > be possible to take out 'generic' from this and others below.
> > > > > This is because the binary built with 'generic' build should run
> > > > > on any Arm platform. There is no dependency on any underlying
> platform.
> > > > >
> > > >
> > > > This actually means generic part for the implementer, not generic
> > > > for everything. I understand this is here to produce binaries that
> > > > would run on everything from that impelemeter (in line of what you
> > > > mention below, this would be implementer-generic configuration, a
> > > > fourth category). In my patchset, it's also a fallback when
> > > > building for an unknown part number from the implementer.
> > > We do not need implementer-generic binaries/build. However, we will
> > > have some parameters that are common across all the part numbers
> > > from that implementer (probably we should not call it as
> > > 'implementer-generic' to avoid confusion. May be
> > > 'implementer-common-
> > flags' or 'implementer-flags-extra').
> > > Those parameters can be used for every part number.
> >
> > These are currently named flags_ such as flags_arm and
> > flags_cavium. We could rename them to implementer_flags_.
> Ok
> 
> >
> > >
> > > If we kno

[dpdk-dev] [PATCH v1] net/mlx5: fix meter packet missing

2020-11-03 Thread Xueming Li
For transfer flow with meter, packet was passed without applying flow
action. The group level was multiplied by 10 for group level 65531.

This patch fixes this issue by correcting suffix table group level
calculation.

Fixes: 3e8f3e51fd93 ("net/mlx5: fix meter table definitions")
Cc: suanmi...@nvidia.com
Cc: sta...@dpdk.org

Signed-off-by: Xueming Li 
---
 drivers/net/mlx5/mlx5.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 63d263384b..a28f30a5ab 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -610,9 +610,9 @@ struct mlx5_flow_tbl_resource {
 #define MLX5_FLOW_MREG_ACT_TABLE_GROUP (MLX5_MAX_TABLES - 1)
 #define MLX5_FLOW_MREG_CP_TABLE_GROUP (MLX5_MAX_TABLES - 2)
 /* Tables for metering splits should be added here. */
-#define MLX5_MAX_TABLES_EXTERNAL (MLX5_MAX_TABLES - 3)
-#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
 #define MLX5_FLOW_TABLE_LEVEL_SUFFIX (MLX5_MAX_TABLES - 3)
+#define MLX5_FLOW_TABLE_LEVEL_METER (MLX5_MAX_TABLES - 4)
+#define MLX5_MAX_TABLES_EXTERNAL MLX5_FLOW_TABLE_LEVEL_METER
 #define MLX5_MAX_TABLES_FDB UINT16_MAX
 #define MLX5_FLOW_TABLE_FACTOR 10
 
-- 
2.25.1



Re: [dpdk-dev] 【meson build fail for octeontx2】

2020-11-03 Thread oulijun



在 2020/10/27 16:31, David Marchand 写道:

On Tue, Oct 27, 2020 at 8:43 AM oulijun  wrote:


Hi
   I am trying to build the dpdk-next-net code on a Kunpeng920 (ARM64) platform
using the meson build system. However, the build fails.
The libtmp_rte_event_octeontx2 build error happened. The output as follows:

   [1986/2298] Compiling C object 
drivers/libtmp_rte_event_octeontx2.a.p/event_octeontx2_otx2_tim_worker.c.o
FAILED: 
drivers/libtmp_rte_event_octeontx2.a.p/event_octeontx2_otx2_tim_worker.c.o
cc -Idrivers/libtmp_rte_event_octeontx2.a.p -Idrivers -I../drivers 
-Idrivers/event/octeontx2 -I../drivers/event/octeontx2 
-Idrivers/crypto/octeontx2 -I../drivers/crypto/octeontx2 -Idrivers/common/cpt 
-I../drivers/common/cpt -Ilib/librte_eventdev -I../lib/librte_eventdev -I. -I.. 
-Iconfig -I../config -Ilib/librte_eal/include -I../lib/librte_eal/include 
-Ilib/librte_eal/linux/include -I../lib/librte_eal/linux/include 
-Ilib/librte_eal/arm/include -I../lib/librte_eal/arm/include 
-Ilib/librte_eal/common -I../lib/librte_eal/common -Ilib/librte_eal 
-I../lib/librte_eal -Ilib/librte_kvargs -I../lib/librte_kvargs 
-Ilib/librte_metrics -I../lib/librte_metrics -Ilib/librte_telemetry 
-I../lib/librte_telemetry -Ilib/librte_ring -I../lib/librte_ring 
-Ilib/librte_ethdev -I../lib/librte_ethdev -Ilib/librte_net -I../lib/librte_net 
-Ilib/librte_mbuf -I../lib/librte_mbuf -Ilib/librte_mempool 
-I../lib/librte_mempool -Ilib/librte_meter -I../lib/librte_meter 
-Ilib/librte_hash -I../lib/librte_hash -Ilib/librte_timer -I../lib/librte_timer 
-Ilib/librte_cryptodev -I../lib/librte_cryptodev -Idrivers/bus/pci 
-I../drivers/bus/pci -I../drivers/bus/pci/linux -Ilib/librte_pci 
-I../lib/librte_pci -Idrivers/common/octeontx2 -I../drivers/common/octeontx2 
-Idrivers/mempool/octeontx2 -I../drivers/mempool/octeontx2 
-Idrivers/net/octeontx2 -I../drivers/net/octeontx2 -Idrivers/crypto/octeontx 
-I../drivers/crypto/octeontx -Ilib/librte_security -I../lib/librte_security 
-Idrivers/bus/vdev -I../drivers/bus/vdev -pipe -D_FILE_OFFSET_BITS=64 -Wall 
-Winvalid-pch -O3 -include rte_config.h -Wextra -Wcast-qual -Wdeprecated 
-Wformat-nonliteral -Wformat-security -Wmissing-declarations 
-Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpointer-arith 
-Wsign-compare -Wstrict-prototypes -Wundef -Wwrite-strings 
-Wno-missing-field-initializers -D_GNU_SOURCE -fPIC -march=armv8-a+crc 
-DALLOW_EXPERIMENTAL_API -DALLOW_INTERNAL_API -MD -MQ 
drivers/libtmp_rte_event_octeontx2.a.p/event_octeontx2_otx2_tim_worker.c.o -MF 
drivers/libtmp_rte_event_octeontx2.a.p/event_octeontx2_otx2_tim_worker.c.o.d -o 
drivers/libtmp_rte_event_octeontx2.a.p/event_octeontx2_otx2_tim_worker.c.o -c 
../drivers/event/octeontx2/otx2_tim_worker.c
../drivers/event/octeontx2/otx2_tim_worker.c: In function 
‘otx2_tim_arm_tmo_tick_burst_mod’:
../drivers/event/octeontx2/otx2_tim_worker.c:154:18: error: could not split insn
struct rte_event_timer **tim, \
   ^
../drivers/event/octeontx2/otx2_tim_evdev.h:208:1: note: in expansion of macro 
‘FP’
  FP(mod,   0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB)  \
  ^
../drivers/event/octeontx2/otx2_tim_worker.c:161:1: note: in expansion of macro 
‘TIM_ARM_TMO_FASTPATH_MODES’
  TIM_ARM_TMO_FASTPATH_MODES
  ^
(insn 252 250 255 (parallel [
 (set (reg:DI 1 x1 [orig:230 D.16918 ] [230])
 (mem/v:DI (reg/f:DI 10 x10 [orig:229 D.16920 ] [229]) [-1  S8 
A64]))
 (set (mem/v:DI (reg/f:DI 10 x10 [orig:229 D.16920 ] [229]) [-1  S8 
A64])
 (unspec_volatile:DI [
 (plus:DI (mem/v:DI (reg/f:DI 10 x10 [orig:229 D.16920 
] [229]) [-1  S8 A64])
 (const_int 1099511627776 [0x100]))
 (const_int 2 [0x2])
 ] UNSPECV_ATOMIC_OP))
 (clobber (reg:CC 66 cc))
 (clobber (reg:DI 4 x4))
 (clobber (reg:SI 3 x3))
 ]) ../drivers/event/octeontx2/otx2_tim_worker.h:81 1832 
{atomic_fetch_adddi}
  (expr_list:REG_UNUSED (reg:CC 66 cc)
 (expr_list:REG_UNUSED (reg:DI 4 x4)
 (expr_list:REG_UNUSED (reg:SI 3 x3)
 (nil)
../drivers/event/octeontx2/otx2_tim_worker.c:154:18: internal compiler error: 
in final_scan_insn, at final.c:2897
struct rte_event_timer **tim, \
   ^
../drivers/event/octeontx2/otx2_tim_evdev.h:208:1: note: in expansion of macro 
‘FP’
  FP(mod,   0, 0, 0, OTX2_TIM_BKT_MOD | OTX2_TIM_ENA_DFB)  \
  ^
../drivers/event/octeontx2/otx2_tim_worker.c:161:1: note: in expansion of macro 
‘TIM_ARM_TMO_FASTPATH_MODES’
  TIM_ARM_TMO_FASTPATH_MODES
  ^
Please submit a full bug report,
with preprocessed source if appropriate.
See  for instructions.
{standard input}: Assembler messages:
{standard input}: Error: open CFI at the end of file; missing .cfi_endproc 
directive
Preprocessed source stored into /tmp/cchw0Ftu.out file, please attach this to 
your b

Re: [dpdk-dev] performance degradation with fpic

2020-11-03 Thread Bruce Richardson
On Tue, Nov 03, 2020 at 11:18:57AM +0100, Thomas Monjalon wrote:
> 02/11/2020 16:00, Bruce Richardson:
> > On Mon, Nov 02, 2020 at 10:40:54AM +, Ali Alnubani wrote:
> > > Hi Bruce,
> > > 
> > > I was able to pin this down on drivers/net/mlx5/mlx5_rxtx.c. Removing 
> > > -fPIC from its ninja recipe in build.ninja resolves the issue (had to 
> > > prevent creating shared libs in this case).
> > > What do you suggest I do? Can we have per-pmd customized compilation 
> > > flags?
> > > 
> > > Regards,
> > > Ali
> > > 
> > There are multiple possible ways to achieve this, but below are some ideas:
> > 
> > 1. Take the changes for supporting function versioning and duplicate them
> > from lib/meson.build to drivers/meson.build. Since function versioning
> > support already requires everything to be built twice, we could set it to
> > not use -fpic for the static libs in that case. Then mark mlx5 as using
> > function versioning. This is a bit hackish though, so
> > 
> > 2. The "objs" parameter from each sub-directory is not widely used, so we
> > could split this easily enough into objs-shared and objs-static, and allow
> > the subdirectory build file, in this case mlx5/meson.ninja, to build any c
> > files manually to pass them back. This is more flexible, and also means
> > that you can limit the files which are to be built twice to only the single
> > file, rather than marking the whole driver as needing rebuild.
> 
> Can it be done only in the driver?
> No general meson change for this option?
> 

Well, apart from splitting the objs variable into two, I don't see any
other general meson changes being needed in this case. So yes, it makes any
changes specific to the driver.

That said, I have not tried to implement such a change, so the "in
practice" may be different from the "in theory"!

> > I'm sure there are other approaches too. However, I agree with Luca's
> > comment that first approach should probably be to see if you can track down
> > exactly why this one file is having problems. Could any of the slowdown be
> > due to the fact that you use a common lib from your driver? Are there
> > cross-driver calls in the fast-path that are suffering a penalty?
> 
> Of course the performance will be analyzed in the long run.
> However, such analyzis is more convenient if meson is flexible enough
> to allow customization of the build.
> And in general, I think it is good to have meson flexible
> to allow any kind of driver build customization.
> 

I'm partially agreeing and partially disagreeing here. While flexibility is
something that people generally want, based off my experience with DPDK
builds over the last few years, I think that there is an awful lot to be
said for consistency! While we need to support special cases that we can't
work around, there are many advantages to having everything built in the
same way using common flags etc. I would really hate to see the flexibility
translate into drivers all choosing to do their own special build
customization.

/Bruce


Re: [dpdk-dev] [PATCH v3 09/16] net/octeontx2: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Thomas Monjalon
03/11/2020 11:52, Harman Kalra:
>With the following changes, ptpclient and testpmd(ieee1588 mode) is
>crashing for us. I am debugging the issue and will update soon.
>   --
>Steps to reproduce:
>1. Testpmd:
>   ./dpdk-testpmd -c 0x01 -n 4 -w 0002:05:00.0 -- -i
>   --port-topology=loop
>   testpmd> set fwd ieee1588
>   testpmd> set port 0 ptype_mask 0xf
>   testpmd> start
> 
>   I am sending ptp packets using scapy from the peer:
>   >>> p = Ether(src='98:03:9b:67:b0:d0', dst='FA:62:0C:27:AD:BC',
> >>> type=35063)/Raw(load='\x00\x02')
>   >>> sendp (p, iface="p5p2")
> 
>   I am observing seg fault even for 1 packet.

Where is the crash? Could you provide a backtrace?
Is the field well registered?





Re: [dpdk-dev] [PATCH] app/flow-perf: configurable rule batches

2020-11-03 Thread Georgios Katsikas
Hi,

Any news on this patch?
Is there anything else I could do?

Thanks,
Georgios

On Sun, Oct 11, 2020 at 1:03 PM Georgios Katsikas 
wrote:

> Currently, flow-perf measures the performance of
> rule installation/deletion operations by breaking
> down the entire number of operations into windows
> of fixed size (i.e., 10 operations per window).
> Then, flow-perf measures the total time per window
> and computes an average time across all windows.
>
> This commit allows flow-perf users to configure
> the number of rules per window instead of using
> a fixed pre-compiled value. To do so, users must
> pass --rules-batch=N, where N is the number of
> rules per window (or batch).
> For consistency reasons, flow_count variable is
> now renamed to rules_count. This variable is the
> total number of rules to be installed/deleted.
>
> For example, if a user wants to measure how much
> time it takes to install 1M rules in a certain NIC,
> he/she can input:
> --rules-count=1000000
> This way flow-perf will break down 1M flow rules into
> 10 batches of 100k flow rules each (this is the default
> batch size) and compute an average across the 10
> measurements.
> Now, if the user modifies the number of rules per
> batch as follows:
> --rules-count=1000000 --rules-batch=500000
> then flow-perf will break down 1M flow rules into
> 2 batches of 500k flow rules each and compute the
> average across the 2 measurements.
>
> Finally, this commit also adds default variables
> to the usage function instead of hardcoded values.
>
> Signed-off-by: Georgios Katsikas 
> ---
>  app/test-flow-perf/main.c  | 86 --
>  doc/guides/tools/flow-perf.rst | 42 -
>  2 files changed, 79 insertions(+), 49 deletions(-)
>
> diff --git a/app/test-flow-perf/main.c b/app/test-flow-perf/main.c
> index c420da6a5..4cdab2c93 100644
> --- a/app/test-flow-perf/main.c
> +++ b/app/test-flow-perf/main.c
> @@ -40,7 +40,8 @@
>
>  #define MAX_ITERATIONS 100
>  #define DEFAULT_RULES_COUNT 4000000
> -#define DEFAULT_ITERATION   10
> +#define DEFAULT_RULES_BATCH 100000
> +#define DEFAULT_GROUP 0
>
>  struct rte_flow *flow;
>  static uint8_t flow_group;
> @@ -62,8 +63,8 @@ static bool enable_fwd;
>
>  static struct rte_mempool *mbuf_mp;
>  static uint32_t nb_lcores;
> -static uint32_t flows_count;
> -static uint32_t iterations_number;
> +static uint32_t rules_count;
> +static uint32_t rules_batch;
>  static uint32_t hairpin_queues_num; /* total hairpin q number - default:
> 0 */
>  static uint32_t nb_lcores;
>
> @@ -98,8 +99,10 @@ usage(char *progname)
>  {
> printf("\nusage: %s\n", progname);
> printf("\nControl configurations:\n");
> -   printf("  --flows-count=N: to set the number of needed"
> -   " flows to insert, default is 4,000,000\n");
> +   printf("  --rules-count=N: to set the number of needed"
> +   " rules to insert, default is %d\n", DEFAULT_RULES_COUNT);
> +   printf("  --rules-batch=N: set number of batched rules,"
> +   " default is %d\n", DEFAULT_RULES_BATCH);
> printf("  --dump-iterations: To print rates for each"
> " iteration\n");
> printf("  --deletion-rate: Enable deletion rate"
> @@ -114,7 +117,7 @@ usage(char *progname)
> printf("  --egress: set egress attribute in flows\n");
> printf("  --transfer: set transfer attribute in flows\n");
> printf("  --group=N: set group for all flows,"
> -   " default is 0\n");
> +   " default is %d\n", DEFAULT_GROUP);
>
> printf("To set flow items:\n");
> printf("  --ether: add ether layer in flow items\n");
> @@ -527,7 +530,8 @@ args_parse(int argc, char **argv)
> static const struct option lgopts[] = {
> /* Control */
> { "help",   0, 0, 0 },
> -   { "flows-count",1, 0, 0 },
> +   { "rules-count",1, 0, 0 },
> +   { "rules-batch",1, 0, 0 },
> { "dump-iterations",0, 0, 0 },
> { "deletion-rate",  0, 0, 0 },
> { "dump-socket-mem",0, 0, 0 },
> @@ -705,16 +709,26 @@ args_parse(int argc, char **argv)
> }
> /* Control */
> if (strcmp(lgopts[opt_idx].name,
> -   "flows-count") == 0) {
> +   "rules-batch") == 0) {
> n = atoi(optarg);
> -   if (n > (int) iterations_number)
> -   flows_count = n;
> +   if (n >= DEFAULT_RULES_BATCH)
> +   rules_batch = n;
> else {
> -   printf("\

Re: [dpdk-dev] [PATCH] crypto/aesni_mb: fix cpu crypto cipher auth

2020-11-03 Thread Zhang, Roy Fan
Hi Akhil,

Thanks. Will do.

Regards,
Fan

> -Original Message-
> From: Akhil Goyal 
> Sent: Monday, November 2, 2020 7:57 PM
> To: Zhang, Roy Fan ; dev@dpdk.org
> Subject: RE: [PATCH] crypto/aesni_mb: fix cpu crypto cipher auth
> 
> Hi Fan,
> > Subject: [PATCH] crypto/aesni_mb: fix cpu crypto cipher auth
> >
> > This patch fixes the AESNI-MB PMD CPU crypto process function. Originally
> > the function tried to access the crypto vector's aad buffer even though it
> > is not needed.
> >
> > Fixes: 8d928d47a29a ("cryptodev: change crypto symmetric vector
> structure")
> > Cc: roy.fan.zh...@intel.com
> >
> > Signed-off-by: Fan Zhang 
> 
> This patch need a rebase as it is conflicting with the recently added patch
> From pablo.
> 
> Regards,
> Akhil



Re: [dpdk-dev] [PATCH 7/8] net/hns3: fix some static check errors by coverity

2020-11-03 Thread Ferruh Yigit

On 11/2/2020 2:38 PM, Lijun Ou wrote:

From: Hongbo Zheng 

This patch fixes some warnings.
coverity:function format symbol does not match


I think we are not getting these warnings in the public coverity, is there a 
specific configuration for it?




coverity:not_unsigned: Violation: Operand
"hw->hw_tc_map & (1UL << i)",  Right Expression:
"1UL << i" is not an unsigned type



Can you please split this patch into multiple patches, one for each type of 
issue?


Coverity issue: 91708127
Coverity issue: 89776953


I assume these are internal coverity ids, it has not much value to record them 
in the commit log.


We record the public coverity [1] ids with this tag.

[1]
https://scan.coverity.com/projects/dpdk-data-plane-development-kit?tab=overview


Fixes: 62e3ccc2b94c ("net/hns3: support flow control")
Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations")
Fixes: a5475d61fa34 ("net/hns3: support VF")
Fixes: fb94f359481f ("net/hns3: fix adding multicast MAC address")
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: a951c1ed3ab5 ("net/hns3: support different numbers of Rx and Tx queues")
Fixes: 2790c6464725 ("net/hns3: support device reset")
Fixes: 23d4b61fee5d ("net/hns3: support multiple process")
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Fixes: e31f123db06b ("net/hns3: support NEON Tx")
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: sta...@dpdk.org

Signed-off-by: Hongbo Zheng 
Signed-off-by: Lijun Ou 


<...>



Re: [dpdk-dev] [PATCH 15/15] mbuf: move pool pointer in hotter first half

2020-11-03 Thread Morten Brørup
> From: Thomas Monjalon [mailto:tho...@monjalon.net]
> Sent: Monday, November 2, 2020 4:58 PM
> 
> +Cc techboard
> 
> We need benchmark numbers in order to take a decision.
> Please all, prepare some arguments and numbers so we can discuss
> the mbuf layout in the next techboard meeting.

I propose that the techboard considers this from two angles:

1. Long term goals and their relative priority. I.e. what can be
achieved with wide-ranging modifications, requiring yet another ABI
break and due notices.

2. Short term goals, i.e. what can be achieved for this release.


My suggestions follow...

1. Regarding long term goals:

I have argued that simple forwarding of non-segmented packets using
only the first mbuf cache line can be achieved by making three
modifications:

a) Move m->tx_offload to the first cache line.
b) Use an 8 bit pktmbuf mempool index in the first cache line,
   instead of the 64 bit m->pool pointer in the second cache line.
c) Do not access m->next when we know that it is NULL.
   We can use m->nb_segs == 1 or some other invariant as the gate.
   It can be implemented by adding an m->next accessor function:
   struct rte_mbuf * rte_mbuf_next(struct rte_mbuf * m)
   {
   return m->nb_segs == 1 ? NULL : m->next;
   }

Regarding the priority of this goal, I guess that simple forwarding
of non-segmented packets is probably the path taken by the majority
of packets handled by DPDK.


An alternative goal could be:
Do not touch the second cache line during RX.
A comment in the mbuf structure says so, but it is not true anymore.

(I guess that regression testing didn't catch this because the tests
perform TX immediately after RX, so the cache miss just moves from
the TX to the RX part of the test application.)


2. Regarding short term goals:

The current DPDK source code looks to me like m->next is the most
frequently accessed field in the second cache line, so it makes sense
moving this to the first cache line, rather than m->pool.
Benchmarking may help here.

If we - without breaking the ABI - can introduce a gate to avoid
accessing m->next when we know that it is NULL, we should keep it in
the second cache line.

In this case, I would prefer to move m->tx_offload to the first cache
line, thereby providing a field available for application use, until
the application prepares the packet for transmission.


> 
> 
> 01/11/2020 21:59, Morten Brørup:
> > > From: Thomas Monjalon [mailto:tho...@monjalon.net]
> > > Sent: Sunday, November 1, 2020 5:38 PM
> > >
> > > 01/11/2020 10:12, Morten Brørup:
> > > > One thing has always puzzled me:
> > > > Why do we use 64 bits to indicate which memory pool
> > > > an mbuf belongs to?
> > > > The portid only uses 16 bits and an indirection index.
> > > > Why don't we use the same kind of indirection index for mbuf
> pools?
> > >
> > > I wonder what would be the cost of indirection. Probably
> neglectible.
> >
> > Probably. The portid does it, and that indirection is heavily used
> everywhere.
> >
> > The size of mbuf memory pool indirection array should be compile time
> configurable, like the size of the portid indirection array.
> >
> > And for reference, the indirection array will fit into one cache line
> if we default to 8 mbuf pools, thus supporting an 8 CPU socket system
> with one mbuf pool per CPU socket, or a 4 CPU socket system with two
> mbuf pools per CPU socket.
> >
> > (And as a side note: Our application is optimized for single-socket
> systems, and we only use one mbuf pool. I guess many applications were
> developed without carefully optimizing for multi-socket systems, and
> also just use one mbuf pool. In these cases, the mbuf structure doesn't
> really need a pool field. But it is still there, and the DPDK libraries
> use it, so we didn't bother removing it.)
> >
> > > I think it is a good proposal...
> > > ... for next year, after a deprecation notice.
> > >
> > > > I can easily imagine using one mbuf pool (or perhaps a few pools)
> > > > per CPU socket (or per physical memory bus closest to an attached
> NIC),
> > > > but not more than 256 mbuf memory pools in total.
> > > > So, let's introduce an mbufpoolid like the portid,
> > > > and cut this mbuf field down from 64 to 8 bits.
> 
> We will need to measure the perf of the solution.
> There is a chance for the cost to be too much high.
> 
> 
> > > > If we also cut down m->pkt_len from 32 to 24 bits,
> > >
> > > Who is using packets larger than 64k? Are 16 bits enough?
> >
> > I personally consider 64k a reasonable packet size limit. Exotic
> applications with even larger packets would have to live with this
> constraint. But let's see if there are any objections. For reference,
> 64k corresponds to ca. 44 Ethernet (1500 byte) packets.
> >
> > (The limit could be 65535 bytes, to avoid translation of the value 0
> into 65536 bytes.)
> >
> > This modification would go nicely hand in hand with the mbuf pool
> indirection modification.
> >
> > ... after yet another round of ABI stability 

Re: [dpdk-dev] [PATCH 7/8] net/hns3: fix some static check errors by coverity

2020-11-03 Thread oulijun




在 2020/11/3 19:41, Ferruh Yigit 写道:

On 11/2/2020 2:38 PM, Lijun Ou wrote:

From: Hongbo Zheng 

This patch fixes some warnings.
coverity:function format symbol does not match


I think we are not getting these warnings in the public coverity, is 
there a specific configuration for it?


Yes, the coverity scan is configured internally instead of the DPDK CI 
coverity warning. However, we think it is reasonable.


coverity:not_unsigned: Violation: Operand
"hw->hw_tc_map & (1UL << i)",  Right Expression:
"1UL << i" is not an unsigned type



Can you please split this patch into multiple patches, one for each type 
of issue?



Coverity issue: 91708127
Coverity issue: 89776953


I assume these are internal coverity ids, it has not much value to 
record them in the commit log.


We record the public coverity [1] ids with this tag.

Hi, Ferruh
  The warning is our configuration. As a result, it has no public id. If 
the ID is not added, the check-git-log check fails.


Do you have any good advice?

Thanks
Lijun Ou


[1]
https://scan.coverity.com/projects/dpdk-data-plane-development-kit?tab=overview 




Fixes: 62e3ccc2b94c ("net/hns3: support flow control")
Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations")
Fixes: a5475d61fa34 ("net/hns3: support VF")
Fixes: fb94f359481f ("net/hns3: fix adding multicast MAC address")
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: a951c1ed3ab5 ("net/hns3: support different numbers of Rx and Tx 
queues")

Fixes: 2790c6464725 ("net/hns3: support device reset")
Fixes: 23d4b61fee5d ("net/hns3: support multiple process")
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Fixes: e31f123db06b ("net/hns3: support NEON Tx")
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: sta...@dpdk.org

Signed-off-by: Hongbo Zheng 
Signed-off-by: Lijun Ou 


<...>

.



Re: [dpdk-dev] [PATCH 0/8] misc fixes for hns3

2020-11-03 Thread Ferruh Yigit

On 11/2/2020 2:38 PM, Lijun Ou wrote:

This patch series includes three Tx checksum
bugs and two static check warnings fixes.
The remaining includes two features fixes.

Chengchang Tang (4):
   net/hns3: add limit promisc mode to VF
   net/hns3: fix Tx cksum outer header prepare
   net/hns3: fix Tx checksum with fix header length
   net/hns3: add VXLAN-GPE packets TSO and checksum support

Hongbo Zheng (2):
   net/hns3: fix visit unsupported QL register error
   net/hns3: fix some static check errors by coverity

Huisong Li (1):
   net/hns3: fix configurations of port-level scheduling rate

Lijun Ou (1):
   net/hns3: adjust some header files location



Except 7/8,
Series applied to dpdk-next-net/main, thanks.

Can you please send a new version of the 7/8 as separate patch(es)?


Re: [dpdk-dev] [PATCH 7/8] net/hns3: fix some static check errors by coverity

2020-11-03 Thread Ferruh Yigit

On 11/3/2020 12:11 PM, oulijun wrote:



在 2020/11/3 19:41, Ferruh Yigit 写道:

On 11/2/2020 2:38 PM, Lijun Ou wrote:

From: Hongbo Zheng 

This patch fixes some warnings.
coverity:function format symbol does not match


I think we are not getting these warnings in the public coverity, is there a 
specific configuration for it?


Yes, the coverity scan is configured internally instead of the DPDK CI coverity 
warning. However, we think it is reasonable.

>

That is OK, I just wonder if it is enabled explicitly with a config, and what 
that config is (for the record).




coverity:not_unsigned: Violation: Operand
"hw->hw_tc_map & (1UL << i)",  Right Expression:
"1UL << i" is not an unsigned type



Can you please split this patch into multiple patches, one for each type of 
issue?



Coverity issue: 91708127
Coverity issue: 89776953


I assume these are internal coverity ids, it has not much value to record them 
in the commit log.


We record the public coverity [1] ids with this tag.

Hi, Ferruh
   The warning is our configuration. As a result, it has no public id. If the ID 
is not added, the check-git-log check fails.


Do you have any good advice?



Doesn't mention from coverity at all in the commit log,
like first patch can be:
"
net/hns3: fix logging format symbols
  
"

etc...


Thanks
Lijun Ou


[1]
https://scan.coverity.com/projects/dpdk-data-plane-development-kit?tab=overview


Fixes: 62e3ccc2b94c ("net/hns3: support flow control")
Fixes: 19a3ca4c99cf ("net/hns3: add start/stop and configure operations")
Fixes: a5475d61fa34 ("net/hns3: support VF")
Fixes: fb94f359481f ("net/hns3: fix adding multicast MAC address")
Fixes: fcba820d9b9e ("net/hns3: support flow director")
Fixes: a951c1ed3ab5 ("net/hns3: support different numbers of Rx and Tx queues")
Fixes: 2790c6464725 ("net/hns3: support device reset")
Fixes: 23d4b61fee5d ("net/hns3: support multiple process")
Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Fixes: e31f123db06b ("net/hns3: support NEON Tx")
Fixes: 8839c5e202f3 ("net/hns3: support device stats")
Cc: sta...@dpdk.org

Signed-off-by: Hongbo Zheng 
Signed-off-by: Lijun Ou 


<...>

.





Re: [dpdk-dev] [PATCH v3 09/16] net/octeontx2: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Thomas Monjalon
03/11/2020 12:22, Thomas Monjalon:
> 03/11/2020 11:52, Harman Kalra:
> >With the following changes, ptpclient and testpmd(ieee1588 mode) is
> >crashing for us. I am debugging the issue and will update soon.
> >   --
> >Steps to reproduce:
> >1. Testpmd:
> >   ./dpdk-testpmd -c 0x01 -n 4 -w 0002:05:00.0 -- -i
> >   --port-topology=loop
> >   testpmd> set fwd ieee1588
> >   testpmd> set port 0 ptype_mask 0xf
> >   testpmd> start
> > 
> >   I am sending ptp packets using scapy from the peer:
> >   >>> p = Ether(src='98:03:9b:67:b0:d0', dst='FA:62:0C:27:AD:BC',
> >   >>> type=35063)/Raw(load='\x00\x02')
> >   >>> sendp (p, iface="p5p2")
> > 
> >   I am observing seg fault even for 1 packet.
> 
> Where is the crash? Could you provide a backtrace?
> Is the field well registered?

Sorry Harman, without any more explanation, we must move forward.
I am going to send a v4 without any change for octeontx2.
It should be merged today for -rc2.




[dpdk-dev] [PATCH v4 00/16] remove mbuf timestamp

2020-11-03 Thread Thomas Monjalon
The mbuf field timestamp was announced to be removed for three reasons:
  - a dynamic field already exist, used for Tx only
  - this field always used 8 bytes even if unneeded
  - this field is in the first half (cacheline) of mbuf

After this series, the dynamic field timestamp is used for both Rx and Tx
with separate dynamic flags to distinguish when the value is meaningful
without resetting the field during forwarding.

As a consequence, 8 bytes can be re-allocated to dynamic fields
in the first half of mbuf structure.
It is still open to change more the mbuf layout.

This mbuf layout change is important to allow adding more features
(consuming more dynamic fields) during the next year,
and can allow performance improvements with new usages in the first half.


v4:
- use local variable in nfb
- fix flag initialization
- remove useless blank line

v3:
- move ark variables declaration in a .h file
- improve cache locality for octeontx2
- add comments about cache locality in commit logs
- add comment for unused flag offset 17
- add timestamp register functions
- replace lookup with register in drivers and apps
- remove register in ethdev

v2:
- remove optimization to register only once in ethdev
- fix error message in latencystats
- convert rxtx_callbacks macro to inline function
- increase dynamic fields space
- do not move pool field


Thomas Monjalon (16):
  eventdev: remove software Rx timestamp
  mbuf: add Rx timestamp flag and helpers
  latency: switch Rx timestamp to dynamic mbuf field
  net/ark: switch Rx timestamp to dynamic mbuf field
  net/dpaa2: switch Rx timestamp to dynamic mbuf field
  net/mlx5: fix dynamic mbuf offset lookup check
  net/mlx5: switch Rx timestamp to dynamic mbuf field
  net/nfb: switch Rx timestamp to dynamic mbuf field
  net/octeontx2: switch Rx timestamp to dynamic mbuf field
  net/pcap: switch Rx timestamp to dynamic mbuf field
  app/testpmd: switch Rx timestamp to dynamic mbuf field
  examples/rxtx_callbacks: switch timestamp to dynamic field
  ethdev: add doxygen comment for Rx timestamp API
  mbuf: remove deprecated timestamp field
  mbuf: add Tx timestamp registration helper
  ethdev: include mbuf registration in Tx timestamp API

 app/test-pmd/config.c | 38 -
 app/test-pmd/util.c   | 38 -
 app/test/test_mbuf.c  |  1 -
 doc/guides/nics/mlx5.rst  |  5 +-
 .../prog_guide/event_ethernet_rx_adapter.rst  |  6 +-
 doc/guides/rel_notes/deprecation.rst  |  4 --
 doc/guides/rel_notes/release_20_11.rst|  4 ++
 drivers/net/ark/ark_ethdev.c  | 16 ++
 drivers/net/ark/ark_ethdev_rx.c   |  7 ++-
 drivers/net/ark/ark_ethdev_rx.h   |  2 +
 drivers/net/dpaa2/dpaa2_ethdev.c  | 11 
 drivers/net/dpaa2/dpaa2_ethdev.h  |  2 +
 drivers/net/dpaa2/dpaa2_rxtx.c| 25 ++---
 drivers/net/mlx5/mlx5_ethdev.c|  8 ++-
 drivers/net/mlx5/mlx5_rxq.c   |  8 +++
 drivers/net/mlx5/mlx5_rxtx.c  |  8 +--
 drivers/net/mlx5/mlx5_rxtx.h  | 19 +++
 drivers/net/mlx5/mlx5_rxtx_vec_altivec.h  | 41 +++---
 drivers/net/mlx5/mlx5_rxtx_vec_neon.h | 43 ---
 drivers/net/mlx5/mlx5_rxtx_vec_sse.h  | 35 ++--
 drivers/net/mlx5/mlx5_trigger.c   |  2 +-
 drivers/net/mlx5/mlx5_txq.c   |  2 +-
 drivers/net/nfb/nfb_rx.c  | 15 -
 drivers/net/nfb/nfb_rx.h  | 21 +--
 drivers/net/octeontx2/otx2_ethdev.c   | 10 
 drivers/net/octeontx2/otx2_rx.h   | 19 ++-
 drivers/net/pcap/rte_eth_pcap.c   | 20 ++-
 examples/rxtx_callbacks/main.c| 16 +-
 lib/librte_ethdev/rte_ethdev.h| 13 -
 .../rte_event_eth_rx_adapter.c| 11 
 .../rte_event_eth_rx_adapter.h|  6 +-
 lib/librte_latencystats/rte_latencystats.c| 30 --
 lib/librte_mbuf/rte_mbuf.c|  2 -
 lib/librte_mbuf/rte_mbuf.h|  2 +-
 lib/librte_mbuf/rte_mbuf_core.h   | 12 +---
 lib/librte_mbuf/rte_mbuf_dyn.c| 51 +
 lib/librte_mbuf/rte_mbuf_dyn.h| 55 +--
 lib/librte_mbuf/version.map   |  2 +
 38 files changed, 431 insertions(+), 179 deletions(-)

-- 
2.28.0



[dpdk-dev] [PATCH v4 01/16] eventdev: remove software Rx timestamp

2020-11-03 Thread Thomas Monjalon
This is a revert of the commit 569758758dcd ("eventdev: add Rx timestamp").
If the Rx timestamp is not configured on the ethdev port,
there is no reason to set one.
Also the accuracy of the timestamp was bad because it was set at a late stage.
Anyway there is no trace of the usage of this timestamp.

Signed-off-by: Thomas Monjalon 
Acked-by: David Marchand 
---
 doc/guides/prog_guide/event_ethernet_rx_adapter.rst |  6 +-
 lib/librte_eventdev/rte_event_eth_rx_adapter.c  | 11 ---
 lib/librte_eventdev/rte_event_eth_rx_adapter.h  |  6 +-
 3 files changed, 2 insertions(+), 21 deletions(-)

diff --git a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst 
b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
index 236f43f455..cb44ce0e47 100644
--- a/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
+++ b/doc/guides/prog_guide/event_ethernet_rx_adapter.rst
@@ -12,11 +12,7 @@ be supported in hardware or require a software thread to 
receive packets from
 the ethdev port using ethdev poll mode APIs and enqueue these as events to the
 event device using the eventdev API. Both transfer mechanisms may be present on
 the same platform depending on the particular combination of the ethdev and
-the event device. For SW based packet transfer, if the mbuf does not have a
-timestamp set, the adapter adds a timestamp to the mbuf using
-rte_get_tsc_cycles(), this provides a more accurate timestamp as compared to
-if the application were to set the timestamp since it avoids event device
-schedule latency.
+the event device.
 
 The Event Ethernet Rx Adapter library is intended for the application code to
 configure both transfer mechanisms using a common API. A capability API allows
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c 
b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index fd1ede..3c73046551 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -763,7 +763,6 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter 
*rx_adapter,
uint32_t rss_mask;
uint32_t rss;
int do_rss;
-   uint64_t ts;
uint16_t nb_cb;
uint16_t dropped;
 
@@ -771,16 +770,6 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter 
*rx_adapter,
rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
do_rss = !rss_mask && !eth_rx_queue_info->flow_id_mask;
 
-   if ((m->ol_flags & PKT_RX_TIMESTAMP) == 0) {
-   ts = rte_get_tsc_cycles();
-   for (i = 0; i < num; i++) {
-   m = mbufs[i];
-
-   m->timestamp = ts;
-   m->ol_flags |= PKT_RX_TIMESTAMP;
-   }
-   }
-
for (i = 0; i < num; i++) {
m = mbufs[i];
 
diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.h 
b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
index 2dd259c279..21bb1e54c8 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.h
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.h
@@ -21,11 +21,7 @@
  *
  * The adapter uses a EAL service core function for SW based packet transfer
  * and uses the eventdev PMD functions to configure HW based packet transfer
- * between the ethernet device and the event device. For SW based packet
- * transfer, if the mbuf does not have a timestamp set, the adapter adds a
- * timestamp to the mbuf using rte_get_tsc_cycles(), this provides a more
- * accurate timestamp as compared to if the application were to set the time
- * stamp since it avoids event device schedule latency.
+ * between the ethernet device and the event device.
  *
  * The ethernet Rx event adapter's functions are:
  *  - rte_event_eth_rx_adapter_create_ext()
-- 
2.28.0



[dpdk-dev] [PATCH v4 03/16] latency: switch Rx timestamp to dynamic mbuf field

2020-11-03 Thread Thomas Monjalon
The mbuf timestamp is moved to a dynamic field
in order to allow removal of the deprecated static field.
The related mbuf flag is also replaced with the dynamic one.

Signed-off-by: Thomas Monjalon 
Acked-by: David Marchand 
---
 lib/librte_latencystats/rte_latencystats.c | 30 ++
 1 file changed, 25 insertions(+), 5 deletions(-)

diff --git a/lib/librte_latencystats/rte_latencystats.c 
b/lib/librte_latencystats/rte_latencystats.c
index ba2fff3bcb..ab8db7a139 100644
--- a/lib/librte_latencystats/rte_latencystats.c
+++ b/lib/librte_latencystats/rte_latencystats.c
@@ -9,6 +9,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -31,6 +32,16 @@ latencystat_cycles_per_ns(void)
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_LATENCY_STATS RTE_LOGTYPE_USER1
 
+static uint64_t timestamp_dynflag;
+static int timestamp_dynfield_offset = -1;
+
+static inline rte_mbuf_timestamp_t *
+timestamp_dynfield(struct rte_mbuf *mbuf)
+{
+   return RTE_MBUF_DYNFIELD(mbuf,
+   timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
+}
+
 static const char *MZ_RTE_LATENCY_STATS = "rte_latencystats";
 static int latency_stats_index;
 static uint64_t samp_intvl;
@@ -128,10 +139,10 @@ add_time_stamps(uint16_t pid __rte_unused,
diff_tsc = now - prev_tsc;
timer_tsc += diff_tsc;
 
-   if ((pkts[i]->ol_flags & PKT_RX_TIMESTAMP) == 0
+   if ((pkts[i]->ol_flags & timestamp_dynflag) == 0
&& (timer_tsc >= samp_intvl)) {
-   pkts[i]->timestamp = now;
-   pkts[i]->ol_flags |= PKT_RX_TIMESTAMP;
+   *timestamp_dynfield(pkts[i]) = now;
+   pkts[i]->ol_flags |= timestamp_dynflag;
timer_tsc = 0;
}
prev_tsc = now;
@@ -161,8 +172,8 @@ calc_latency(uint16_t pid __rte_unused,
 
now = rte_rdtsc();
for (i = 0; i < nb_pkts; i++) {
-   if (pkts[i]->ol_flags & PKT_RX_TIMESTAMP)
-   latency[cnt++] = now - pkts[i]->timestamp;
+   if (pkts[i]->ol_flags & timestamp_dynflag)
+   latency[cnt++] = now - *timestamp_dynfield(pkts[i]);
}
 
rte_spinlock_lock(&glob_stats->lock);
@@ -241,6 +252,15 @@ rte_latencystats_init(uint64_t app_samp_intvl,
return -1;
}
 
+   /* Register mbuf field and flag for Rx timestamp */
+   ret = rte_mbuf_dyn_rx_timestamp_register(×tamp_dynfield_offset,
+   ×tamp_dynflag);
+   if (ret != 0) {
+   RTE_LOG(ERR, LATENCY_STATS,
+   "Cannot register mbuf field/flag for timestamp\n");
+   return -rte_errno;
+   }
+
/** Register Rx/Tx callbacks */
RTE_ETH_FOREACH_DEV(pid) {
struct rte_eth_dev_info dev_info;
-- 
2.28.0



  1   2   3   >