[PATCH v2] gro: fix reordering of packets in GRO library

2022-10-28 Thread Kumara Parameshwaran
From: Kumara Parameshwaran 

When a TCP packet contains flags like PSH, it is returned
immediately to the application even though there might be packets of
the same flow in the GRO table. If the PSH flag is set on a segment,
packets up to that segment should be delivered immediately. But the
current implementation delivers the last arrived packet with the PSH
flag set, causing re-ordering.

With this patch, if a packet does not contain only the ACK flag and
there are no previous packets for the flow, the packet is returned
immediately; otherwise it is merged with the previous segments and the
flags of the last segment are applied to the entire merged segment.
This is the behaviour of the Linux stack as well.

Signed-off-by: Kumara Parameshwaran 
---
v1:
If the received packet is not a pure ACK packet, we check whether
there are any previous packets for the flow; if so, we include the
received packet in the coalescing logic as well and apply the flags
of the last received packet to the entire merged segment, which
avoids re-ordering.

Consider a case where P1(PSH), P2(ACK), P3(ACK) are received in burst mode.
P1 carries the PSH flag and, since there are no prior packets for the flow,
it is copied to unprocess_packets, while P2(ACK) and P3(ACK) are merged
together. In the existing code, P2 and P3 would be delivered as a single
segment first and the unprocess_packets would be copied afterwards, which
causes reordering. With the patch, the unprocessed packets are copied first
and then the packets from the GRO table.
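
Below is a minimal, purely illustrative C sketch of that delivery order
(this is not the actual rte_gro.c code; the function name and parameters
are placeholders): packets that bypassed coalescing are placed in the
output array before the packets flushed from the GRO table.

    #include <stdint.h>
    #include <string.h>

    struct rte_mbuf;    /* only pointers are moved around here */

    /*
     * Illustrative only: deliver P1 (left unprocessed because PSH was set
     * and the flow had no prior packets) before the merged P2+P3 flushed
     * from the GRO table, so the application never sees them out of order.
     */
    static uint16_t
    deliver_in_order(struct rte_mbuf **out,
                     struct rte_mbuf **unprocess_pkts, uint16_t unprocess_num,
                     struct rte_mbuf **flushed_pkts, uint16_t flushed_num)
    {
        uint16_t n = 0;

        /* 1. first the packets GRO left untouched */
        memcpy(&out[n], unprocess_pkts, unprocess_num * sizeof(*out));
        n += unprocess_num;

        /* 2. then the packets coalesced in (and flushed from) the GRO table */
        memcpy(&out[n], flushed_pkts, flushed_num * sizeof(*out));
        n += flushed_num;

        return n;
    }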

Testing done:
The csum forwarding mode of test-pmd was modified to support the following:
a GET request of 10MB from client to server via test-pmd (static ARP entries
added on the client and server). GRO and TSO were enabled in test-pmd so that
packets received from the client MAC are sent to the server MAC and vice versa.
In the above testing, without the patch the client observed re-ordering of 25
packets, and with the patch no packet re-ordering was observed.

v2:
* Fix warnings in commit and comment
* Do not consider a packet as a merge candidate if it contains the SYN/RST flag

 lib/gro/gro_tcp4.c | 43 ---
 lib/gro/rte_gro.c  | 18 +-
 2 files changed, 45 insertions(+), 16 deletions(-)

diff --git a/lib/gro/gro_tcp4.c b/lib/gro/gro_tcp4.c
index 8f5e800250..c4fa8ff226 100644
--- a/lib/gro/gro_tcp4.c
+++ b/lib/gro/gro_tcp4.c
@@ -188,6 +188,19 @@ update_header(struct gro_tcp4_item *item)
pkt->l2_len);
 }
 
+static inline void
+update_tcp_hdr_flags(struct rte_tcp_hdr *tcp_hdr, struct rte_mbuf *pkt)
+{
+   struct rte_ether_hdr *eth_hdr;
+   struct rte_ipv4_hdr *ipv4_hdr;
+   struct rte_tcp_hdr *merged_tcp_hdr;
+
+   eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+   ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
+   merged_tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+   merged_tcp_hdr->tcp_flags |= tcp_hdr->tcp_flags;
+}
+
 int32_t
 gro_tcp4_reassemble(struct rte_mbuf *pkt,
struct gro_tcp4_tbl *tbl,
@@ -206,6 +219,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
uint32_t i, max_flow_num, remaining_flow_num;
int cmp;
uint8_t find;
+   uint32_t start_idx;
 
/*
 * Don't process the packet whose TCP header length is greater
@@ -219,12 +233,6 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
 
-   /*
-* Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
-* or CWR set.
-*/
-   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
-   return -1;
/*
 * Don't process the packet whose payload length is less than or
 * equal to 0.
@@ -263,12 +271,29 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
find = 1;
+   start_idx = tbl->flows[i].start_index;
break;
}
remaining_flow_num--;
}
}
 
+   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG) {
+   /*
+* Check and try merging the current TCP segment with the previous
+* TCP segment if the TCP header does not contain RST and SYN flag
+* There are cases where the last segment is sent with FIN|PSH|ACK
+* which should also be considered for merging with previous segments.
+*/
+   if (find && !(tcp_hdr->tcp_flags & (RTE_TCP_RST_FLAG|RTE_TCP_SYN_FLAG)))
+   /*
+* Since PSH flag is set, start time

[PATCH v5 1/3] sched: fix subport profile ID

2022-10-28 Thread Megha Ajmera
In rte_sched_subport_config() API, subport_profile_id is not set correctly.

Fixes: ac6fcb841b0f ("sched: update subport rate dynamically")
Cc: cristian.dumitre...@intel.com

Signed-off-by: Megha Ajmera 
Acked-by: Dumitrescu, Cristian 
---
 lib/sched/rte_sched.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index c5fa9e4582..c91697131d 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -1257,8 +1257,6 @@ rte_sched_subport_config(struct rte_sched_port *port,
 
n_subports++;
 
-   subport_profile_id = 0;
-
/* Port */
port->subports[subport_id] = s;
 
-- 
2.25.1



[PATCH v5 3/3] sched: support for 100G+ rates in subport/pipe config

2022-10-28 Thread Megha Ajmera
Config load functions updated to support 100G rates
for subport and pipes.
Added new parse function to convert string to unsigned
long long.
Added error checks.
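
As an aside, here is a minimal standalone example of why the new helper is
needed (the rate value below is only an illustration): a 100 Gbit/s
token-bucket rate expressed in bytes per second does not fit in an int, so
the previous (uint64_t)atoi(entry) conversion could not handle it, while a
strtoull()-based parse with errno/endptr checks can.

    #include <errno.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* local copy of the helper added by this patch */
    static int parse_u64(const char *entry, uint64_t *val)
    {
        char *endptr;

        if (!entry || !val)
            return -EINVAL;

        errno = 0;
        *val = strtoull(entry, &endptr, 0);
        if (errno == EINVAL || errno == ERANGE || *endptr != '\0')
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        uint64_t rate;

        /* 100 Gbit/s in bytes per second, as it could appear in profile.cfg;
         * this value overflows a 32-bit int, so atoi() cannot parse it. */
        if (parse_u64("12500000000", &rate) != 0)
            return 1;
        printf("tb rate = %" PRIu64 " bytes/s\n", rate);
        return 0;
    }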

Signed-off-by: Megha Ajmera 
---
 examples/qos_sched/cfg_file.c | 180 +-
 examples/qos_sched/cfg_file.h |   2 +
 examples/qos_sched/init.c |  16 ++-
 3 files changed, 128 insertions(+), 70 deletions(-)

diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index ca871d3287..d203621fa4 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -25,6 +25,21 @@ uint32_t n_active_queues;
 
 struct rte_sched_cman_params cman_params;
 
+int parse_u64(const char *entry, uint64_t *val)
+{
+   char *endptr;
+   if(!entry || !val)
+   return -EINVAL;
+
+   errno = 0;
+
+   *val = strtoull(entry, &endptr, 0);
+   if (errno == EINVAL || errno == ERANGE || *endptr != '\0') {
+   return -EINVAL;
+   }
+   return 0;
+}
+
 int
 cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params 
*port_params)
 {
@@ -47,7 +62,7 @@ cfg_load_port(struct rte_cfgfile *cfg, struct 
rte_sched_port_params *port_params
 int
 cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params 
*pipe_params)
 {
-   int i, j;
+   int i, j, ret = 0;
char *next;
const char *entry;
int profiles;
@@ -63,68 +78,84 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct 
rte_sched_pipe_params *pipe_params
snprintf(pipe_name, sizeof(pipe_name), "pipe profile %d", j);
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb rate");
-   if (entry)
-   pipe_params[j].tb_rate = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tb_rate);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb size");
-   if (entry)
-   pipe_params[j].tb_size = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tb_size);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc period");
-   if (entry)
-   pipe_params[j].tc_period = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_period);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 rate");
-   if (entry)
-   pipe_params[j].tc_rate[0] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[0]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 rate");
-   if (entry)
-   pipe_params[j].tc_rate[1] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[1]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 rate");
-   if (entry)
-   pipe_params[j].tc_rate[2] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[2]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 rate");
-   if (entry)
-   pipe_params[j].tc_rate[3] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[3]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 4 rate");
-   if (entry)
-   pipe_params[j].tc_rate[4] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[4]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 5 rate");
-   if (entry)
-   pipe_params[j].tc_rate[5] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[5]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 6 rate");
-   if (entry)
-   pipe_params[j].tc_rate[6] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[6]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 7 rate");
-   if (entry)
-   pipe_params[j].tc_rate[7] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[7]);
+   if (ret)
+   return ret;
 
entry = rte_

[PATCH v5 2/3] sched: fix number of subport profiles

2022-10-28 Thread Megha Ajmera
Removed unused subport field from profile.cfg
Correctly using subport profile id in subport config load.

Fixes: 802d214dc880 ("examples/qos_sched: update subport rate dynamically")
Cc: sta...@dpdk.org

Signed-off-by: Megha Ajmera 
Acked-by: Cristian Dumitrescu 
---
 examples/qos_sched/cfg_file.c  | 2 +-
 examples/qos_sched/profile.cfg | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index 3d5d75fcf0..ca871d3287 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -157,7 +157,7 @@ cfg_load_subport_profile(struct rte_cfgfile *cfg,
 
profiles = rte_cfgfile_num_sections(cfg, "subport profile",
   sizeof("subport profile") - 1);
-   subport_params[0].n_pipe_profiles = profiles;
+   port_params.n_subport_profiles = profiles;
 
for (i = 0; i < profiles; i++) {
char sec_name[32];
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
index c9ec187c93..e8de101b6c 100644
--- a/examples/qos_sched/profile.cfg
+++ b/examples/qos_sched/profile.cfg
@@ -26,8 +26,6 @@ number of subports per port = 1
 number of pipes per subport = 4096
 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
 
-subport 0-8 = 0; These subports are configured with subport profile 0
-
 [subport profile 0]
 tb rate = 125000   ; Bytes per second
 tb size = 100  ; Bytes
-- 
2.25.1



[PATCH v3] gro: fix reordering of packets in GRO library

2022-10-28 Thread Kumara Parameshwaran
From: Kumara Parameshwaran 

When a TCP packet contains flags like PSH, it is returned
immediately to the application even though there might be packets of
the same flow in the GRO table. If the PSH flag is set on a segment,
packets up to that segment should be delivered immediately. But the
current implementation delivers the last arrived packet with the PSH
flag set, causing re-ordering.

With this patch, if a packet does not contain only the ACK flag and
there are no previous packets for the flow, the packet is returned
immediately; otherwise it is merged with the previous segments and the
flags of the last segment are applied to the entire merged segment.
This is the behaviour of the Linux stack as well.

Signed-off-by: Kumara Parameshwaran 
---
v1:
If the received packet is not a pure ACK packet, we check whether
there are any previous packets for the flow; if so, we include the
received packet in the coalescing logic as well and apply the flags
of the last received packet to the entire merged segment, which
avoids re-ordering.

Consider a case where P1(PSH), P2(ACK), P3(ACK) are received in burst mode.
P1 carries the PSH flag and, since there are no prior packets for the flow,
it is copied to unprocess_packets, while P2(ACK) and P3(ACK) are merged
together. In the existing code, P2 and P3 would be delivered as a single
segment first and the unprocess_packets would be copied afterwards, which
causes reordering. With the patch, the unprocessed packets are copied first
and then the packets from the GRO table.

Testing done:
The csum forwarding mode of test-pmd was modified to support the following:
a GET request of 10MB from client to server via test-pmd (static ARP entries
added on the client and server). GRO and TSO were enabled in test-pmd so that
packets received from the client MAC are sent to the server MAC and vice versa.
In the above testing, without the patch the client observed re-ordering of 25
packets, and with the patch no packet re-ordering was observed.

v2:
Fix warnings in commit and comment.
Do not consider a packet as a merge candidate if it contains the SYN/RST flag.

v3:
Fix warnings.

 lib/gro/gro_tcp4.c | 44 +---
 lib/gro/rte_gro.c  | 18 +-
 2 files changed, 46 insertions(+), 16 deletions(-)

diff --git a/lib/gro/gro_tcp4.c b/lib/gro/gro_tcp4.c
index 8f5e800250..2ce0c1391c 100644
--- a/lib/gro/gro_tcp4.c
+++ b/lib/gro/gro_tcp4.c
@@ -188,6 +188,19 @@ update_header(struct gro_tcp4_item *item)
pkt->l2_len);
 }
 
+static inline void
+update_tcp_hdr_flags(struct rte_tcp_hdr *tcp_hdr, struct rte_mbuf *pkt)
+{
+   struct rte_ether_hdr *eth_hdr;
+   struct rte_ipv4_hdr *ipv4_hdr;
+   struct rte_tcp_hdr *merged_tcp_hdr;
+
+   eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+   ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
+   merged_tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+   merged_tcp_hdr->tcp_flags |= tcp_hdr->tcp_flags;
+}
+
 int32_t
 gro_tcp4_reassemble(struct rte_mbuf *pkt,
struct gro_tcp4_tbl *tbl,
@@ -206,6 +219,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
uint32_t i, max_flow_num, remaining_flow_num;
int cmp;
uint8_t find;
+   uint32_t start_idx;
 
/*
 * Don't process the packet whose TCP header length is greater
@@ -219,12 +233,6 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
 
-   /*
-* Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
-* or CWR set.
-*/
-   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
-   return -1;
/*
 * Don't process the packet whose payload length is less than or
 * equal to 0.
@@ -263,12 +271,30 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
find = 1;
+   start_idx = tbl->flows[i].start_index;
break;
}
remaining_flow_num--;
}
}
 
+   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG) {
+   /*
+* Check and try merging the current TCP segment with the previous
+* TCP segment if the TCP header does not contain RST and SYN flag
+* There are cases where the last segment is sent with FIN|PSH|ACK
+* which should also be considered for merging with previous segments.
+*/
+   if (find && !(tcp_hdr->tcp_flags & (RTE_TCP_RST_FLAG|RTE_TCP

[PATCH] net/iavf: fix Tx descriptors for IPSec

2022-10-28 Thread Zhichao Zeng
This patch fixes the building of the context and data descriptors
on the scalar path for IPSec.

Fixes: f7c8c36fdeb7 ("net/iavf: enable inner and outer Tx checksum offload")

Signed-off-by: Radu Nicolau 
Signed-off-by: Zhichao Zeng 
Tested-by: Ke Xu 
---
 drivers/net/iavf/iavf_rxtx.c | 80 +++-
 1 file changed, 43 insertions(+), 37 deletions(-)

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 3292541ad9..bd5dd2d4ed 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -2417,43 +2417,45 @@ iavf_fill_ctx_desc_tunnelling_field(volatile uint64_t 
*qw0,
break;
}
 
-   /* L4TUNT: L4 Tunneling Type */
-   switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-   case RTE_MBUF_F_TX_TUNNEL_IPIP:
-   /* for non UDP / GRE tunneling, set to 00b */
-   break;
-   case RTE_MBUF_F_TX_TUNNEL_VXLAN:
-   case RTE_MBUF_F_TX_TUNNEL_GTP:
-   case RTE_MBUF_F_TX_TUNNEL_GENEVE:
-   eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
-   break;
-   case RTE_MBUF_F_TX_TUNNEL_GRE:
-   eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
-   break;
-   default:
-   PMD_TX_LOG(ERR, "Tunnel type not supported");
-   return;
-   }
+   if (!(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD)) {
+   /* L4TUNT: L4 Tunneling Type */
+   switch (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+   case RTE_MBUF_F_TX_TUNNEL_IPIP:
+   /* for non UDP / GRE tunneling, set to 00b */
+   break;
+   case RTE_MBUF_F_TX_TUNNEL_VXLAN:
+   case RTE_MBUF_F_TX_TUNNEL_GTP:
+   case RTE_MBUF_F_TX_TUNNEL_GENEVE:
+   eip_typ |= IAVF_TXD_CTX_UDP_TUNNELING;
+   break;
+   case RTE_MBUF_F_TX_TUNNEL_GRE:
+   eip_typ |= IAVF_TXD_CTX_GRE_TUNNELING;
+   break;
+   default:
+   PMD_TX_LOG(ERR, "Tunnel type not supported");
+   return;
+   }
 
-   /* L4TUNLEN: L4 Tunneling Length, in Words
-*
-* We depend on app to set rte_mbuf.l2_len correctly.
-* For IP in GRE it should be set to the length of the GRE
-* header;
-* For MAC in GRE or MAC in UDP it should be set to the length
-* of the GRE or UDP headers plus the inner MAC up to including
-* its last Ethertype.
-* If MPLS labels exists, it should include them as well.
-*/
-   eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
+   /* L4TUNLEN: L4 Tunneling Length, in Words
+*
+* We depend on app to set rte_mbuf.l2_len correctly.
+* For IP in GRE it should be set to the length of the GRE
+* header;
+* For MAC in GRE or MAC in UDP it should be set to the length
+* of the GRE or UDP headers plus the inner MAC up to including
+* its last Ethertype.
+* If MPLS labels exists, it should include them as well.
+*/
+   eip_typ |= (m->l2_len >> 1) << IAVF_TXD_CTX_QW0_NATLEN_SHIFT;
 
-   /**
-* Calculate the tunneling UDP checksum.
-* Shall be set only if L4TUNT = 01b and EIPT is not zero
-*/
-   if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
-   (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
-   eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
+   /**
+* Calculate the tunneling UDP checksum.
+* Shall be set only if L4TUNT = 01b and EIPT is not zero
+*/
+   if (!(eip_typ & IAVF_TX_CTX_EXT_IP_NONE) &&
+   (eip_typ & IAVF_TXD_CTX_UDP_TUNNELING))
+   eip_typ |= IAVF_TXD_CTX_QW0_L4T_CS_MASK;
+   }
 
*qw0 = eip_typ << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPT_SHIFT |
eip_len << IAVF_TXD_CTX_QW0_TUN_PARAMS_EIPLEN_SHIFT |
@@ -2591,7 +2593,8 @@ iavf_build_data_desc_cmd_offset_fields(volatile uint64_t 
*qw1,
}
 
/* Set MACLEN */
-   if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+   if (m->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK &&
+   !(m->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD))
offset |= (m->outer_l2_len >> 1)
<< IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
else
@@ -2844,7 +2847,10 @@ iavf_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts, uint16_t nb_pkts)
 
txe->mbuf = mb_seg;
 
-   if (mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
+   if ((mb_seg->ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) &&
+   (mb_seg->ol_flags &
+   (RTE_MBUF_F_TX_TCP_SEG |
+ 

[PATCH v4] gro: fix reordering of packets in GRO library

2022-10-28 Thread Kumara Parameshwaran
From: Kumara Parameshwaran 

When a TCP packet contains flags like PSH, it is returned
immediately to the application even though there might be packets of
the same flow in the GRO table. If the PSH flag is set on a segment,
packets up to that segment should be delivered immediately. But the
current implementation delivers the last arrived packet with the PSH
flag set, causing re-ordering.

With this patch, if a packet does not contain only the ACK flag and
there are no previous packets for the flow, the packet is returned
immediately; otherwise it is merged with the previous segments and the
flags of the last segment are applied to the entire merged segment.
This is the behaviour of the Linux stack as well.

Signed-off-by: Kumara Parameshwaran 
---
v1:
If the received packet is not a pure ACK packet, we check whether
there are any previous packets for the flow; if so, we include the
received packet in the coalescing logic as well and apply the flags
of the last received packet to the entire merged segment, which
avoids re-ordering.

Consider a case where P1(PSH), P2(ACK), P3(ACK) are received in burst mode.
P1 carries the PSH flag and, since there are no prior packets for the flow,
it is copied to unprocess_packets, while P2(ACK) and P3(ACK) are merged
together. In the existing code, P2 and P3 would be delivered as a single
segment first and the unprocess_packets would be copied afterwards, which
causes reordering. With the patch, the unprocessed packets are copied first
and then the packets from the GRO table.

Testing done:
The csum forwarding mode of test-pmd was modified to support the following:
a GET request of 10MB from client to server via test-pmd (static ARP entries
added on the client and server). GRO and TSO were enabled in test-pmd so that
packets received from the client MAC are sent to the server MAC and vice versa.
In the above testing, without the patch the client observed re-ordering of 25
packets, and with the patch no packet re-ordering was observed.

v2:
Fix warnings in commit and comment.
Do not consider a packet as a merge candidate if it contains the SYN/RST flag.

v3:
Fix warnings.

v4:
Rebase with master.

 lib/gro/gro_tcp4.c | 45 +
 lib/gro/rte_gro.c  | 18 +-
 2 files changed, 46 insertions(+), 17 deletions(-)

diff --git a/lib/gro/gro_tcp4.c b/lib/gro/gro_tcp4.c
index 0014096e63..7363c5d540 100644
--- a/lib/gro/gro_tcp4.c
+++ b/lib/gro/gro_tcp4.c
@@ -188,6 +188,19 @@ update_header(struct gro_tcp4_item *item)
pkt->l2_len);
 }
 
+static inline void
+update_tcp_hdr_flags(struct rte_tcp_hdr *tcp_hdr, struct rte_mbuf *pkt)
+{
+   struct rte_ether_hdr *eth_hdr;
+   struct rte_ipv4_hdr *ipv4_hdr;
+   struct rte_tcp_hdr *merged_tcp_hdr;
+
+   eth_hdr = rte_pktmbuf_mtod(pkt, struct rte_ether_hdr *);
+   ipv4_hdr = (struct rte_ipv4_hdr *)((char *)eth_hdr + pkt->l2_len);
+   merged_tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
+   merged_tcp_hdr->tcp_flags |= tcp_hdr->tcp_flags;
+}
+
 int32_t
 gro_tcp4_reassemble(struct rte_mbuf *pkt,
struct gro_tcp4_tbl *tbl,
@@ -206,6 +219,7 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
uint32_t i, max_flow_num, remaining_flow_num;
int cmp;
uint8_t find;
+   uint32_t start_idx;
 
/*
 * Don't process the packet whose TCP header length is greater
@@ -219,13 +233,6 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
hdr_len = pkt->l2_len + pkt->l3_len + pkt->l4_len;
 
-   /*
-* Don't process the packet which has FIN, SYN, RST, PSH, URG, ECE
-* or CWR set.
-*/
-   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG)
-   return -1;
-
/* trim the tail padding bytes */
ip_tlen = rte_be_to_cpu_16(ipv4_hdr->total_length);
if (pkt->pkt_len > (uint32_t)(ip_tlen + pkt->l2_len))
@@ -264,12 +271,30 @@ gro_tcp4_reassemble(struct rte_mbuf *pkt,
if (tbl->flows[i].start_index != INVALID_ARRAY_INDEX) {
if (is_same_tcp4_flow(tbl->flows[i].key, key)) {
find = 1;
+   start_idx = tbl->flows[i].start_index;
break;
}
remaining_flow_num--;
}
}
 
+   if (tcp_hdr->tcp_flags != RTE_TCP_ACK_FLAG) {
+   /*
+* Check and try merging the current TCP segment with the previous
+* TCP segment if the TCP header does not contain RST and SYN flag
+* There are cases where the last segment is sent with FIN|PSH|ACK
+* which should also be considered for merging with previous segments.
+   

[PATCH v6 1/3] sched: fix subport profile ID

2022-10-28 Thread Megha Ajmera
In rte_sched_subport_config() API, subport_profile_id is not set correctly.

Fixes: ac6fcb841b0f ("sched: update subport rate dynamically")
Cc: cristian.dumitre...@intel.com

Signed-off-by: Megha Ajmera 
Acked-by: Dumitrescu, Cristian 
---
 lib/sched/rte_sched.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index c5fa9e4582..c91697131d 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -1257,8 +1257,6 @@ rte_sched_subport_config(struct rte_sched_port *port,
 
n_subports++;
 
-   subport_profile_id = 0;
-
/* Port */
port->subports[subport_id] = s;
 
-- 
2.25.1



[PATCH v6 2/3] sched: fix number of subport profiles

2022-10-28 Thread Megha Ajmera
Removed unused subport field from profile.cfg
Correctly using subport profile id in subport config load.

Fixes: 802d214dc880 ("examples/qos_sched: update subport rate dynamically")
Cc: sta...@dpdk.org

Signed-off-by: Megha Ajmera 
Acked-by: Cristian Dumitrescu 
---
 examples/qos_sched/cfg_file.c  | 2 +-
 examples/qos_sched/profile.cfg | 2 --
 2 files changed, 1 insertion(+), 3 deletions(-)

diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index 3d5d75fcf0..ca871d3287 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -157,7 +157,7 @@ cfg_load_subport_profile(struct rte_cfgfile *cfg,
 
profiles = rte_cfgfile_num_sections(cfg, "subport profile",
   sizeof("subport profile") - 1);
-   subport_params[0].n_pipe_profiles = profiles;
+   port_params.n_subport_profiles = profiles;
 
for (i = 0; i < profiles; i++) {
char sec_name[32];
diff --git a/examples/qos_sched/profile.cfg b/examples/qos_sched/profile.cfg
index c9ec187c93..e8de101b6c 100644
--- a/examples/qos_sched/profile.cfg
+++ b/examples/qos_sched/profile.cfg
@@ -26,8 +26,6 @@ number of subports per port = 1
 number of pipes per subport = 4096
 queue sizes = 64 64 64 64 64 64 64 64 64 64 64 64 64
 
-subport 0-8 = 0; These subports are configured with subport profile 0
-
 [subport profile 0]
 tb rate = 125000   ; Bytes per second
 tb size = 100  ; Bytes
-- 
2.25.1



[PATCH v6 3/3] sched: support for 100G+ rates in subport/pipe config

2022-10-28 Thread Megha Ajmera
- Config load functions updated to support 100G rates
for subport and pipes.
- Added new parse function to convert string to unsigned
long long.
- Added error checks.
- Fixed format warnings.

Signed-off-by: Megha Ajmera 
---
 examples/qos_sched/cfg_file.c | 179 +-
 examples/qos_sched/cfg_file.h |   2 +
 examples/qos_sched/init.c |  23 -
 3 files changed, 133 insertions(+), 71 deletions(-)

diff --git a/examples/qos_sched/cfg_file.c b/examples/qos_sched/cfg_file.c
index ca871d3287..c75cf9db2e 100644
--- a/examples/qos_sched/cfg_file.c
+++ b/examples/qos_sched/cfg_file.c
@@ -25,6 +25,21 @@ uint32_t n_active_queues;
 
 struct rte_sched_cman_params cman_params;
 
+int parse_u64(const char *entry, uint64_t *val)
+{
+   char *endptr;
+   if (!entry || !val)
+   return -EINVAL;
+
+   errno = 0;
+
+   *val = strtoull(entry, &endptr, 0);
+   if (errno == EINVAL || errno == ERANGE || *endptr != '\0')
+   return -EINVAL;
+
+   return 0;
+}
+
 int
 cfg_load_port(struct rte_cfgfile *cfg, struct rte_sched_port_params 
*port_params)
 {
@@ -47,7 +62,7 @@ cfg_load_port(struct rte_cfgfile *cfg, struct 
rte_sched_port_params *port_params
 int
 cfg_load_pipe(struct rte_cfgfile *cfg, struct rte_sched_pipe_params 
*pipe_params)
 {
-   int i, j;
+   int i, j, ret = 0;
char *next;
const char *entry;
int profiles;
@@ -63,68 +78,84 @@ cfg_load_pipe(struct rte_cfgfile *cfg, struct 
rte_sched_pipe_params *pipe_params
snprintf(pipe_name, sizeof(pipe_name), "pipe profile %d", j);
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb rate");
-   if (entry)
-   pipe_params[j].tb_rate = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tb_rate);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tb size");
-   if (entry)
-   pipe_params[j].tb_size = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tb_size);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc period");
-   if (entry)
-   pipe_params[j].tc_period = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_period);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 0 rate");
-   if (entry)
-   pipe_params[j].tc_rate[0] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[0]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 1 rate");
-   if (entry)
-   pipe_params[j].tc_rate[1] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[1]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 2 rate");
-   if (entry)
-   pipe_params[j].tc_rate[2] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[2]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 3 rate");
-   if (entry)
-   pipe_params[j].tc_rate[3] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[3]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 4 rate");
-   if (entry)
-   pipe_params[j].tc_rate[4] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[4]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 5 rate");
-   if (entry)
-   pipe_params[j].tc_rate[5] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[5]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 6 rate");
-   if (entry)
-   pipe_params[j].tc_rate[6] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[6]);
+   if (ret)
+   return ret;
 
entry = rte_cfgfile_get_entry(cfg, pipe_name, "tc 7 rate");
-   if (entry)
-   pipe_params[j].tc_rate[7] = (uint64_t)atoi(entry);
+   ret = parse_u64(entry, &pipe_params[j].tc_rate[7]);
+   if (ret)
+   return ret;
 

Re: [PATCH v4 0/3] ethdev: AGE action preparation

2022-10-28 Thread Andrew Rybchenko

On 10/27/22 00:49, Michael Baum wrote:

RFC's:
https://patchwork.dpdk.org/project/dpdk/patch/7a45693f478b1b721b4e05131141b526185a175c.1654063912.git.jack...@nvidia.com/
https://patchwork.dpdk.org/project/dpdk/patch/608febf8d5d3c434a1eddb2e56f425ebbd6ff0b4.1654063912.git.jack...@nvidia.com/

v2:
- rebase.
- Add reference to "rte_flow_update_age" structure in
   "RTE_FLOW_ACTION_TYPE_AGE" definition.
- Add reference to "rte_flow_get_q_aged_flows" function in
   "RTE_FLOW_ACTION_TYPE_AGE" definition.
- Change the order of "rte_flow_update_age" structure members in
   documentation, to be aligned with the structure definition.
- Place long comment before struct member definition.

v3:
- Fix miss "break" in indirect action update switch-case.

v4:
- Remove unrelated doc fixes.


Applied to dpdk-next-net/main, thanks.



RE: [PATCH v6 3/3] sched: support for 100G+ rates in subport/pipe config

2022-10-28 Thread Dumitrescu, Cristian



> -Original Message-
> From: Ajmera, Megha 
> Sent: Friday, October 28, 2022 10:56 AM
> To: dev@dpdk.org; Singh, Jasvinder ;
> Dumitrescu, Cristian ;
> step...@networkplumber.org
> Cc: sta...@dpdk.org
> Subject: [PATCH v6 3/3] sched: support for 100G+ rates in subport/pipe
> config
> 
> - Config load functions updated to support 100G rates
> for subport and pipes.
> - Added new parse function to convert string to unsigned
> long long.
> - Added error checks.
> - Fixed format warnings.
> 
> Signed-off-by: Megha Ajmera 
> ---
>  examples/qos_sched/cfg_file.c | 179 +-
>  examples/qos_sched/cfg_file.h |   2 +
>  examples/qos_sched/init.c |  23 -
>  3 files changed, 133 insertions(+), 71 deletions(-)
> 

Acked-by: Cristian Dumitrescu 



Re: [PATCH V2] app/testpmd: update bond port configurations when add slave

2022-10-28 Thread humin (Q)



On 2022/10/28 14:20, Huisong Li wrote:

Some capabilities (like rx_offload_capa and tx_offload_capa) of a bonding
device in dev_info are zero when no slave is added, and they are updated
when a new slave device is added.

Capabilities that are updated dynamically may introduce some problems if not
handled properly. For example, reconfig() is called to initialize the bonding
port configuration when a bonding device is created. The global tx_mode is
assigned to dev_conf.txmode. DEV_TX_OFFLOAD_MBUF_FAST_FREE, which is the
default value of the global tx_mode.offloads in testpmd, is removed from the
bonding device configuration because of the zero rx_offload_capa.
As a result, this offload isn't set on the bonding device.

Generally, the port configuration of a bonding device must be within the
intersection of the capabilities of all slave devices. If the original port
configuration is used, the capabilities removed because of adding a new slave
may cause a failure when the bonding device is re-initialized.

So the port configuration of the bonding device also needs to be updated to
reflect the added and removed capabilities. In addition, this also helps to
ensure consistency between testpmd and the bonding device.

Signed-off-by: Huisong Li 
---
  - v2: fix a spelling error in commit log

---
  app/test-pmd/testpmd.c| 40 +++
  app/test-pmd/testpmd.h|  3 +-
  drivers/net/bonding/bonding_testpmd.c |  2 ++
  3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97adafacd0..7324b8865c 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2805,6 +2805,41 @@ fill_xstats_display_info(void)
fill_xstats_display_info_for_port(pi);
  }
  
+/*

+ * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
+ * device in dev_info is zero when no slave is added. And its capability of


“And its capability of will be ”, what does this mean ?


+ * will be updated when add a new slave device. So adding a device slave need
+ * to update the port configurations of bonding device.
+ */
+static void
+update_bonding_port_dev_conf(portid_t bond_pid)
+{
+#ifdef RTE_NET_BOND
+   struct rte_port *port = &ports[bond_pid];
+   uint16_t i;
+   int ret;
+
+   ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
+   if (ret != 0) {
+   fprintf(stderr, "Failed to get dev info for port = %u\n",
+   bond_pid);
+   return;
+   }
+
+   if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+   port->dev_conf.txmode.offloads |=
+   RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+   /* Apply Tx offloads configuration */
+   for (i = 0; i < port->dev_info.max_tx_queues; i++)
+   port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
+
+   port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
+   port->dev_info.flow_type_rss_offloads;
+#else
+   RTE_SET_USED(bond_pid);
+#endif
+}
+
  int
  start_port(portid_t pid)
  {
@@ -2869,6 +2904,11 @@ start_port(portid_t pid)
return -1;
}
  
+			if (port->bond_flag == 1 && port->update_conf == 1) {

+   update_bonding_port_dev_conf(pi);
+   port->update_conf = 0;
+   }
+
/* configure port */
diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
 nb_txq + nb_hairpinq,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7fef96f9b1..82714119e8 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -316,7 +316,8 @@ struct rte_port {
queueid_t   queue_nb; /**< nb. of queues for flow rules */
uint32_tqueue_sz; /**< size of a queue for flow rules */
uint8_t slave_flag : 1, /**< bonding slave port */
-   bond_flag : 1; /**< port is bond device */
+   bond_flag : 1, /**< port is bond device */
+   update_conf : 1; /**< need to update bonding 
device configuration */
struct port_template*pattern_templ_list; /**< Pattern templates. */
struct port_template*actions_templ_list; /**< Actions templates. */
struct port_table   *table_list; /**< Flow tables. */
diff --git a/drivers/net/bonding/bonding_testpmd.c 
b/drivers/net/bonding/bonding_testpmd.c
index 3941f4cf23..9529e16fb6 100644
--- a/drivers/net/bonding/bonding_testpmd.c
+++ b/drivers/net/bonding/bonding_testpmd.c
@@ -625,6 +625,7 @@ static void cmd_add_bonding_slave_parsed(void 
*parsed_result,
slave_port_id, master_port_id);
return;
}
+   ports[master_port_id].update_conf = 1;
init_

Re: [PATCH v4 1/2] build: allow to conditionally build apps

2022-10-28 Thread David Marchand
On Fri, Oct 14, 2022 at 10:45 AM Bruce Richardson
 wrote:
>
> On Fri, Oct 14, 2022 at 09:51:17AM +0200, Markus Theil wrote:
> > Makes apps configurable from meson, like already
> > possible for drivers.
> >
> > Signed-off-by: Markus Theil 
> Acked-by: Bruce Richardson 

Series applied, thanks.
I'll send the followup patch I suggested, please (formally) review it.


-- 
David Marchand



[PATCH] build: list selected applications

2022-10-28 Thread David Marchand
With the addition of enable/disable_apps meson options, it is a bit
harder to figure out which application is built, and why.

Display the list of applications in the same way we do for drivers and
libraries.

Signed-off-by: David Marchand 
---
 app/meson.build | 37 +++--
 meson.build | 23 +--
 2 files changed, 48 insertions(+), 12 deletions(-)

diff --git a/app/meson.build b/app/meson.build
index 96b9a78d3a..c3eea8acbd 100644
--- a/app/meson.build
+++ b/app/meson.build
@@ -1,8 +1,14 @@
 # SPDX-License-Identifier: BSD-3-Clause
 # Copyright(c) 2017-2019 Intel Corporation
 
-enabled_apps = get_option('enable_apps')
-disabled_apps = get_option('disable_apps')
+disable_apps = ',' + get_option('disable_apps')
+disable_apps = run_command(list_dir_globs, disable_apps, check: 
true).stdout().split()
+
+enable_apps = ',' + get_option('enable_apps')
+enable_apps = run_command(list_dir_globs, enable_apps, check: 
true).stdout().split()
+if enable_apps.length() == 0
+enable_apps = run_command(list_dir_globs, '*', check: 
true).stdout().split()
+endif
 
 apps = [
 'dumpcap',
@@ -29,13 +35,12 @@ if get_option('default_library') == 'static' and not 
is_windows
 default_ldflags += ['-Wl,--export-dynamic']
 endif
 
+enabled_apps = [] # used to print summary at the end
+
 foreach app:apps
-build = enabled_apps == '' or enabled_apps.contains(app)
-# let disabled_apps override enabled_apps
-if disabled_apps != ''
-build = build and not disabled_apps.contains(app)
-endif
 name = app
+build = true
+reason = '' # set if build == false to explain
 sources = []
 includes = []
 cflags = default_cflags
@@ -48,11 +53,17 @@ foreach app:apps
 ext_deps = []
 deps = []
 
-if not build
-continue
+if not enable_apps.contains(app)
+build = false
+reason = 'not in enabled apps build config'
+elif disable_apps.contains(app)
+build = false
+reason = 'explicitly disabled via build config'
 endif
 
-subdir(name)
+if build
+subdir(name)
+endif
 
 if build
 dep_objs = []
@@ -60,6 +71,7 @@ foreach app:apps
 var_name = get_option('default_library') + '_rte_' + d
 if not is_variable(var_name)
 build = false
+reason = 'missing internal dependency, "@0@"'.format(d)
 message('Missing dependency "@0@" for app "@1@"'.format(d, 
name))
 break
 endif
@@ -68,9 +80,14 @@ foreach app:apps
 endif
 
 if not build
+if reason != ''
+dpdk_apps_disabled += app
+set_variable(app.underscorify() + '_disable_reason', reason)
+endif
 continue
 endif
 
+enabled_apps += app
 link_libs = []
 if get_option('default_library') == 'static'
 link_libs = dpdk_static_libraries + dpdk_drivers
diff --git a/meson.build b/meson.build
index d1cf039297..f91d652bc5 100644
--- a/meson.build
+++ b/meson.build
@@ -42,6 +42,7 @@ dpdk_driver_classes = []
 dpdk_drivers = []
 dpdk_extra_ldflags = []
 dpdk_libs_deprecated = []
+dpdk_apps_disabled = []
 dpdk_libs_disabled = []
 dpdk_drvs_disabled = []
 testpmd_drivers_sources = []
@@ -115,8 +116,21 @@ if meson.is_subproject()
 subdir('buildtools/subproject')
 endif
 
-# final output, list all the libs and drivers to be built
-# this does not affect any part of the build, for information only.
+# Final output, list all the parts to be built.
+# This does not affect any part of the build, for information only.
+output_message = '\n=\nApplications 
Enabled\n=\n'
+output_message += '\napps:\n\t'
+output_count = 0
+foreach app:enabled_apps
+output_message += app + ', '
+output_count += 1
+if output_count == 8
+output_message += '\n\t'
+output_count = 0
+endif
+endforeach
+message(output_message + '\n')
+
 output_message = '\n=\nLibraries Enabled\n=\n'
 output_message += '\nlibs:\n\t'
 output_count = 0
@@ -147,6 +161,11 @@ endforeach
 message(output_message + '\n')
 
 output_message = '\n=\nContent Skipped\n=\n'
+output_message += '\napps:\n\t'
+foreach app:dpdk_apps_disabled
+reason = get_variable(app.underscorify() + '_disable_reason')
+output_message += app + ':\t' + reason + '\n\t'
+endforeach
 output_message += '\nlibs:\n\t'
 foreach lib:dpdk_libs_disabled
 reason = get_variable(lib.underscorify() + '_disable_reason')
-- 
2.37.3



Re: [PATCH] build: list selected applications

2022-10-28 Thread Bruce Richardson
On Fri, Oct 28, 2022 at 02:34:19PM +0200, David Marchand wrote:
> With the addition of enable/disable_apps meson options, it is a bit
> harder to figure out which application is built, and why.
> 
> Display the list of applications in the same way we do for drivers and
> libraries.
> 
> Signed-off-by: David Marchand 
> ---
Acked-by: Bruce Richardson 


Re: [PATCH v6 1/3] sched: fix subport profile ID

2022-10-28 Thread David Marchand
On Fri, Oct 28, 2022 at 11:58 AM Megha Ajmera  wrote:
>
> In rte_sched_subport_config() API, subport_profile_id is not set correctly.
>
> Fixes: ac6fcb841b0f ("sched: update subport rate dynamically")
> Cc: cristian.dumitre...@intel.com

Cc: sta...@dpdk.org

>
> Signed-off-by: Megha Ajmera 
> Acked-by: Dumitrescu, Cristian 

It should be Cristian Dumitrescu.

And there is no "dumitre...@dpdk.org".
I removed it before replying.


-- 
David Marchand



Re: [PATCH v6 2/3] sched: fix number of subport profiles

2022-10-28 Thread David Marchand
On Fri, Oct 28, 2022 at 11:59 AM Megha Ajmera  wrote:
>

Wrong prefix in the title, it should be examples/qos_sched:.


> Removed unused subport field from profile.cfg
> Correctly using subport profile id in subport config load.

This reads odd... but I'll keep it untouched.


>
> Fixes: 802d214dc880 ("examples/qos_sched: update subport rate dynamically")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Megha Ajmera 
> Acked-by: Cristian Dumitrescu 


-- 
David Marchand



Re: [PATCH v6 3/3] sched: support for 100G+ rates in subport/pipe config

2022-10-28 Thread David Marchand
On Fri, Oct 28, 2022 at 12:57 PM Dumitrescu, Cristian
 wrote:
> > Subject: [PATCH v6 3/3] sched: support for 100G+ rates in subport/pipe
> > config

As mentioned in 2/3, the title prefix is incorrect.

> >
> > - Config load functions updated to support 100G rates
> > for subport and pipes.
> > - Added new parse function to convert string to unsigned
> > long long.
> > - Added error checks.
> > - Fixed format warnings.

I reformatted this a bit as I don't understand why this patch deserves a list.


> >
> > Signed-off-by: Megha Ajmera 
> > ---
> >  examples/qos_sched/cfg_file.c | 179 +-
> >  examples/qos_sched/cfg_file.h |   2 +
> >  examples/qos_sched/init.c |  23 -
> >  3 files changed, 133 insertions(+), 71 deletions(-)
> >
>
> Acked-by: Cristian Dumitrescu 
>

Series applied with fixes.


-- 
David Marchand



Re: [PATCH] maintainers: remove obsolete contact details

2022-10-28 Thread David Marchand
On Tue, Oct 25, 2022 at 5:12 PM Mcnamara, John  wrote:
> > Bernard is no longer with Intel and is no longer involved in the DPDK
> > community, so remove him from the MAINTAINERS file.
>
> Acked-by: John McNamara 

Let's wish Bernard best of luck for the future.
Applied, thanks.


-- 
David Marchand



Re: [PATCH] flow_classify: mark library as deprecated

2022-10-28 Thread David Marchand
On Thu, Oct 27, 2022 at 4:14 PM Thomas Monjalon  wrote:
> > This library has no maintainer and, for now, nobody expressed interest
> > in taking over.
> > Mark this experimental library as deprecated and announce plan for
> > removal in v23.11.
> >
> > Signed-off-by: David Marchand 
>
> Acked-by: Thomas Monjalon 

Applied, thanks.


-- 
David Marchand



Re: [PATCH v11 02/18] net/idpf: add support for device initialization

2022-10-28 Thread Andrew Rybchenko

On 10/25/22 11:57, Andrew Rybchenko wrote:

On 10/24/22 16:12, Junfeng Guo wrote:

Support device init and add the following dev ops:
  - dev_configure
  - dev_close
  - dev_infos_get

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Xiao Wang 
Signed-off-by: Junfeng Guo 


[snip]


+struct idpf_adapter *
+idpf_find_adapter(struct rte_pci_device *pci_dev)


It looks like the function requires corresponding lock to be
held. If yes, it should be documented and code fixed. If no,
it should be explained why.


I still don't understand it is a new patch. It is hardly safe to
return a pointer to an element list when you drop lock.


+    /* valid only if rxq_model is split Q */
+    uint16_t num_rx_bufq;
+
+    uint16_t max_mtu;


unused


Comments? It is still in place in a new version.


+int
+idpf_vc_get_caps(struct idpf_adapter *adapter)
+{
+    struct virtchnl2_get_capabilities caps_msg;
+    struct idpf_cmd_info args;
+    int err;
+
+ memset(&caps_msg, 0, sizeof(struct virtchnl2_get_capabilities));
+ caps_msg.csum_caps =
+ VIRTCHNL2_CAP_TX_CSUM_L3_IPV4    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP    |
+ VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP    |
+ VIRTCHNL2_CAP_TX_CSUM_GENERIC    |
+ VIRTCHNL2_CAP_RX_CSUM_L3_IPV4    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP    |
+ VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP    |
+ VIRTCHNL2_CAP_RX_CSUM_GENERIC;
+
+ caps_msg.seg_caps =
+ VIRTCHNL2_CAP_SEG_IPV4_TCP    |
+ VIRTCHNL2_CAP_SEG_IPV4_UDP    |
+ VIRTCHNL2_CAP_SEG_IPV4_SCTP    |
+ VIRTCHNL2_CAP_SEG_IPV6_TCP    |
+ VIRTCHNL2_CAP_SEG_IPV6_UDP    |
+ VIRTCHNL2_CAP_SEG_IPV6_SCTP    |
+ VIRTCHNL2_CAP_SEG_GENERIC;
+
+ caps_msg.rss_caps =
+ VIRTCHNL2_CAP_RSS_IPV4_TCP    |
+ VIRTCHNL2_CAP_RSS_IPV4_UDP    |
+ VIRTCHNL2_CAP_RSS_IPV4_SCTP    |
+ VIRTCHNL2_CAP_RSS_IPV4_OTHER    |
+ VIRTCHNL2_CAP_RSS_IPV6_TCP    |
+ VIRTCHNL2_CAP_RSS_IPV6_UDP    |
+ VIRTCHNL2_CAP_RSS_IPV6_SCTP    |
+ VIRTCHNL2_CAP_RSS_IPV6_OTHER    |
+ VIRTCHNL2_CAP_RSS_IPV4_AH    |
+ VIRTCHNL2_CAP_RSS_IPV4_ESP    |
+ VIRTCHNL2_CAP_RSS_IPV4_AH_ESP    |
+ VIRTCHNL2_CAP_RSS_IPV6_AH    |
+ VIRTCHNL2_CAP_RSS_IPV6_ESP    |
+ VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
+
+ caps_msg.hsplit_caps =
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L2    |
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L3    |
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4    |
+ VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6;
+
+ caps_msg.rsc_caps =
+ VIRTCHNL2_CAP_RSC_IPV4_TCP    |
+ VIRTCHNL2_CAP_RSC_IPV4_SCTP    |
+ VIRTCHNL2_CAP_RSC_IPV6_TCP    |
+ VIRTCHNL2_CAP_RSC_IPV6_SCTP;
+
+ caps_msg.other_caps =
+ VIRTCHNL2_CAP_RDMA    |
+ VIRTCHNL2_CAP_SRIOV    |
+ VIRTCHNL2_CAP_MACFILTER    |
+ VIRTCHNL2_CAP_FLOW_DIRECTOR    |
+ VIRTCHNL2_CAP_SPLITQ_QSCHED    |
+ VIRTCHNL2_CAP_CRC    |
+ VIRTCHNL2_CAP_WB_ON_ITR    |
+ VIRTCHNL2_CAP_PROMISC    |
+ VIRTCHNL2_CAP_LINK_SPEED    |
+ VIRTCHNL2_CAP_VLAN;


I'm wondering why all the above capabilities are mentioned in the
patch? What does the API do? Does it request them? Negotiate?


Can I have an answer to my question?



Re: [PATCH v14 02/18] net/idpf: add support for device initialization

2022-10-28 Thread Andrew Rybchenko

On 10/27/22 10:47, Junfeng Guo wrote:

Support device init and add the following dev ops:
  - dev_configure
  - dev_close
  - dev_infos_get

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Xiao Wang 
Signed-off-by: Wenjun Wu 
Signed-off-by: Junfeng Guo 


[snip]


+static int idpf_dev_configure(struct rte_eth_dev *dev);
+static int idpf_dev_close(struct rte_eth_dev *dev);
+static int idpf_dev_info_get(struct rte_eth_dev *dev,
+struct rte_eth_dev_info *dev_info);
+static void idpf_adapter_rel(struct idpf_adapter *adapter);
+
+static const struct eth_dev_ops idpf_eth_dev_ops = {
+   .dev_configure  = idpf_dev_configure,
+   .dev_close  = idpf_dev_close,
+   .dev_infos_get  = idpf_dev_info_get,
+};


Typically it is better to avoid forward static declarations and
simply define the ops structure after callbacks.


+
+static int
+idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+   struct idpf_adapter *adapter = vport->adapter;
+
+   dev_info->max_rx_queues = adapter->caps->max_rx_q;
+   dev_info->max_tx_queues = adapter->caps->max_tx_q;
+   dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
+   dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
+
+   dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
+   dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+
+   dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;


I guess it makes sense if and only if you support the API
to add/remove unicast MAC addresses.


+
+   return 0;
+


[snip]


+static int
+idpf_init_vport(struct rte_eth_dev *dev)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+   struct idpf_adapter *adapter = vport->adapter;
+   uint16_t idx = adapter->cur_vport_idx;
+   struct virtchnl2_create_vport *vport_info =
+   (struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
+   int i, type, ret;
+
+   vport->vport_id = vport_info->vport_id;
+   vport->txq_model = vport_info->txq_model;
+   vport->rxq_model = vport_info->rxq_model;
+   vport->num_tx_q = vport_info->num_tx_q;
+   vport->num_tx_complq = vport_info->num_tx_complq;
+   vport->num_rx_q = vport_info->num_rx_q;
+   vport->num_rx_bufq = vport_info->num_rx_bufq;
+   vport->max_mtu = vport_info->max_mtu;
+   rte_memcpy(vport->default_mac_addr,
+  vport_info->default_mac_addr, ETH_ALEN);
+   vport->sw_idx = idx;
+
+   for (i = 0; i < vport_info->chunks.num_chunks; i++) {
+   type = vport_info->chunks.chunks[i].type;
+   switch (type) {
+   case VIRTCHNL2_QUEUE_TYPE_TX:
+   vport->chunks_info.tx_start_qid =
+   vport_info->chunks.chunks[i].start_queue_id;
+   vport->chunks_info.tx_qtail_start =
+   vport_info->chunks.chunks[i].qtail_reg_start;
+   vport->chunks_info.tx_qtail_spacing =
+   vport_info->chunks.chunks[i].qtail_reg_spacing;
+   break;
+   case VIRTCHNL2_QUEUE_TYPE_RX:
+   vport->chunks_info.rx_start_qid =
+   vport_info->chunks.chunks[i].start_queue_id;
+   vport->chunks_info.rx_qtail_start =
+   vport_info->chunks.chunks[i].qtail_reg_start;
+   vport->chunks_info.rx_qtail_spacing =
+   vport_info->chunks.chunks[i].qtail_reg_spacing;
+   break;
+   case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+   vport->chunks_info.tx_compl_start_qid =
+   vport_info->chunks.chunks[i].start_queue_id;
+   vport->chunks_info.tx_compl_qtail_start =
+   vport_info->chunks.chunks[i].qtail_reg_start;
+   vport->chunks_info.tx_compl_qtail_spacing =
+   vport_info->chunks.chunks[i].qtail_reg_spacing;
+   break;
+   case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+   vport->chunks_info.rx_buf_start_qid =
+   vport_info->chunks.chunks[i].start_queue_id;
+   vport->chunks_info.rx_buf_qtail_start =
+   vport_info->chunks.chunks[i].qtail_reg_start;
+   vport->chunks_info.rx_buf_qtail_spacing =
+   vport_info->chunks.chunks[i].qtail_reg_spacing;
+   break;
+   default:
+   PMD_INIT_LOG(ERR, "Unsupported queue type");
+   break;
+   }
+   }
+
+   ret = idpf_parse_devarg_id(dev->data->name);
+   if (ret < 0) {
+ 

[PATCH] MAINTAINERS: remove sthemmin@microsoft

2022-10-28 Thread Stephen Hemminger
These are my last couple of days at Microsoft.
Remove the old email from MAINTAINERS.
Will no longer have free access to Azure to work on Netvsc.

Signed-off-by: Stephen Hemminger 
---
 MAINTAINERS | 2 --
 1 file changed, 2 deletions(-)

diff --git a/MAINTAINERS b/MAINTAINERS
index 2bd4a55f1b0a..bce5a825c967 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -577,7 +577,6 @@ F: drivers/bus/vdev/
 F: app/test/test_vdev.c
 
 VMBUS bus driver
-M: Stephen Hemminger 
 M: Long Li 
 F: drivers/bus/vmbus/
 
@@ -840,7 +839,6 @@ F: drivers/net/vdev_netvsc/
 F: doc/guides/nics/vdev_netvsc.rst
 
 Microsoft Hyper-V netvsc
-M: Stephen Hemminger 
 M: Long Li 
 F: drivers/net/netvsc/
 F: doc/guides/nics/netvsc.rst
-- 
2.35.1



Re: [PATCH v14 05/18] net/idpf: add support for device start and stop

2022-10-28 Thread Andrew Rybchenko

On 10/27/22 10:47, Junfeng Guo wrote:

Add dev ops dev_start, dev_stop and link_update.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 


[snip]


@@ -284,6 +305,40 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
  }
  
+static int

+idpf_dev_start(struct rte_eth_dev *dev)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+
+   if (dev->data->mtu > vport->max_mtu) {
+   PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+   return -1;


Negative errno must be returned.


+   }
+
+   vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
+
+   /* TODO: start queues */
+
+   if (idpf_vc_ena_dis_vport(vport, true) != 0) {
+   PMD_DRV_LOG(ERR, "Failed to enable vport");
+   return -1;


same here


+   }
+
+   return 0;
+}


[snip]



Re: [PATCH v14 06/18] net/idpf: add support for queue start

2022-10-28 Thread Andrew Rybchenko

On 10/27/22 10:47, Junfeng Guo wrote:

Add support for these device ops:
  - rx_queue_start
  - tx_queue_start

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 


[snip]


+#define IDPF_RX_BUF_STRIDE 64
+int
+idpf_vc_config_rxqs(struct idpf_vport *vport)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct idpf_rx_queue **rxq =
+   (struct idpf_rx_queue **)vport->dev_data->rx_queues;
+   struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+   struct virtchnl2_rxq_info *rxq_info;
+   struct idpf_cmd_info args;
+   uint16_t total_qs, num_qs;
+   int size, i, j;
+   int err = 0;
+   int k = 0;
+
+   total_qs = vport->num_rx_q + vport->num_rx_bufq;
+   while (total_qs) {
+   if (total_qs > adapter->max_rxq_per_msg) {
+   num_qs = adapter->max_rxq_per_msg;
+   total_qs -= adapter->max_rxq_per_msg;
+   } else {
+   num_qs = total_qs;
+   total_qs = 0;
+   }
+
+   size = sizeof(*vc_rxqs) + (num_qs - 1) *
+   sizeof(struct virtchnl2_rxq_info);
+   vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+   if (vc_rxqs == NULL) {
+   PMD_DRV_LOG(ERR, "Failed to allocate 
virtchnl2_config_rx_queues");
+   err = -ENOMEM;
+   break;
+   }
+   vc_rxqs->vport_id = vport->vport_id;
+   vc_rxqs->num_qinfo = num_qs;
+   if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+   for (i = 0; i < num_qs; i++, k++) {
+   rxq_info = &vc_rxqs->qinfo[i];
+   rxq_info->dma_ring_addr = 
rxq[k]->rx_ring_phys_addr;
+   rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+   rxq_info->queue_id = rxq[k]->queue_id;
+   rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
+   rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+   rxq_info->max_pkt_size = vport->max_pkt_len;
+
+   rxq_info->desc_ids = 
VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
+   rxq_info->qflags |= 
VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+   rxq_info->ring_len = rxq[k]->nb_rx_desc;
+   }
+   } else {
+   for (i = 0; i < num_qs / 3; i++, k++) {
+   /* Rx queue */
+   rxq_info = &vc_rxqs->qinfo[i * 3];
+   rxq_info->dma_ring_addr =
+   rxq[k]->rx_ring_phys_addr;
+   rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
+   rxq_info->queue_id = rxq[k]->queue_id;
+   rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+   rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
+   rxq_info->max_pkt_size = vport->max_pkt_len;
+
+   rxq_info->desc_ids = 
VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+   rxq_info->qflags |= 
VIRTCHNL2_RX_DESC_SIZE_32BYTE;
+
+   rxq_info->ring_len = rxq[k]->nb_rx_desc;
+   rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
+   rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
+   rxq_info->rx_buffer_low_watermark = 64;
+
+   /* Buffer queue */
+   for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
+   struct idpf_rx_queue *bufq = j == 1 ?
+   rxq[k]->bufq1 : rxq[k]->bufq2;
+   rxq_info = &vc_rxqs->qinfo[i * 3 + j];
+   rxq_info->dma_ring_addr =
+   bufq->rx_ring_phys_addr;
+   rxq_info->type =
+   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+   rxq_info->queue_id = bufq->queue_id;
+   rxq_info->model = 
VIRTCHNL2_QUEUE_MODEL_SPLIT;
+   rxq_info->data_buffer_size = 
bufq->rx_buf_len;
+   rxq_info->desc_ids =
+   VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+   rxq_info->ring_len = bufq->nb_rx_desc;
+
+   rxq_info->buffer_notif_stride =
+   IDPF_RX_BUF_STRIDE;
+

Re: [PATCH] build: list selected applications

2022-10-28 Thread Markus Theil

On 10/28/22 14:34, David Marchand wrote:

With the addition of enable/disable_apps meson options, it is a bit
harder to figure out which application is built, and why.

Display the list of applications in the same way we do for drivers and
libraries.

Signed-off-by: David Marchand
---

Reviewed-by: Markus Theil 

RE: [PATCH v11 02/18] net/idpf: add support for device initialization

2022-10-28 Thread Xing, Beilei


> -Original Message-
> From: Andrew Rybchenko 
> Sent: Friday, October 28, 2022 11:14 PM
> To: Guo, Junfeng ; Zhang, Qi Z
> ; Wu, Jingjing ; Xing, Beilei
> 
> Cc: dev@dpdk.org; Li, Xiaoyun ; Wang, Xiao W
> 
> Subject: Re: [PATCH v11 02/18] net/idpf: add support for device initialization
> 
> On 10/25/22 11:57, Andrew Rybchenko wrote:
> > On 10/24/22 16:12, Junfeng Guo wrote:
> >> Support device init and add the following dev ops:
> >>   - dev_configure
> >>   - dev_close
> >>   - dev_infos_get
> >>
> >> Signed-off-by: Beilei Xing 
> >> Signed-off-by: Xiaoyun Li 
> >> Signed-off-by: Xiao Wang 
> >> Signed-off-by: Junfeng Guo 
> 
> [snip]
> 
> >> +struct idpf_adapter *
> >> +idpf_find_adapter(struct rte_pci_device *pci_dev)
> >
> > It looks like the function requires corresponding lock to be held. If
> > yes, it should be documented and code fixed. If no, it should be
> > explaiend why.
> 
> I still don't understand; it is the same in the new patch. It is hardly safe
> to return a pointer to a list element when you drop the lock.

Sorry, I misunderstood your last comment; I thought you meant the lock for
adapter_list. I don't think we need a lock for the adapter: one adapter here
maps to one PCI device, not to one ethdev. We can create several vports for
one adapter, and each vport maps to one ethdev.
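
For context, the hazard being described is the generic one below (a minimal
sketch with hypothetical names, not the idpf code): if the lookup drops the
lock before returning, the caller is left holding a pointer that a concurrent
remove could free. Whether that matters here depends on whether an adapter can
be removed while another thread still holds the pointer.

#include <pthread.h>

struct adapter {
	int id;
	int refcnt;
	struct adapter *next;
};

static struct adapter *adapter_list;
static pthread_mutex_t adapter_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unsafe: the element may be freed by a concurrent remove as soon as
 * the lock is dropped, so the returned pointer can dangle.
 */
struct adapter *
find_adapter_unsafe(int id)
{
	struct adapter *a;

	pthread_mutex_lock(&adapter_lock);
	for (a = adapter_list; a != NULL; a = a->next)
		if (a->id == id)
			break;
	pthread_mutex_unlock(&adapter_lock);
	return a;
}

/* Safer: take a reference while the lock is still held, so the element
 * cannot disappear before the caller is done with it.
 */
struct adapter *
find_adapter_safe(int id)
{
	struct adapter *a;

	pthread_mutex_lock(&adapter_lock);
	for (a = adapter_list; a != NULL; a = a->next)
		if (a->id == id) {
			a->refcnt++;
			break;
		}
	pthread_mutex_unlock(&adapter_lock);
	return a;
}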
  
> 
> >> +    /* valid only if rxq_model is split Q */
> >> +    uint16_t num_rx_bufq;
> >> +
> >> +    uint16_t max_mtu;
> >
> > unused
> 
> Comments? It is still in place in a new version.

All the above info is returned by the backend when creating a vport, so we
save it after creating the vport.

> 
> >> +int
> >> +idpf_vc_get_caps(struct idpf_adapter *adapter) {
> >> +    struct virtchnl2_get_capabilities caps_msg;
> >> +    struct idpf_cmd_info args;
> >> +    int err;
> >> +
> >> + memset(&caps_msg, 0, sizeof(struct
> >> +virtchnl2_get_capabilities));
> >> + caps_msg.csum_caps =
> >> + VIRTCHNL2_CAP_TX_CSUM_L3_IPV4    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_TCP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_UDP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV4_SCTP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_TCP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_UDP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_L4_IPV6_SCTP    |
> >> + VIRTCHNL2_CAP_TX_CSUM_GENERIC    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L3_IPV4    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_TCP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_UDP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV4_SCTP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_TCP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_UDP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_L4_IPV6_SCTP    |
> >> + VIRTCHNL2_CAP_RX_CSUM_GENERIC;
> >> +
> >> + caps_msg.seg_caps =
> >> + VIRTCHNL2_CAP_SEG_IPV4_TCP    |
> >> + VIRTCHNL2_CAP_SEG_IPV4_UDP    |
> >> + VIRTCHNL2_CAP_SEG_IPV4_SCTP    |
> >> + VIRTCHNL2_CAP_SEG_IPV6_TCP    |
> >> + VIRTCHNL2_CAP_SEG_IPV6_UDP    |
> >> + VIRTCHNL2_CAP_SEG_IPV6_SCTP    |
> >> + VIRTCHNL2_CAP_SEG_GENERIC;
> >> +
> >> + caps_msg.rss_caps =
> >> + VIRTCHNL2_CAP_RSS_IPV4_TCP    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_UDP    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_SCTP    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_OTHER    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_TCP    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_UDP    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_SCTP    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_OTHER    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_AH    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_ESP    |
> >> + VIRTCHNL2_CAP_RSS_IPV4_AH_ESP    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_AH    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_ESP    |
> >> + VIRTCHNL2_CAP_RSS_IPV6_AH_ESP;
> >> +
> >> + caps_msg.hsplit_caps =
> >> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L2    |
> >> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L3    |
> >> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V4    |
> >> + VIRTCHNL2_CAP_RX_HSPLIT_AT_L4V6;
> >> +
> >> + caps_msg.rsc_caps =
> >> + VIRTCHNL2_CAP_RSC_IPV4_TCP    |
> >> + VIRTCHNL2_CAP_RSC_IPV4_SCTP    |
> >> + VIRTCHNL2_CAP_RSC_IPV6_TCP    |
> >> + VIRTCHNL2_CAP_RSC_IPV6_SCTP;
> >> +
> >> + caps_msg.other_caps =
> >> + VIRTCHNL2_CAP_RDMA    |
> >> + VIRTCHNL2_CAP_SRIOV    |
> >> + VIRTCHNL2_CAP_MACFILTER    |
> >> + VIRTCHNL2_CAP_FLOW_DIRECTOR    |
> >> + VIRTCHNL2_CAP_SPLITQ_QSCHED    |
> >> + VIRTCHNL2_CAP_CRC    |
> >> + VIRTCHNL2_CAP_WB_ON_ITR    |
> >> + VIRTCHNL2_CAP_PROMISC    |
> >> + VIRTCHNL2_CAP_LINK_SPEED    |
> >> + VIRTCHNL2_CAP_VLAN;
> >
> > I'm wondering why all above capabilities are mentioned in the patch?
> > What does the API 

RE: [PATCH v14 02/18] net/idpf: add support for device initialization

2022-10-28 Thread Xing, Beilei


> -Original Message-
> From: Andrew Rybchenko 
> Sent: Friday, October 28, 2022 11:35 PM
> To: Guo, Junfeng ; Zhang, Qi Z
> ; Wu, Jingjing ; Xing, Beilei
> 
> Cc: dev@dpdk.org; Li, Xiaoyun ; Wang, Xiao W
> ; Wu, Wenjun1 
> Subject: Re: [PATCH v14 02/18] net/idpf: add support for device initialization
> 
> On 10/27/22 10:47, Junfeng Guo wrote:
> > Support device init and add the following dev ops:
> >   - dev_configure
> >   - dev_close
> >   - dev_infos_get
> >
> > Signed-off-by: Beilei Xing 
> > Signed-off-by: Xiaoyun Li 
> > Signed-off-by: Xiao Wang 
> > Signed-off-by: Wenjun Wu 
> > Signed-off-by: Junfeng Guo 
> 
> [snip]
> 
> > +static int idpf_dev_configure(struct rte_eth_dev *dev); static int
> > +idpf_dev_close(struct rte_eth_dev *dev); static int
> > +idpf_dev_info_get(struct rte_eth_dev *dev,
> > +struct rte_eth_dev_info *dev_info); static void
> > +idpf_adapter_rel(struct idpf_adapter *adapter);
> > +
> > +static const struct eth_dev_ops idpf_eth_dev_ops = {
> > +   .dev_configure  = idpf_dev_configure,
> > +   .dev_close  = idpf_dev_close,
> > +   .dev_infos_get  = idpf_dev_info_get,
> > +};
> 
> Typically it is better to avoid forward static declarations and simply define
> the ops structure after callbacks.

OK, will fix it in v15.
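
For illustration, the suggested layout is roughly the following (hypothetical
names, not the actual idpf callbacks): define the callbacks first, then the
ops table, so no forward declarations are needed.

struct demo_dev;

struct demo_dev_ops {
	int (*dev_configure)(struct demo_dev *dev);
	int (*dev_close)(struct demo_dev *dev);
};

/* Callbacks are defined first... */
static int
demo_dev_configure(struct demo_dev *dev)
{
	(void)dev;
	return 0;
}

static int
demo_dev_close(struct demo_dev *dev)
{
	(void)dev;
	return 0;
}

/* ...and the ops table comes last, so no forward declarations are needed. */
static const struct demo_dev_ops demo_eth_dev_ops = {
	.dev_configure = demo_dev_configure,
	.dev_close     = demo_dev_close,
};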

> 
> > +
> > +static int
> > +idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info
> > +*dev_info) {
> > +   struct idpf_vport *vport = dev->data->dev_private;
> > +   struct idpf_adapter *adapter = vport->adapter;
> > +
> > +   dev_info->max_rx_queues = adapter->caps->max_rx_q;
> > +   dev_info->max_tx_queues = adapter->caps->max_tx_q;
> > +   dev_info->min_rx_bufsize = IDPF_MIN_BUF_SIZE;
> > +   dev_info->max_rx_pktlen = IDPF_MAX_FRAME_SIZE;
> > +
> > +   dev_info->max_mtu = dev_info->max_rx_pktlen -
> IDPF_ETH_OVERHEAD;
> > +   dev_info->min_mtu = RTE_ETHER_MIN_MTU;
> > +
> > +   dev_info->max_mac_addrs = IDPF_NUM_MACADDR_MAX;
> 
> I guess it makes sense if and only if you support the API to add/remove
> unicast MAC addresses.

Yes, will remove this info.
> 
> > +
> > +   return 0;
> > +
> 
> [snip]
> 
> > +static int
> > +idpf_init_vport(struct rte_eth_dev *dev) {
> > +   struct idpf_vport *vport = dev->data->dev_private;
> > +   struct idpf_adapter *adapter = vport->adapter;
> > +   uint16_t idx = adapter->cur_vport_idx;
> > +   struct virtchnl2_create_vport *vport_info =
> >> +   (struct virtchnl2_create_vport *)adapter->vport_recv_info[idx];
> > +   int i, type, ret;
> > +
> > +   vport->vport_id = vport_info->vport_id;
> > +   vport->txq_model = vport_info->txq_model;
> > +   vport->rxq_model = vport_info->rxq_model;
> > +   vport->num_tx_q = vport_info->num_tx_q;
> > +   vport->num_tx_complq = vport_info->num_tx_complq;
> > +   vport->num_rx_q = vport_info->num_rx_q;
> > +   vport->num_rx_bufq = vport_info->num_rx_bufq;
> > +   vport->max_mtu = vport_info->max_mtu;
> > +   rte_memcpy(vport->default_mac_addr,
> > +  vport_info->default_mac_addr, ETH_ALEN);
> > +   vport->sw_idx = idx;
> > +
> > +   for (i = 0; i < vport_info->chunks.num_chunks; i++) {
> > +   type = vport_info->chunks.chunks[i].type;
> > +   switch (type) {
> > +   case VIRTCHNL2_QUEUE_TYPE_TX:
> > +   vport->chunks_info.tx_start_qid =
> > +   vport_info->chunks.chunks[i].start_queue_id;
> > +   vport->chunks_info.tx_qtail_start =
> > +   vport_info->chunks.chunks[i].qtail_reg_start;
> > +   vport->chunks_info.tx_qtail_spacing =
> >> +   vport_info->chunks.chunks[i].qtail_reg_spacing;
> > +   break;
> > +   case VIRTCHNL2_QUEUE_TYPE_RX:
> > +   vport->chunks_info.rx_start_qid =
> > +   vport_info->chunks.chunks[i].start_queue_id;
> > +   vport->chunks_info.rx_qtail_start =
> > +   vport_info->chunks.chunks[i].qtail_reg_start;
> > +   vport->chunks_info.rx_qtail_spacing =
> >> +   vport_info->chunks.chunks[i].qtail_reg_spacing;
> > +   break;
> > +   case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
> > +   vport->chunks_info.tx_compl_start_qid =
> > +   vport_info->chunks.chunks[i].start_queue_id;
> > +   vport->chunks_info.tx_compl_qtail_start =
> > +   vport_info->chunks.chunks[i].qtail_reg_start;
> > +   vport->chunks_info.tx_compl_qtail_spacing =
> >> +   vport_info->chunks.chunks[i].qtail_reg_spacing;
> > +   break;
> > +   case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
> > +   vport->chunks_info.rx_buf_start_qid =
> > +   vport_info->chunks.chunks[i].start_queue_id;
> > +   vport-

RE: [PATCH v14 06/18] net/idpf: add support for queue start

2022-10-28 Thread Xing, Beilei


> -Original Message-
> From: Andrew Rybchenko 
> Sent: Friday, October 28, 2022 11:51 PM
> To: Guo, Junfeng ; Zhang, Qi Z
> ; Wu, Jingjing ; Xing, Beilei
> 
> Cc: dev@dpdk.org; Li, Xiaoyun 
> Subject: Re: [PATCH v14 06/18] net/idpf: add support for queue start
> 
> On 10/27/22 10:47, Junfeng Guo wrote:
> > Add support for these device ops:
> >   - rx_queue_start
> >   - tx_queue_start
> >
> > Signed-off-by: Beilei Xing 
> > Signed-off-by: Xiaoyun Li 
> > Signed-off-by: Junfeng Guo 
> 
> [snip]
> 
> > +#define IDPF_RX_BUF_STRIDE 64
> > +int
> > +idpf_vc_config_rxqs(struct idpf_vport *vport) {
> > +   struct idpf_adapter *adapter = vport->adapter;
> > +   struct idpf_rx_queue **rxq =
> > +   (struct idpf_rx_queue **)vport->dev_data->rx_queues;
> > +   struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
> > +   struct virtchnl2_rxq_info *rxq_info;
> > +   struct idpf_cmd_info args;
> > +   uint16_t total_qs, num_qs;
> > +   int size, i, j;
> > +   int err = 0;
> > +   int k = 0;
> > +
> > +   total_qs = vport->num_rx_q + vport->num_rx_bufq;
> > +   while (total_qs) {
> > +   if (total_qs > adapter->max_rxq_per_msg) {
> > +   num_qs = adapter->max_rxq_per_msg;
> > +   total_qs -= adapter->max_rxq_per_msg;
> > +   } else {
> > +   num_qs = total_qs;
> > +   total_qs = 0;
> > +   }
> > +
> > +   size = sizeof(*vc_rxqs) + (num_qs - 1) *
> > +   sizeof(struct virtchnl2_rxq_info);
> > +   vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
> > +   if (vc_rxqs == NULL) {
> > +   PMD_DRV_LOG(ERR, "Failed to allocate
> virtchnl2_config_rx_queues");
> > +   err = -ENOMEM;
> > +   break;
> > +   }
> > +   vc_rxqs->vport_id = vport->vport_id;
> > +   vc_rxqs->num_qinfo = num_qs;
> > +   if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
> {
> > +   for (i = 0; i < num_qs; i++, k++) {
> > +   rxq_info = &vc_rxqs->qinfo[i];
> > +   rxq_info->dma_ring_addr = rxq[k]->rx_ring_phys_addr;
> > +   rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +   rxq_info->queue_id = rxq[k]->queue_id;
> > +   rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SINGLE;
> > +   rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
> > +   rxq_info->max_pkt_size = vport->max_pkt_len;
> > +
> > +   rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SQ_NIC_M;
> > +   rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> > +
> > +   rxq_info->ring_len = rxq[k]->nb_rx_desc;
> > +   }
> > +   } else {
> > +   for (i = 0; i < num_qs / 3; i++, k++) {
> > +   /* Rx queue */
> > +   rxq_info = &vc_rxqs->qinfo[i * 3];
> > +   rxq_info->dma_ring_addr =
> > +   rxq[k]->rx_ring_phys_addr;
> > +   rxq_info->type = VIRTCHNL2_QUEUE_TYPE_RX;
> > +   rxq_info->queue_id = rxq[k]->queue_id;
> > +   rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +   rxq_info->data_buffer_size = rxq[k]->rx_buf_len;
> > +   rxq_info->max_pkt_size = vport->max_pkt_len;
> > +
> > +   rxq_info->desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
> > +   rxq_info->qflags |= VIRTCHNL2_RX_DESC_SIZE_32BYTE;
> > +
> > +   rxq_info->ring_len = rxq[k]->nb_rx_desc;
> > +   rxq_info->rx_bufq1_id = rxq[k]->bufq1->queue_id;
> > +   rxq_info->rx_bufq2_id = rxq[k]->bufq2->queue_id;
> > +   rxq_info->rx_buffer_low_watermark = 64;
> > +
> > +   /* Buffer queue */
> > +   for (j = 1; j <= IDPF_RX_BUFQ_PER_GRP; j++) {
> > +   struct idpf_rx_queue *bufq = j == 1 ?
> > +   rxq[k]->bufq1 : rxq[k]->bufq2;
> > +   rxq_info = &vc_rxqs->qinfo[i * 3 + j];
> > +   rxq_info->dma_ring_addr =
> > +   bufq->rx_ring_phys_addr;
> > +   rxq_info->type =
> > +   VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
> > +   rxq_info->queue_id = bufq->queue_id;
> > +   rxq_info->model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
> > +   rxq_info->data_buffer_size = bufq->rx_buf_len;
> > +

Re: [PATCH v5] net/iavf: add thread for event callbacks

2022-10-28 Thread Thomas Monjalon
20/10/2022 07:00, Yiding Zhou:
> This commit add a new thread to call all event callbacks.

You may be interested to look at the new API rte_thread_*
instead of calling pthread directly.
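
For reference, a minimal sketch of creating such a worker with the rte_thread
API (hypothetical handler name; error handling trimmed):

#include <stdint.h>
#include <rte_thread.h>

/* rte_thread workers return uint32_t instead of the pthread void * */
static uint32_t
event_handler_thread(void *arg)
{
	(void)arg;
	/* ... dispatch queued event callbacks ... */
	return 0;
}

static int
start_event_thread(void)
{
	rte_thread_t tid;
	int ret;

	ret = rte_thread_create(&tid, NULL, event_handler_thread, NULL);
	if (ret != 0)
		return ret;
	/* later: rte_thread_join(tid, NULL); */
	return 0;
}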





[PATCH] Add support for IBM Z s390x

2022-10-28 Thread David Miller
Signed-off-by: David Miller 
Reviewed-by: Mathew S Thoennes 
---
 app/test-acl/main.c  |   4 +
 app/test-pmd/config.c|  12 +-
 app/test/test_acl.c  |   1 +
 app/test/test_atomic.c   |   7 +-
 app/test/test_cmdline.c  |   6 +-
 app/test/test_cmdline_ipaddr.c   |  11 +
 app/test/test_cmdline_num.c  | 110 
 app/test/test_hash_functions.c   |  29 +
 app/test/test_xmmt_ops.h |  14 +
 buildtools/pmdinfogen.py |  11 +-
 config/meson.build   |   2 +
 config/s390x/meson.build |  51 ++
 config/s390x/s390x_linux_clang_ubuntu|  19 +
 doc/guides/nics/features/i40e.ini|   1 +
 drivers/common/mlx5/mlx5_common.h|   9 +
 drivers/net/i40e/i40e_rxtx_vec_s390x.c   | 630 +++
 drivers/net/i40e/meson.build |   2 +
 drivers/net/ixgbe/ixgbe_rxtx.c   |   2 +-
 drivers/net/memif/rte_eth_memif.h|   2 +
 drivers/net/mlx5/mlx5_rx.c   |  22 +-
 drivers/net/octeontx/base/octeontx_pki_var.h |   6 +
 examples/l3fwd-acl/main.c|   4 +
 examples/l3fwd/l3fwd_em.c|   8 +
 examples/l3fwd/l3fwd_lpm_s390x.h | 137 
 examples/l3fwd/l3fwd_s390x.h | 259 
 lib/acl/acl_bld.c|   3 +
 lib/acl/acl_gen.c|   9 +
 lib/acl/acl_run_scalar.c |   8 +
 lib/acl/rte_acl.c|  27 +
 lib/acl/rte_acl.h|   5 +-
 lib/eal/s390x/include/meson.build|  16 +
 lib/eal/s390x/include/rte_atomic.h   |  47 ++
 lib/eal/s390x/include/rte_byteorder.h|  43 ++
 lib/eal/s390x/include/rte_cpuflags.h |  42 ++
 lib/eal/s390x/include/rte_cycles.h   |  44 ++
 lib/eal/s390x/include/rte_io.h   | 184 ++
 lib/eal/s390x/include/rte_mcslock.h  |  18 +
 lib/eal/s390x/include/rte_memcpy.h   |  55 ++
 lib/eal/s390x/include/rte_pause.h|  22 +
 lib/eal/s390x/include/rte_power_intrinsics.h |  20 +
 lib/eal/s390x/include/rte_prefetch.h |  46 ++
 lib/eal/s390x/include/rte_rwlock.h   |  42 ++
 lib/eal/s390x/include/rte_spinlock.h |  85 +++
 lib/eal/s390x/include/rte_ticketlock.h   |  18 +
 lib/eal/s390x/include/rte_vect.h |  35 ++
 lib/eal/s390x/meson.build|  16 +
 lib/eal/s390x/rte_cpuflags.c |  91 +++
 lib/eal/s390x/rte_cycles.c   |  11 +
 lib/eal/s390x/rte_hypervisor.c   |  11 +
 lib/eal/s390x/rte_power_intrinsics.c |  51 ++
 lib/hash/rte_fbk_hash.h  |   7 +
 lib/lpm/meson.build  |   1 +
 lib/lpm/rte_lpm.h|   2 +
 lib/lpm/rte_lpm6.c   |  18 +
 lib/lpm/rte_lpm_s390x.h  | 130 
 meson.build  |   2 +
 56 files changed, 2450 insertions(+), 18 deletions(-)
 create mode 100644 config/s390x/meson.build
 create mode 100644 config/s390x/s390x_linux_clang_ubuntu
 create mode 100644 drivers/net/i40e/i40e_rxtx_vec_s390x.c
 create mode 100644 examples/l3fwd/l3fwd_lpm_s390x.h
 create mode 100644 examples/l3fwd/l3fwd_s390x.h
 create mode 100644 lib/eal/s390x/include/meson.build
 create mode 100644 lib/eal/s390x/include/rte_atomic.h
 create mode 100644 lib/eal/s390x/include/rte_byteorder.h
 create mode 100644 lib/eal/s390x/include/rte_cpuflags.h
 create mode 100644 lib/eal/s390x/include/rte_cycles.h
 create mode 100644 lib/eal/s390x/include/rte_io.h
 create mode 100644 lib/eal/s390x/include/rte_mcslock.h
 create mode 100644 lib/eal/s390x/include/rte_memcpy.h
 create mode 100644 lib/eal/s390x/include/rte_pause.h
 create mode 100644 lib/eal/s390x/include/rte_power_intrinsics.h
 create mode 100644 lib/eal/s390x/include/rte_prefetch.h
 create mode 100644 lib/eal/s390x/include/rte_rwlock.h
 create mode 100644 lib/eal/s390x/include/rte_spinlock.h
 create mode 100644 lib/eal/s390x/include/rte_ticketlock.h
 create mode 100644 lib/eal/s390x/include/rte_vect.h
 create mode 100644 lib/eal/s390x/meson.build
 create mode 100644 lib/eal/s390x/rte_cpuflags.c
 create mode 100644 lib/eal/s390x/rte_cycles.c
 create mode 100644 lib/eal/s390x/rte_hypervisor.c
 create mode 100644 lib/eal/s390x/rte_power_intrinsics.c
 create mode 100644 lib/lpm/rte_lpm_s390x.h

diff --git a/app/test-acl/main.c b/app/test-acl/main.c
index 06e3847ab9..1f567c5359 100644
--- a/app/test-acl/main.c
+++ b/app/test-acl/main.c
@@ -83,6 +83,10 @@ static const struct acl_alg acl_alg[] = {
.name = "altivec",
.alg = RTE_ACL_CLASSIFY_ALTIVEC,
},
+   {
+   .name = "s390x",
+   .alg = RTE_ACL_CLASSIFY_S390X,
+ 

Re: [PATCH] Add support for IBM Z s390x

2022-10-28 Thread Stephen Hemminger
On Fri, 28 Oct 2022 17:52:40 -0400
David Miller  wrote:

> diff --git a/app/test/test_cmdline.c b/app/test/test_cmdline.c
> index 115bee966d..e0720ff345 100644
> --- a/app/test/test_cmdline.c
> +++ b/app/test/test_cmdline.c
> @@ -10,21 +10,21 @@
>  static int
>  test_cmdline(void)
>  {
> - printf("Testind parsing ethernet addresses...\n");
> + printf("Testing parsing ethernet addresses...\n");
>   if (test_parse_etheraddr_valid() < 0)
>   return -1;
>   if (test_parse_etheraddr_invalid_data() < 0)
>   return -1;
>   if (test_parse_etheraddr_invalid_param() < 0)
>   return -1;
> - printf("Testind parsing port lists...\n");
> + printf("Testing parsing port lists...\n");
>   if (test_parse_portlist_valid() < 0)
>   return -1;
>   if (test_parse_portlist_invalid_data() < 0)
>   return -1;
>   if (test_parse_portlist_invalid_param() < 0)
>   return -1;
> - printf("Testind parsing numbers...\n");
> + printf("Testing parsing numbers...\n");
>   if (test_parse_num_valid() < 0)
>   return -1;
>   if (test_parse_num_invalid_data() < 0)

This spelling fix should be its own patch, not related to s390x


Re: [PATCH] Add support for IBM Z s390x

2022-10-28 Thread Stephen Hemminger
On Fri, 28 Oct 2022 17:52:40 -0400
David Miller  wrote:

> +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
> +switch (type) {
> +case RTE_UINT8:
> +{
> +uint8_t *temp = (uint8_t *)&result;
> +result = *temp;
> +break;
> +}
> +case RTE_UINT16:
> +{
> +uint16_t *temp = (uint16_t *)&result;
> +result = *temp;
> +break;
> +}
> +case RTE_UINT32:
> +{
> +uint32_t *temp = (uint32_t *)&result;
> +result = *temp;
> +break;
> +}
> +case RTE_INT8:
> +{
> +int8_t *temp = (int8_t *)&result;
> +result = *temp;
> +break;
> +}
> +case RTE_INT16:
> +{
> +int16_t *temp = (int16_t *)&result;
> +result = *temp;
> +break;
> +}
> +case RTE_INT32:
> +{
> +int32_t *temp = (int32_t *)&result;
> +result = *temp;
> +break;
> +}
> +default:
> +break;
> +}
> +#endif

You are indenting with 4 spaces.
The DPDK uses tab indentation like the Linux kernel.
Surprised that checkpatch isn't catching this.


Re: [PATCH] Add support for IBM Z s390x

2022-10-28 Thread David Miller
Will move it to a new patch; likely I opened it in the IDE again and it's
still set up for the previous OSS project.
I got an email full of complaints from the automated checkpatch, so I will
resubmit early next week.

Best Regards
  - David Miller

On Fri, Oct 28, 2022 at 6:45 PM Stephen Hemminger
 wrote:
>
> On Fri, 28 Oct 2022 17:52:40 -0400
> David Miller  wrote:
>
> > diff --git a/app/test/test_cmdline.c b/app/test/test_cmdline.c
> > index 115bee966d..e0720ff345 100644
> > --- a/app/test/test_cmdline.c
> > +++ b/app/test/test_cmdline.c
> > @@ -10,21 +10,21 @@
> >  static int
> >  test_cmdline(void)
> >  {
> > - printf("Testind parsing ethernet addresses...\n");
> > + printf("Testing parsing ethernet addresses...\n");
> >   if (test_parse_etheraddr_valid() < 0)
> >   return -1;
> >   if (test_parse_etheraddr_invalid_data() < 0)
> >   return -1;
> >   if (test_parse_etheraddr_invalid_param() < 0)
> >   return -1;
> > - printf("Testind parsing port lists...\n");
> > + printf("Testing parsing port lists...\n");
> >   if (test_parse_portlist_valid() < 0)
> >   return -1;
> >   if (test_parse_portlist_invalid_data() < 0)
> >   return -1;
> >   if (test_parse_portlist_invalid_param() < 0)
> >   return -1;
> > - printf("Testind parsing numbers...\n");
> > + printf("Testing parsing numbers...\n");
> >   if (test_parse_num_valid() < 0)
> >   return -1;
> >   if (test_parse_num_invalid_data() < 0)
>
> This spelling fix should be its own patch, not related to s390x


Re: [PATCH V2] app/testpmd: update bond port configurations when add slave

2022-10-28 Thread lihuisong (C)



On 2022/10/28 18:59, humin (Q) wrote:


On 2022/10/28 14:20, Huisong Li wrote:

Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
device in dev_info is zero when no slave is added. And its capability will
be updated when add a new slave device.

The capability to update dynamically may introduce some problems if not
handled properly. For example, the reconfig() is called to initialize
bonding port configurations when create a bonding device. The global
tx_mode is assigned to dev_conf.txmode. The DEV_TX_OFFLOAD_MBUF_FAST_FREE
which is the default value of global tx_mode.offloads in testpmd is removed
from bonding device configuration because of zero rx_offload_capa.
As a result, this offload isn't set to bonding device.

Generally, port configurations of bonding device must be within the
intersection of the capability of all slave devices. If use original port
configurations, the removed capabilities because of adding a new slave may
cause failure when re-initialize bonding device.

So port configurations of bonding device also need to be updated because of
the added and removed capabilities. In addition, this also helps to ensure
consistency between testpmd and bonding device.

Signed-off-by: Huisong Li 
---
  - v2: fix a spelling error in commit log

---
  app/test-pmd/testpmd.c    | 40 +++
  app/test-pmd/testpmd.h    |  3 +-
  drivers/net/bonding/bonding_testpmd.c |  2 ++
  3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97adafacd0..7324b8865c 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2805,6 +2805,41 @@ fill_xstats_display_info(void)
  fill_xstats_display_info_for_port(pi);
  }
  +/*
+ * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
+ * device in dev_info is zero when no slave is added. And its capability of


“And its capability of will be ”, what does this mean ?

will fix in next version.


+ * will be updated when add a new slave device. So adding a device slave need

+ * to update the port configurations of bonding device.
+ */
+static void
+update_bonding_port_dev_conf(portid_t bond_pid)
+{
+#ifdef RTE_NET_BOND
+    struct rte_port *port = &ports[bond_pid];
+    uint16_t i;
+    int ret;
+
+    ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
+    if (ret != 0) {
+    fprintf(stderr, "Failed to get dev info for port = %u\n",
+    bond_pid);
+    return;
+    }
+
+    if (port->dev_info.tx_offload_capa & 
RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)

+    port->dev_conf.txmode.offloads |=
+    RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+    /* Apply Tx offloads configuration */
+    for (i = 0; i < port->dev_info.max_tx_queues; i++)
+    port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
+
+    port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
+    port->dev_info.flow_type_rss_offloads;
+#else
+    RTE_SET_USED(bond_pid);
+#endif
+}
+
  int
  start_port(portid_t pid)
  {
@@ -2869,6 +2904,11 @@ start_port(portid_t pid)
  return -1;
  }
  +    if (port->bond_flag == 1 && port->update_conf == 1) {
+    update_bonding_port_dev_conf(pi);
+    port->update_conf = 0;
+    }
+
  /* configure port */
  diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
   nb_txq + nb_hairpinq,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7fef96f9b1..82714119e8 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -316,7 +316,8 @@ struct rte_port {
  queueid_t   queue_nb; /**< nb. of queues for flow 
rules */
  uint32_t    queue_sz; /**< size of a queue for flow 
rules */

  uint8_t slave_flag : 1, /**< bonding slave port */
-    bond_flag : 1; /**< port is bond device */
+    bond_flag : 1, /**< port is bond device */
+    update_conf : 1; /**< need to update bonding device 
configuration */
  struct port_template    *pattern_templ_list; /**< Pattern 
templates. */
  struct port_template    *actions_templ_list; /**< Actions 
templates. */

  struct port_table   *table_list; /**< Flow tables. */
diff --git a/drivers/net/bonding/bonding_testpmd.c 
b/drivers/net/bonding/bonding_testpmd.c

index 3941f4cf23..9529e16fb6 100644
--- a/drivers/net/bonding/bonding_testpmd.c
+++ b/drivers/net/bonding/bonding_testpmd.c
@@ -625,6 +625,7 @@ static void cmd_add_bonding_slave_parsed(void 
*parsed_result,

  slave_port_id, master_port_id);
  return;
  }
+    ports[master_port_id].update_conf = 1;
  init_port_config();
  set_port_slave_flag(slave_port_id);
  }
@@ -762,6 +763,7 @@ static void cmd_create_bonded_device_parsed(void 
*parsed_result,
  fprintf(stderr, "Failed to enable promiscuous

[PATCH V3] app/testpmd: update bond port configurations when add slave

2022-10-28 Thread Huisong Li
Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
device in dev_info is zero when no slave is added. And its capability will
be updated when add a new slave device.

The capability to update dynamically may introduce some problems if not
handled properly. For example, the reconfig() is called to initialize
bonding port configurations when create a bonding device. The global
tx_mode is assigned to dev_conf.txmode. The DEV_TX_OFFLOAD_MBUF_FAST_FREE
which is the default value of global tx_mode.offloads in testpmd is removed
from bonding device configuration because of zero rx_offload_capa.
As a result, this offload isn't set to bonding device.

Generally, port configurations of bonding device must be within the
intersection of the capability of all slave devices. If use original port
configurations, the removed capabilities because of adding a new slave may
cause failure when re-initialize bonding device.

So port configurations of bonding device also need to be updated because of
the added and removed capabilities. In addition, this also helps to ensure
consistency between testpmd and bonding device.
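
As a rough illustration of the intersection rule above (hypothetical helper,
not part of this patch), the offload set a bond can advertise is the bitwise
AND of what every slave supports:

#include <stdint.h>

/* Illustration only: the usable offload capability of a bonding device is
 * the intersection (bitwise AND) of the capabilities of all of its slaves.
 */
static uint64_t
bond_tx_offload_capa(const uint64_t *slave_capa, unsigned int n_slaves)
{
	uint64_t capa = UINT64_MAX;
	unsigned int i;

	if (n_slaves == 0)
		return 0;
	for (i = 0; i < n_slaves; i++)
		capa &= slave_capa[i];
	return capa;
}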

Signed-off-by: Huisong Li 
---
 - v3: fix code comment
 - v2: fix a spelling error in commit log
---
 app/test-pmd/testpmd.c| 40 +++
 app/test-pmd/testpmd.h|  3 +-
 drivers/net/bonding/bonding_testpmd.c |  2 ++
 3 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97adafacd0..7c9de07367 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -2805,6 +2805,41 @@ fill_xstats_display_info(void)
fill_xstats_display_info_for_port(pi);
 }
 
+/*
+ * Some capabilities (like, rx_offload_capa and tx_offload_capa) of bonding
+ * device in dev_info is zero when no slave is added. And its capability
+ * will be updated when add a new slave device. So adding a device slave need
+ * to update the port configurations of bonding device.
+ */
+static void
+update_bonding_port_dev_conf(portid_t bond_pid)
+{
+#ifdef RTE_NET_BOND
+   struct rte_port *port = &ports[bond_pid];
+   uint16_t i;
+   int ret;
+
+   ret = eth_dev_info_get_print_err(bond_pid, &port->dev_info);
+   if (ret != 0) {
+   fprintf(stderr, "Failed to get dev info for port = %u\n",
+   bond_pid);
+   return;
+   }
+
+   if (port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+   port->dev_conf.txmode.offloads |=
+   RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+   /* Apply Tx offloads configuration */
+   for (i = 0; i < port->dev_info.max_tx_queues; i++)
+   port->txq[i].conf.offloads = port->dev_conf.txmode.offloads;
+
+   port->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
+   port->dev_info.flow_type_rss_offloads;
+#else
+   RTE_SET_USED(bond_pid);
+#endif
+}
+
 int
 start_port(portid_t pid)
 {
@@ -2869,6 +2904,11 @@ start_port(portid_t pid)
return -1;
}
 
+   if (port->bond_flag == 1 && port->update_conf == 1) {
+   update_bonding_port_dev_conf(pi);
+   port->update_conf = 0;
+   }
+
/* configure port */
diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
 nb_txq + nb_hairpinq,
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 7fef96f9b1..82714119e8 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -316,7 +316,8 @@ struct rte_port {
queueid_t   queue_nb; /**< nb. of queues for flow rules */
uint32_tqueue_sz; /**< size of a queue for flow rules */
uint8_t slave_flag : 1, /**< bonding slave port */
-   bond_flag : 1; /**< port is bond device */
+   bond_flag : 1, /**< port is bond device */
+   update_conf : 1; /**< need to update bonding device configuration */
struct port_template*pattern_templ_list; /**< Pattern templates. */
struct port_template*actions_templ_list; /**< Actions templates. */
struct port_table   *table_list; /**< Flow tables. */
diff --git a/drivers/net/bonding/bonding_testpmd.c 
b/drivers/net/bonding/bonding_testpmd.c
index 3941f4cf23..9529e16fb6 100644
--- a/drivers/net/bonding/bonding_testpmd.c
+++ b/drivers/net/bonding/bonding_testpmd.c
@@ -625,6 +625,7 @@ static void cmd_add_bonding_slave_parsed(void 
*parsed_result,
slave_port_id, master_port_id);
return;
}
+   ports[master_port_id].update_conf = 1;
init_port_config();
set_port_slave_flag(slave_port_id);
 }
@@ -762,6 

[PATCH v15 00/18] add support for idpf PMD in DPDK

2022-10-28 Thread beilei . xing
From: Beilei Xing 

This patchset introduces the idpf (Infrastructure Data Path Function) PMD in
DPDK for the Intel® IPU E2000 (Device ID: 0x1452).
The Intel® IPU E2000 aims to deliver high performance under real workloads
with security and isolation.
Please refer to
https://www.intel.com/content/www/us/en/products/network-io/infrastructure-processing-units/asic/e2000-asic.html
for more information.

Linux upstreaming is still ongoing; previous work is at
https://patchwork.ozlabs.org/project/intel-wired-lan/patch/20220128001009.721392-20-alan.br...@intel.com/.

v2-v4:
fixed some coding style issues and did some refactors.

v5:
fixed typo.

v6-v9:
fixed build errors and coding style issues.

v11:
 - move shared code to common/idpf/base
 - Create one vport if there's no vport devargs
 - Refactor if conditions according to coding style
 - Refactor virtual channel return values
 - Refine dev_stop function
 - Refine RSS lut/key
 - Fix build error

v12:
 - Refine dev_configure
 - Fix coding style according to the comments
 - Re-order patch
 - Remove dev_supported_ptypes_get

v13:
 - refine dev_start/stop and queue_start/stop
 - fix timestamp offload

v14:
 - fix wrong position for rte_validate_tx_offload

v15:
 - refine the return value for ethdev ops.
 - remove forward static declarations.
 - refine get caps.
 - fix lock/unlock handling.

Junfeng Guo (18):
  common/idpf: introduce common library
  net/idpf: add support for device initialization
  net/idpf: add Tx queue setup
  net/idpf: add Rx queue setup
  net/idpf: add support for device start and stop
  net/idpf: add support for queue start
  net/idpf: add support for queue stop
  net/idpf: add queue release
  net/idpf: add support for MTU configuration
  net/idpf: add support for basic Rx datapath
  net/idpf: add support for basic Tx datapath
  net/idpf: support parsing packet type
  net/idpf: add support for write back based on ITR expire
  net/idpf: add support for RSS
  net/idpf: add support for Rx offloading
  net/idpf: add support for Tx offloading
  net/idpf: add AVX512 data path for single queue model
  net/idpf: add support for timestamp offload

 MAINTAINERS   |9 +
 doc/guides/nics/features/idpf.ini |   17 +
 doc/guides/nics/idpf.rst  |   85 +
 doc/guides/nics/index.rst |1 +
 doc/guides/rel_notes/release_22_11.rst|6 +
 drivers/common/idpf/base/idpf_alloc.h |   22 +
 drivers/common/idpf/base/idpf_common.c|  364 +++
 drivers/common/idpf/base/idpf_controlq.c  |  691 
 drivers/common/idpf/base/idpf_controlq.h  |  224 ++
 drivers/common/idpf/base/idpf_controlq_api.h  |  234 ++
 .../common/idpf/base/idpf_controlq_setup.c|  179 +
 drivers/common/idpf/base/idpf_devids.h|   18 +
 drivers/common/idpf/base/idpf_lan_pf_regs.h   |  134 +
 drivers/common/idpf/base/idpf_lan_txrx.h  |  428 +++
 drivers/common/idpf/base/idpf_lan_vf_regs.h   |  114 +
 drivers/common/idpf/base/idpf_osdep.h |  364 +++
 drivers/common/idpf/base/idpf_prototype.h |   45 +
 drivers/common/idpf/base/idpf_type.h  |  106 +
 drivers/common/idpf/base/meson.build  |   14 +
 drivers/common/idpf/base/siov_regs.h  |   47 +
 drivers/common/idpf/base/virtchnl.h   | 2866 +
 drivers/common/idpf/base/virtchnl2.h  | 1462 +
 drivers/common/idpf/base/virtchnl2_lan_desc.h |  606 
 .../common/idpf/base/virtchnl_inline_ipsec.h  |  567 
 drivers/common/idpf/meson.build   |4 +
 drivers/common/idpf/version.map   |   12 +
 drivers/common/meson.build|1 +
 drivers/net/idpf/idpf_ethdev.c| 1293 
 drivers/net/idpf/idpf_ethdev.h|  252 ++
 drivers/net/idpf/idpf_logs.h  |   56 +
 drivers/net/idpf/idpf_rxtx.c  | 2308 +
 drivers/net/idpf/idpf_rxtx.h  |  291 ++
 drivers/net/idpf/idpf_rxtx_vec_avx512.c   |  871 +
 drivers/net/idpf/idpf_rxtx_vec_common.h   |  100 +
 drivers/net/idpf/idpf_vchnl.c | 1416 
 drivers/net/idpf/meson.build  |   44 +
 drivers/net/idpf/version.map  |3 +
 drivers/net/meson.build   |1 +
 38 files changed, 15255 insertions(+)
 create mode 100644 doc/guides/nics/features/idpf.ini
 create mode 100644 doc/guides/nics/idpf.rst
 create mode 100644 drivers/common/idpf/base/idpf_alloc.h
 create mode 100644 drivers/common/idpf/base/idpf_common.c
 create mode 100644 drivers/common/idpf/base/idpf_controlq.c
 create mode 100644 drivers/common/idpf/base/idpf_controlq.h
 create mode 100644 drivers/common/idpf/base/idpf_controlq_api.h
 create mode 100644 drivers/common/idpf/base/idpf_controlq_setup.c
 create mode 100644 drivers/common/idpf/base/idpf_devids.h
 create mode 100644 drivers/common/idpf/base/idpf_lan_pf_regs.h
 create mode 100644 d

[PATCH v15 02/18] net/idpf: add support for device initialization

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Support device init and add the following dev ops:
 - dev_configure
 - dev_close
 - dev_infos_get

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Xiao Wang 
Signed-off-by: Wenjun Wu 
Signed-off-by: Junfeng Guo 
---
 MAINTAINERS|   9 +
 doc/guides/nics/features/idpf.ini  |   9 +
 doc/guides/nics/idpf.rst   |  66 ++
 doc/guides/nics/index.rst  |   1 +
 doc/guides/rel_notes/release_22_11.rst |   6 +
 drivers/net/idpf/idpf_ethdev.c | 891 +
 drivers/net/idpf/idpf_ethdev.h | 189 ++
 drivers/net/idpf/idpf_logs.h   |  56 ++
 drivers/net/idpf/idpf_vchnl.c  | 416 
 drivers/net/idpf/meson.build   |  15 +
 drivers/net/idpf/version.map   |   3 +
 drivers/net/meson.build|   1 +
 12 files changed, 1662 insertions(+)
 create mode 100644 doc/guides/nics/features/idpf.ini
 create mode 100644 doc/guides/nics/idpf.rst
 create mode 100644 drivers/net/idpf/idpf_ethdev.c
 create mode 100644 drivers/net/idpf/idpf_ethdev.h
 create mode 100644 drivers/net/idpf/idpf_logs.h
 create mode 100644 drivers/net/idpf/idpf_vchnl.c
 create mode 100644 drivers/net/idpf/meson.build
 create mode 100644 drivers/net/idpf/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index bdf233c9f8..cc66db25e8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -770,6 +770,15 @@ F: drivers/net/ice/
 F: doc/guides/nics/ice.rst
 F: doc/guides/nics/features/ice.ini
 
+Intel idpf
+M: Jingjing Wu 
+M: Beilei Xing 
+T: git://dpdk.org/next/dpdk-next-net-intel
+F: drivers/net/idpf/
+F: drivers/common/idpf/
+F: doc/guides/nics/idpf.rst
+F: doc/guides/nics/features/idpf.ini
+
 Intel igc
 M: Junfeng Guo 
 M: Simei Su 
diff --git a/doc/guides/nics/features/idpf.ini 
b/doc/guides/nics/features/idpf.ini
new file mode 100644
index 00..46aab2eb61
--- /dev/null
+++ b/doc/guides/nics/features/idpf.ini
@@ -0,0 +1,9 @@
+;
+; Supported features of the 'idpf' network poll mode driver.
+;
+; Refer to default.ini for the full list of available PMD features.
+;
+[Features]
+Linux= Y
+x86-32   = Y
+x86-64   = Y
diff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst
new file mode 100644
index 00..c1001d5d0c
--- /dev/null
+++ b/doc/guides/nics/idpf.rst
@@ -0,0 +1,66 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2022 Intel Corporation.
+
+IDPF Poll Mode Driver
+==
+
+The [*EXPERIMENTAL*] idpf PMD (**librte_net_idpf**) provides poll mode driver 
support for
+Intel® Infrastructure Processing Unit (Intel® IPU) E2000.
+
+
+Linux Prerequisites
+---
+
+- Follow the DPDK :ref:`Getting Started Guide for Linux ` to setup 
the basic DPDK environment.
+
+- To get better performance on Intel platforms, please follow the "How to get 
best performance with NICs on Intel platforms"
+  section of the :ref:`Getting Started Guide for Linux `.
+
+
+Pre-Installation Configuration
+--
+
+Runtime Config Options
+~~
+
+- ``vport`` (default ``0``)
+
+  The IDPF PMD supports creation of multiple vports for one PCI device, each 
vport
+  corresponds to a single ethdev. Using the ``devargs`` parameter ``vport`` 
the user
+  can specify the vports with specific ID to be created, for example::
+
+-a ca:00.0,vport=[0,2,3]
+
+  Then idpf PMD will create 3 vports (ethdevs) for device ca:00.0.
+  NOTE: If the parameter is not provided, the vport 0 will be created by 
default.
+
+- ``rx_single`` (default ``0``)
+
+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, 
single queue
+  mode and split queue mode for Rx queue. User can choose Rx queue mode by the 
``devargs``
+  parameter ``rx_single``.
+
+-a ca:00.0,rx_single=1
+
+  Then idpf PMD will configure Rx queue with single queue mode. Otherwise, 
split queue
+  mode is chosen by default.
+
+- ``tx_single`` (default ``0``)
+
+  There're two queue modes supported by Intel® IPU Ethernet ES2000 Series, 
single queue
+  mode and split queue mode for Tx queue. User can choose Tx queue mode by the 
``devargs``
+  parameter ``tx_single``.
+
+-a ca:00.0,tx_single=1
+
+  Then idpf PMD will configure Tx queue with single queue mode. Otherwise, 
split queue
+  mode is chosen by default.
+
+
+Driver compilation and testing
+--
+
+Refer to the document :ref:`compiling and testing a PMD for a NIC 
`
+for details.
+
+
diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
index 4d40ea29a3..12841ce407 100644
--- a/doc/guides/nics/index.rst
+++ b/doc/guides/nics/index.rst
@@ -34,6 +34,7 @@ Network Interface Controller Drivers
 hns3
 i40e
 ice
+idpf
 igb
 igc
 ionic
diff --git a/doc/guides/rel_notes/release_22_11.rst 
b/doc/guides/rel_notes/release_22_11.rst
index a6f180ed99..7b66733aea 100644
--- a/doc/guides/rel_notes/rele

[PATCH v15 03/18] net/idpf: add Tx queue setup

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for tx_queue_setup ops.

In the single queue model, the same descriptor queue is used by SW to
post buffer descriptors to HW and by HW to post completed descriptors
to SW.

In the split queue model, "RX buffer queues" are used to pass
descriptor buffers from SW to HW while Rx queues are used only to
pass the descriptor completions, that is, descriptors that point
to completed buffers, from HW to SW. This is contrary to the single
queue model in which Rx queues are used for both purposes.
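
As a rough sketch of the relationship described above (illustrative names
only, not the driver's structures), each split-model Rx queue is paired with
buffer queues that carry buffers in the opposite direction:

/* Illustration only: Rx queue vs. buffer queues in the two models. */
struct rx_buf_queue {
	void *ring;                   /* buffer descriptors, SW -> HW */
};

struct rx_queue {
	void *ring;                   /* single queue model: buffers and
	                               * completions; split queue model:
	                               * completions only, HW -> SW */
	struct rx_buf_queue *bufq1;   /* split queue model only */
	struct rx_buf_queue *bufq2;   /* split queue model only */
};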

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |  13 ++
 drivers/net/idpf/idpf_rxtx.c   | 364 +
 drivers/net/idpf/idpf_rxtx.h   |  70 +++
 drivers/net/idpf/meson.build   |   1 +
 4 files changed, 448 insertions(+)
 create mode 100644 drivers/net/idpf/idpf_rxtx.c
 create mode 100644 drivers/net/idpf/idpf_rxtx.h

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 035f563275..54f20d30ca 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -11,6 +11,7 @@
 #include 
 
 #include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
 
 #define IDPF_TX_SINGLE_Q   "tx_single"
 #define IDPF_RX_SINGLE_Q   "rx_single"
@@ -42,6 +43,17 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+   dev_info->default_txconf = (struct rte_eth_txconf) {
+   .tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
+   .tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
+   };
+
+   dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = IDPF_MAX_RING_DESC,
+   .nb_min = IDPF_MIN_RING_DESC,
+   .nb_align = IDPF_ALIGN_RING_DESC,
+   };
+
return 0;
 }
 
@@ -631,6 +643,7 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct 
idpf_adapter *adapter)
 static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_configure  = idpf_dev_configure,
.dev_close  = idpf_dev_close,
+   .tx_queue_setup = idpf_tx_queue_setup,
.dev_infos_get  = idpf_dev_info_get,
 };
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
new file mode 100644
index 00..4afa0a2560
--- /dev/null
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include 
+#include 
+
+#include "idpf_ethdev.h"
+#include "idpf_rxtx.h"
+
+static int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+   uint16_t tx_free_thresh)
+{
+   /* TX descriptors will have their RS bit set after tx_rs_thresh
+* descriptors have been used. The TX descriptor ring will be cleaned
+* after tx_free_thresh descriptors are used or if the number of
+* descriptors required to transmit a packet is greater than the
+* number of free TX descriptors.
+*
+* The following constraints must be satisfied:
+*  - tx_rs_thresh must be less than the size of the ring minus 2.
+*  - tx_free_thresh must be less than the size of the ring minus 3.
+*  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+*  - tx_rs_thresh must be a divisor of the ring size.
+*
+* One descriptor in the TX ring is used as a sentinel to avoid a H/W
+* race condition, hence the maximum threshold constraints. When set
+* to zero use default values.
+*/
+   if (tx_rs_thresh >= (nb_desc - 2)) {
+   PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+"number of TX descriptors (%u) minus 2",
+tx_rs_thresh, nb_desc);
+   return -EINVAL;
+   }
+   if (tx_free_thresh >= (nb_desc - 3)) {
+   PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+"number of TX descriptors (%u) minus 3.",
+tx_free_thresh, nb_desc);
+   return -EINVAL;
+   }
+   if (tx_rs_thresh > tx_free_thresh) {
+   PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+"equal to tx_free_thresh (%u).",
+tx_rs_thresh, tx_free_thresh);
+   return -EINVAL;
+   }
+   if ((nb_desc % tx_rs_thresh) != 0) {
+   PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+"number of TX descriptors (%u).",
+tx_rs_thresh, nb_desc);
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
+static void
+reset_split_tx_descq(struct idpf_tx_queue *txq)
+{
+   struct id

[PATCH v15 04/18] net/idpf: add Rx queue setup

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for rx_queue_setup ops.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |  11 +
 drivers/net/idpf/idpf_rxtx.c   | 400 +
 drivers/net/idpf/idpf_rxtx.h   |  46 
 3 files changed, 457 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 54f20d30ca..fb5cd1b111 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -48,12 +48,22 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
.tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
};
 
+   dev_info->default_rxconf = (struct rte_eth_rxconf) {
+   .rx_free_thresh = IDPF_DEFAULT_RX_FREE_THRESH,
+   };
+
dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
.nb_max = IDPF_MAX_RING_DESC,
.nb_min = IDPF_MIN_RING_DESC,
.nb_align = IDPF_ALIGN_RING_DESC,
};
 
+   dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
+   .nb_max = IDPF_MAX_RING_DESC,
+   .nb_min = IDPF_MIN_RING_DESC,
+   .nb_align = IDPF_ALIGN_RING_DESC,
+   };
+
return 0;
 }
 
@@ -643,6 +653,7 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct 
idpf_adapter *adapter)
 static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_configure  = idpf_dev_configure,
.dev_close  = idpf_dev_close,
+   .rx_queue_setup = idpf_rx_queue_setup,
.tx_queue_setup = idpf_tx_queue_setup,
.dev_infos_get  = idpf_dev_info_get,
 };
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 4afa0a2560..25dd5d85d5 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -8,6 +8,21 @@
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
 
+static int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+   /* The following constraints must be satisfied:
+*   thresh < rxq->nb_rx_desc
+*/
+   if (thresh >= nb_desc) {
+   PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+thresh, nb_desc);
+   return -EINVAL;
+   }
+
+   return 0;
+}
+
 static int
 check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
uint16_t tx_free_thresh)
@@ -56,6 +71,87 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
return 0;
 }
 
+static void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+   uint16_t len;
+   uint32_t i;
+
+   if (rxq == NULL)
+   return;
+
+   len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+   for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+i++)
+   ((volatile char *)rxq->rx_ring)[i] = 0;
+
+   rxq->rx_tail = 0;
+   rxq->expected_gen_id = 1;
+}
+
+static void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+   uint16_t len;
+   uint32_t i;
+
+   if (rxq == NULL)
+   return;
+
+   len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+   for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+i++)
+   ((volatile char *)rxq->rx_ring)[i] = 0;
+
+   memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+   for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+   rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+   /* The next descriptor id which can be received. */
+   rxq->rx_next_avail = 0;
+
+   /* The next descriptor id which can be refilled. */
+   rxq->rx_tail = 0;
+   /* The number of descriptors which can be refilled. */
+   rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+   rxq->bufq1 = NULL;
+   rxq->bufq2 = NULL;
+}
+
+static void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+   uint16_t len;
+   uint32_t i;
+
+   if (rxq == NULL)
+   return;
+
+   len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+   for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+i++)
+   ((volatile char *)rxq->rx_ring)[i] = 0;
+
+   memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+   for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+   rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+   rxq->rx_tail = 0;
+   rxq->nb_rx_hold = 0;
+
+   if (rxq->pkt_first_seg != NULL)
+   rte_pktmbuf_free(rxq->pkt_first_seg);
+
+   rxq->pkt_first_seg = NULL;
+   rxq->pkt_last_seg = NULL;
+}
+
 static void
 reset_split_tx_descq(struct idpf_tx_queue *txq)
 {
@@ -145,6 +241,310 @@ reset_single_tx_queue(struct idpf_tx_queue *txq)
txq->next_rs = txq->rs_thresh - 1;
 }
 
+static int
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+   

[PATCH v15 05/18] net/idpf: add support for device start and stop

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add dev ops dev_start, dev_stop and link_update.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c | 55 ++
 drivers/net/idpf/idpf_rxtx.c   | 20 +
 2 files changed, 75 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index fb5cd1b111..621bf9aad5 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -29,6 +29,22 @@ static const char * const idpf_valid_args[] = {
NULL
 };
 
+static int
+idpf_dev_link_update(struct rte_eth_dev *dev,
+__rte_unused int wait_to_complete)
+{
+   struct rte_eth_link new_link;
+
+   memset(&new_link, 0, sizeof(new_link));
+
+   new_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
+   new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+   new_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ RTE_ETH_LINK_SPEED_FIXED);
+
+   return rte_eth_linkstatus_set(dev, &new_link);
+}
+
 static int
 idpf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -267,6 +283,42 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+idpf_dev_start(struct rte_eth_dev *dev)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+   int ret;
+
+   if (dev->data->mtu > vport->max_mtu) {
+   PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
+   return -EINVAL;
+   }
+
+   vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
+
+   /* TODO: start queues */
+
+   ret = idpf_vc_ena_dis_vport(vport, true);
+   if (ret != 0) {
+   PMD_DRV_LOG(ERR, "Failed to enable vport");
+   return ret;
+   }
+
+   return 0;
+}
+
+static int
+idpf_dev_stop(struct rte_eth_dev *dev)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+
+   idpf_vc_ena_dis_vport(vport, false);
+
+   /* TODO: stop queues */
+
+   return 0;
+}
+
 static int
 idpf_dev_close(struct rte_eth_dev *dev)
 {
@@ -656,6 +708,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.rx_queue_setup = idpf_rx_queue_setup,
.tx_queue_setup = idpf_tx_queue_setup,
.dev_infos_get  = idpf_dev_info_get,
+   .dev_start  = idpf_dev_start,
+   .dev_stop   = idpf_dev_stop,
+   .link_update= idpf_dev_link_update,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 25dd5d85d5..3528d2f2c7 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -334,6 +334,11 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
 
+   if (rx_conf->rx_deferred_start) {
+   PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+   return -EINVAL;
+   }
+
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
 sizeof(struct idpf_rx_queue),
@@ -465,6 +470,11 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
 
+   if (rx_conf->rx_deferred_start) {
+   PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+   return -EINVAL;
+   }
+
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
 sizeof(struct idpf_rx_queue),
@@ -569,6 +579,11 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
 
+   if (tx_conf->tx_deferred_start) {
+   PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+   return -EINVAL;
+   }
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf split txq",
 sizeof(struct idpf_tx_queue),
@@ -691,6 +706,11 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
 
+   if (tx_conf->tx_deferred_start) {
+   PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
+   return -EINVAL;
+   }
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf txq",
 sizeof(struct idpf_tx_queue),
-- 
2.26.2



[PATCH v15 06/18] net/idpf: add support for queue start

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for these device ops:
 - rx_queue_start
 - tx_queue_start

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |  42 +++-
 drivers/net/idpf/idpf_ethdev.h |   9 +
 drivers/net/idpf/idpf_rxtx.c   | 237 +++--
 drivers/net/idpf/idpf_rxtx.h   |   6 +
 drivers/net/idpf/idpf_vchnl.c  | 447 +
 5 files changed, 720 insertions(+), 21 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 621bf9aad5..0400ed611f 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -283,6 +283,39 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+idpf_start_queues(struct rte_eth_dev *dev)
+{
+   struct idpf_rx_queue *rxq;
+   struct idpf_tx_queue *txq;
+   int err = 0;
+   int i;
+
+   for (i = 0; i < dev->data->nb_tx_queues; i++) {
+   txq = dev->data->tx_queues[i];
+   if (txq == NULL || txq->tx_deferred_start)
+   continue;
+   err = idpf_tx_queue_start(dev, i);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+   return err;
+   }
+   }
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq == NULL || rxq->rx_deferred_start)
+   continue;
+   err = idpf_rx_queue_start(dev, i);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+   return err;
+   }
+   }
+
+   return err;
+}
+
 static int
 idpf_dev_start(struct rte_eth_dev *dev)
 {
@@ -296,11 +329,16 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
-   /* TODO: start queues */
+   ret = idpf_start_queues(dev);
+   if (ret != 0) {
+   PMD_DRV_LOG(ERR, "Failed to start queues");
+   return ret;
+   }
 
ret = idpf_vc_ena_dis_vport(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
+   /* TODO: stop queues */
return ret;
}
 
@@ -711,6 +749,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.dev_start  = idpf_dev_start,
.dev_stop   = idpf_dev_stop,
.link_update= idpf_dev_link_update,
+   .rx_queue_start = idpf_rx_queue_start,
+   .tx_queue_start = idpf_tx_queue_start,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 84ae6641e2..96c22009e9 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -24,7 +24,9 @@
 #define IDPF_DEFAULT_TXQ_NUM   16
 
 #define IDPF_INVALID_VPORT_IDX 0x
+#define IDPF_TXQ_PER_GRP   1
 #define IDPF_TX_COMPLQ_PER_GRP 1
+#define IDPF_RXQ_PER_GRP   1
 #define IDPF_RX_BUFQ_PER_GRP   2
 
 #define IDPF_CTLQ_ID   -1
@@ -182,6 +184,13 @@ int idpf_vc_check_api_version(struct idpf_adapter 
*adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct idpf_adapter *adapter);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
+int idpf_vc_config_rxqs(struct idpf_vport *vport);
+int idpf_vc_config_rxq(struct idpf_vport *vport, uint16_t rxq_id);
+int idpf_vc_config_txqs(struct idpf_vport *vport);
+int idpf_vc_config_txq(struct idpf_vport *vport, uint16_t txq_id);
+int idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
+ bool rx, bool on);
+int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
  uint16_t buf_len, uint8_t *buf);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 3528d2f2c7..6d954afd9d 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -334,11 +334,6 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
 
-   if (rx_conf->rx_deferred_start) {
-   PMD_INIT_LOG(ERR, "Queue start is not supported currently.");
-   return -EINVAL;
-   }
-
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
 sizeof(struct idpf_rx_queue),
@@ -354,6 +349,7 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
rxq->rx_free_thresh = rx_free_thresh;
rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
rxq->port_id = dev-

[PATCH v15 07/18] net/idpf: add support for queue stop

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |  17 ++--
 drivers/net/idpf/idpf_rxtx.c   | 148 +
 drivers/net/idpf/idpf_rxtx.h   |  13 +++
 drivers/net/idpf/idpf_vchnl.c  |  69 +++
 4 files changed, 242 insertions(+), 5 deletions(-)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 0400ed611f..9f1e1e6a18 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -324,7 +324,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
if (dev->data->mtu > vport->max_mtu) {
PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-   return -EINVAL;
+   ret = -EINVAL;
+   goto err_mtu;
}
 
vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
@@ -332,17 +333,21 @@ idpf_dev_start(struct rte_eth_dev *dev)
ret = idpf_start_queues(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to start queues");
-   return ret;
+   goto err_mtu;
}
 
ret = idpf_vc_ena_dis_vport(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
-   /* TODO: stop queues */
-   return ret;
+   goto err_vport;
}
 
return 0;
+
+err_vport:
+   idpf_stop_queues(dev);
+err_mtu:
+   return ret;
 }
 
 static int
@@ -352,7 +357,7 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
idpf_vc_ena_dis_vport(vport, false);
 
-   /* TODO: stop queues */
+   idpf_stop_queues(dev);
 
return 0;
 }
@@ -751,6 +756,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.link_update= idpf_dev_link_update,
.rx_queue_start = idpf_rx_queue_start,
.tx_queue_start = idpf_tx_queue_start,
+   .rx_queue_stop  = idpf_rx_queue_stop,
+   .tx_queue_stop  = idpf_tx_queue_stop,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 6d954afd9d..8d5ec41a1f 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -71,6 +71,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
return 0;
 }
 
+static void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+   uint16_t i;
+
+   if (rxq->sw_ring == NULL)
+   return;
+
+   for (i = 0; i < rxq->nb_rx_desc; i++) {
+   if (rxq->sw_ring[i] != NULL) {
+   rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+   rxq->sw_ring[i] = NULL;
+   }
+   }
+}
+
+static void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+   uint16_t nb_desc, i;
+
+   if (txq == NULL || txq->sw_ring == NULL) {
+   PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+   return;
+   }
+
+   if (txq->sw_nb_desc != 0) {
+   /* For split queue model, descriptor ring */
+   nb_desc = txq->sw_nb_desc;
+   } else {
+   /* For single queue model */
+   nb_desc = txq->nb_tx_desc;
+   }
+   for (i = 0; i < nb_desc; i++) {
+   if (txq->sw_ring[i].mbuf != NULL) {
+   rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+   txq->sw_ring[i].mbuf = NULL;
+   }
+   }
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+   .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+   .release_mbufs = release_txq_mbufs,
+};
+
 static void
 reset_split_rx_descq(struct idpf_rx_queue *rxq)
 {
@@ -122,6 +171,14 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
rxq->bufq2 = NULL;
 }
 
+static inline void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+   reset_split_rx_descq(rxq);
+   reset_split_rx_bufq(rxq->bufq1);
+   reset_split_rx_bufq(rxq->bufq2);
+}
+
 static void
 reset_single_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -301,6 +358,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct 
idpf_rx_queue *bufq,
bufq->q_set = true;
bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
 queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+   bufq->ops = &def_rxq_ops;
 
/* TODO: allow bulk or vec */
 
@@ -527,6 +585,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
queue_idx * vport->chunks_info.rx_qtail_spacing);
+   rxq->ops = &def_rxq_ops;
 
return 0;
 }
@@ -621,6 +680,7 @@ idpf_tx_split_queue_setup(struct

[PATCH v15 08/18] net/idpf: add queue release

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for queue operations:
 - rx_queue_release
 - tx_queue_release

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |  2 +
 drivers/net/idpf/idpf_rxtx.c   | 81 ++
 drivers/net/idpf/idpf_rxtx.h   |  3 ++
 3 files changed, 86 insertions(+)
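
The release ops below are not called by applications directly; the ethdev layer invokes them when a queue is set up again on an already configured index or when the port is closed. A minimal sketch of the first path (helper name and descriptor count are illustrative):

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Re-running rx_queue_setup on an existing queue frees the old one first
 * (see the "Free memory if needed" hunks below); rte_eth_dev_close()
 * releases whatever queues remain.
 */
static int
resize_rxq0(uint16_t port_id, struct rte_mempool *mp)
{
        /* assumes the port is stopped */
        return rte_eth_rx_queue_setup(port_id, 0, 2048,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mp);
}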

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 9f1e1e6a18..1485f40e71 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -758,6 +758,8 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.tx_queue_start = idpf_tx_queue_start,
.rx_queue_stop  = idpf_rx_queue_stop,
.tx_queue_stop  = idpf_tx_queue_stop,
+   .rx_queue_release   = idpf_dev_rx_queue_release,
+   .tx_queue_release   = idpf_dev_tx_queue_release,
 };
 
 static uint16_t
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 8d5ec41a1f..053409b99a 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -171,6 +171,51 @@ reset_split_rx_bufq(struct idpf_rx_queue *rxq)
rxq->bufq2 = NULL;
 }
 
+static void
+idpf_rx_queue_release(void *rxq)
+{
+   struct idpf_rx_queue *q = rxq;
+
+   if (q == NULL)
+   return;
+
+   /* Split queue */
+   if (q->bufq1 != NULL && q->bufq2 != NULL) {
+   q->bufq1->ops->release_mbufs(q->bufq1);
+   rte_free(q->bufq1->sw_ring);
+   rte_memzone_free(q->bufq1->mz);
+   rte_free(q->bufq1);
+   q->bufq2->ops->release_mbufs(q->bufq2);
+   rte_free(q->bufq2->sw_ring);
+   rte_memzone_free(q->bufq2->mz);
+   rte_free(q->bufq2);
+   rte_memzone_free(q->mz);
+   rte_free(q);
+   return;
+   }
+
+   /* Single queue */
+   q->ops->release_mbufs(q);
+   rte_free(q->sw_ring);
+   rte_memzone_free(q->mz);
+   rte_free(q);
+}
+
+static void
+idpf_tx_queue_release(void *txq)
+{
+   struct idpf_tx_queue *q = txq;
+
+   if (q == NULL)
+   return;
+
+   rte_free(q->complq);
+   q->ops->release_mbufs(q);
+   rte_free(q->sw_ring);
+   rte_memzone_free(q->mz);
+   rte_free(q);
+}
+
 static inline void
 reset_split_rx_queue(struct idpf_rx_queue *rxq)
 {
@@ -392,6 +437,12 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
 
+   /* Free memory if needed */
+   if (dev->data->rx_queues[queue_idx] != NULL) {
+   idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+   dev->data->rx_queues[queue_idx] = NULL;
+   }
+
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
 sizeof(struct idpf_rx_queue),
@@ -524,6 +575,12 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
return -EINVAL;
 
+   /* Free memory if needed */
+   if (dev->data->rx_queues[queue_idx] != NULL) {
+   idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
+   dev->data->rx_queues[queue_idx] = NULL;
+   }
+
/* Setup Rx description queue */
rxq = rte_zmalloc_socket("idpf rxq",
 sizeof(struct idpf_rx_queue),
@@ -630,6 +687,12 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
 
+   /* Free memory if needed. */
+   if (dev->data->tx_queues[queue_idx] != NULL) {
+   idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+   dev->data->tx_queues[queue_idx] = NULL;
+   }
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf split txq",
 sizeof(struct idpf_tx_queue),
@@ -754,6 +817,12 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, 
uint16_t queue_idx,
if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
return -EINVAL;
 
+   /* Free memory if needed. */
+   if (dev->data->tx_queues[queue_idx] != NULL) {
+   idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
+   dev->data->tx_queues[queue_idx] = NULL;
+   }
+
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("idpf txq",
 sizeof(struct idpf_tx_queue),
@@ -1102,6 +1171,18 @@ idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
return 0;
 }
 
+void
+idpf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+   idpf_rx_queue_r

[PATCH v15 09/18] net/idpf: add support for MTU configuration

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add dev ops mtu_set.

Signed-off-by: Beilei Xing 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/features/idpf.ini |  1 +
 drivers/net/idpf/idpf_ethdev.c| 13 +
 2 files changed, 14 insertions(+)
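
The new op is reached through rte_eth_dev_set_mtu(); since it rejects changes on a started port, the MTU has to be set before rte_eth_dev_start(). A minimal sketch (helper name is illustrative):

#include <rte_ethdev.h>

static int
set_mtu_then_start(uint16_t port_id, uint16_t mtu)
{
        int ret;

        ret = rte_eth_dev_set_mtu(port_id, mtu);
        if (ret != 0)
                return ret;     /* e.g. -EBUSY if the port is already started */

        return rte_eth_dev_start(port_id);
}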

diff --git a/doc/guides/nics/features/idpf.ini 
b/doc/guides/nics/features/idpf.ini
index 46aab2eb61..d722c49fde 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -4,6 +4,7 @@
 ; Refer to default.ini for the full list of available PMD features.
 ;
 [Features]
+MTU update   = Y
 Linux= Y
 x86-32   = Y
 x86-64   = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 1485f40e71..856f3d7266 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -83,6 +83,18 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
return 0;
 }
 
+static int
+idpf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu __rte_unused)
+{
+   /* MTU setting is forbidden if the port is started */
+   if (dev->data->dev_started) {
+   PMD_DRV_LOG(ERR, "port must be stopped before configuration");
+   return -EBUSY;
+   }
+
+   return 0;
+}
+
 static int
 idpf_init_vport_req_info(struct rte_eth_dev *dev)
 {
@@ -760,6 +772,7 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
.tx_queue_stop  = idpf_tx_queue_stop,
.rx_queue_release   = idpf_dev_rx_queue_release,
.tx_queue_release   = idpf_dev_tx_queue_release,
+   .mtu_set= idpf_dev_mtu_set,
 };
 
 static uint16_t
-- 
2.26.2



[PATCH v15 10/18] net/idpf: add support for basic Rx datapath

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add basic Rx support in split queue mode and single queue mode.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |   2 +
 drivers/net/idpf/idpf_rxtx.c   | 273 +
 drivers/net/idpf/idpf_rxtx.h   |   7 +-
 3 files changed, 281 insertions(+), 1 deletion(-)
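
Both Rx paths are selected internally by idpf_set_rx_function() and reached through the usual poll loop; a minimal sketch (helper name and burst size are illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define BURST_SIZE 32

static void
rx_loop(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[BURST_SIZE];
        uint16_t i, nb_rx;

        for (;;) {
                nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
                for (i = 0; i < nb_rx; i++) {
                        /* process pkts[i] here */
                        rte_pktmbuf_free(pkts[i]);
                }
        }
}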

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 856f3d7266..2f1f95 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -348,6 +348,8 @@ idpf_dev_start(struct rte_eth_dev *dev)
goto err_mtu;
}
 
+   idpf_set_rx_function(dev);
+
ret = idpf_vc_ena_dis_vport(vport, true);
if (ret != 0) {
PMD_DRV_LOG(ERR, "Failed to enable vport");
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 053409b99a..ea499c4d37 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1208,3 +1208,276 @@ idpf_stop_queues(struct rte_eth_dev *dev)
PMD_DRV_LOG(WARNING, "Fail to stop Tx queue %d", i);
}
 }
+
+static void
+idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
+{
+   volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_ring;
+   volatile struct virtchnl2_splitq_rx_buf_desc *rx_buf_desc;
+   uint16_t nb_refill = rx_bufq->rx_free_thresh;
+   uint16_t nb_desc = rx_bufq->nb_rx_desc;
+   uint16_t next_avail = rx_bufq->rx_tail;
+   struct rte_mbuf *nmb[rx_bufq->rx_free_thresh];
+   struct rte_eth_dev *dev;
+   uint64_t dma_addr;
+   uint16_t delta;
+   int i;
+
+   if (rx_bufq->nb_rx_hold < rx_bufq->rx_free_thresh)
+   return;
+
+   rx_buf_ring = rx_bufq->rx_ring;
+   delta = nb_desc - next_avail;
+   if (unlikely(delta < nb_refill)) {
+   if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, delta) == 
0)) {
+   for (i = 0; i < delta; i++) {
+   rx_buf_desc = &rx_buf_ring[next_avail + i];
+   rx_bufq->sw_ring[next_avail + i] = nmb[i];
+   dma_addr = 
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+   rx_buf_desc->hdr_addr = 0;
+   rx_buf_desc->pkt_addr = dma_addr;
+   }
+   nb_refill -= delta;
+   next_avail = 0;
+   rx_bufq->nb_rx_hold -= delta;
+   } else {
+   dev = &rte_eth_devices[rx_bufq->port_id];
+   dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+   PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u 
queue_id=%u",
+  rx_bufq->port_id, rx_bufq->queue_id);
+   return;
+   }
+   }
+
+   if (nb_desc - next_avail >= nb_refill) {
+   if (likely(rte_pktmbuf_alloc_bulk(rx_bufq->mp, nmb, nb_refill) 
== 0)) {
+   for (i = 0; i < nb_refill; i++) {
+   rx_buf_desc = &rx_buf_ring[next_avail + i];
+   rx_bufq->sw_ring[next_avail + i] = nmb[i];
+   dma_addr = 
rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb[i]));
+   rx_buf_desc->hdr_addr = 0;
+   rx_buf_desc->pkt_addr = dma_addr;
+   }
+   next_avail += nb_refill;
+   rx_bufq->nb_rx_hold -= nb_refill;
+   } else {
+   dev = &rte_eth_devices[rx_bufq->port_id];
+   dev->data->rx_mbuf_alloc_failed += nb_desc - next_avail;
+   PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u 
queue_id=%u",
+  rx_bufq->port_id, rx_bufq->queue_id);
+   }
+   }
+
+   IDPF_PCI_REG_WRITE(rx_bufq->qrx_tail, next_avail);
+
+   rx_bufq->rx_tail = next_avail;
+}
+
+uint16_t
+idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc_ring;
+   volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+   uint16_t pktlen_gen_bufq_id;
+   struct idpf_rx_queue *rxq;
+   struct rte_mbuf *rxm;
+   uint16_t rx_id_bufq1;
+   uint16_t rx_id_bufq2;
+   uint16_t pkt_len;
+   uint16_t bufq_id;
+   uint16_t gen_id;
+   uint16_t rx_id;
+   uint16_t nb_rx;
+
+   nb_rx = 0;
+   rxq = rx_queue;
+
+   if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
+   return nb_rx;
+
+   rx_id = rxq->rx_tail;
+   rx_id_bufq1 = rxq->bufq1->rx_next_avail;
+   rx_id_bufq2 = rxq->bufq2->rx_next_avail;
+   rx_desc_ring = rxq->rx_ring;
+
+  

[PATCH v15 11/18] net/idpf: add support for basic Tx datapath

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add basic Tx support in split queue mode and single queue mode.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |   3 +
 drivers/net/idpf/idpf_ethdev.h |   1 +
 drivers/net/idpf/idpf_rxtx.c   | 357 +
 drivers/net/idpf/idpf_rxtx.h   |  10 +
 4 files changed, 371 insertions(+)
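
As with Rx, the Tx path is picked by idpf_set_tx_function() and driven through rte_eth_tx_burst(); a minimal sketch that frees whatever the driver did not accept (helper name is illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static void
tx_burst_or_drop(uint16_t port_id, uint16_t queue_id,
                 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

        while (nb_tx < nb_pkts)
                rte_pktmbuf_free(pkts[nb_tx++]);
}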

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 2f1f95..f9f6fe1162 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -59,6 +59,8 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+   dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
.tx_rs_thresh = IDPF_DEFAULT_TX_RS_THRESH,
@@ -349,6 +351,7 @@ idpf_dev_start(struct rte_eth_dev *dev)
}
 
idpf_set_rx_function(dev);
+   idpf_set_tx_function(dev);
 
ret = idpf_vc_ena_dis_vport(vport, true);
if (ret != 0) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 96c22009e9..af0a8e2970 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -35,6 +35,7 @@
 
 #define IDPF_MIN_BUF_SIZE  1024
 #define IDPF_MAX_FRAME_SIZE9728
+#define IDPF_MIN_FRAME_SIZE14
 
 #define IDPF_NUM_MACADDR_MAX   64
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index ea499c4d37..f55d2143b9 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1365,6 +1365,148 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
return nb_rx;
 }
 
+static inline void
+idpf_split_tx_free(struct idpf_tx_queue *cq)
+{
+   volatile struct idpf_splitq_tx_compl_desc *compl_ring = cq->compl_ring;
+   volatile struct idpf_splitq_tx_compl_desc *txd;
+   uint16_t next = cq->tx_tail;
+   struct idpf_tx_entry *txe;
+   struct idpf_tx_queue *txq;
+   uint16_t gen, qid, q_head;
+   uint8_t ctype;
+
+   txd = &compl_ring[next];
+   gen = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+   IDPF_TXD_COMPLQ_GEN_M) >> IDPF_TXD_COMPLQ_GEN_S;
+   if (gen != cq->expected_gen_id)
+   return;
+
+   ctype = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+   IDPF_TXD_COMPLQ_COMPL_TYPE_M) >> IDPF_TXD_COMPLQ_COMPL_TYPE_S;
+   qid = (rte_le_to_cpu_16(txd->qid_comptype_gen) &
+   IDPF_TXD_COMPLQ_QID_M) >> IDPF_TXD_COMPLQ_QID_S;
+   q_head = rte_le_to_cpu_16(txd->q_head_compl_tag.compl_tag);
+   txq = cq->txqs[qid - cq->tx_start_qid];
+
+   switch (ctype) {
+   case IDPF_TXD_COMPLT_RE:
+   if (q_head == 0)
+   txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+   else
+   txq->last_desc_cleaned = q_head - 1;
+   if (unlikely((txq->last_desc_cleaned % 32) == 0)) {
+   PMD_DRV_LOG(ERR, "unexpected desc (head = %u) 
completion.",
+   q_head);
+   return;
+   }
+
+   break;
+   case IDPF_TXD_COMPLT_RS:
+   txq->nb_free++;
+   txq->nb_used--;
+   txe = &txq->sw_ring[q_head];
+   if (txe->mbuf != NULL) {
+   rte_pktmbuf_free_seg(txe->mbuf);
+   txe->mbuf = NULL;
+   }
+   break;
+   default:
+   PMD_DRV_LOG(ERR, "unknown completion type.");
+   return;
+   }
+
+   if (++next == cq->nb_tx_desc) {
+   next = 0;
+   cq->expected_gen_id ^= 1;
+   }
+
+   cq->tx_tail = next;
+}
+
+uint16_t
+idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+   struct idpf_tx_queue *txq = (struct idpf_tx_queue *)tx_queue;
+   volatile struct idpf_flex_tx_sched_desc *txr;
+   volatile struct idpf_flex_tx_sched_desc *txd;
+   struct idpf_tx_entry *sw_ring;
+   struct idpf_tx_entry *txe, *txn;
+   uint16_t nb_used, tx_id, sw_id;
+   struct rte_mbuf *tx_pkt;
+   uint16_t nb_to_clean;
+   uint16_t nb_tx = 0;
+
+   if (unlikely(txq == NULL) || unlikely(!txq->q_started))
+   return nb_tx;
+
+   txr = txq->desc_ring;
+   sw_ring = txq->sw_ring;
+   tx_id = txq->tx_tail;
+   sw_id = txq->sw_tail;
+   txe = &sw_ring[sw_id];
+
+   for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+   tx_pkt = tx_pkts[nb_tx];
+
+   if (txq->nb_free <= txq->free_thresh) {
+   /* TODO: Need to refine
+

[PATCH v15 12/18] net/idpf: support parsing packet type

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Parse the packet type while receiving packets.

Signed-off-by: Wenjun Wu 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c |   6 +
 drivers/net/idpf/idpf_ethdev.h |   6 +
 drivers/net/idpf/idpf_rxtx.c   |  11 ++
 drivers/net/idpf/idpf_rxtx.h   |   5 +
 drivers/net/idpf/idpf_vchnl.c  | 240 +
 5 files changed, 268 insertions(+)
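
Once the driver fills mbuf->packet_type from its ptype table, applications can branch on the L3/L4 type without parsing headers; a minimal sketch (helper name is illustrative):

#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

static inline int
is_ipv4_tcp(const struct rte_mbuf *m)
{
        return RTE_ETH_IS_IPV4_HDR(m->packet_type) &&
               (m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP;
}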

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index f9f6fe1162..d0821ec3f3 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -686,6 +686,12 @@ idpf_adapter_init(struct rte_pci_device *pci_dev, struct 
idpf_adapter *adapter)
goto err_api;
}
 
+   ret = idpf_get_pkt_type(adapter);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to set ptype table");
+   goto err_api;
+   }
+
adapter->caps = rte_zmalloc("idpf_caps",
sizeof(struct virtchnl2_get_capabilities), 0);
if (adapter->caps == NULL) {
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index af0a8e2970..db9af58f72 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -39,6 +39,8 @@
 
 #define IDPF_NUM_MACADDR_MAX   64
 
+#define IDPF_MAX_PKT_TYPE  1024
+
 #define IDPF_VLAN_TAG_SIZE 4
 #define IDPF_ETH_OVERHEAD \
(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + IDPF_VLAN_TAG_SIZE * 2)
@@ -125,6 +127,8 @@ struct idpf_adapter {
/* Max config queue number per VC message */
uint32_t max_rxq_per_msg;
uint32_t max_txq_per_msg;
+
+   uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
 };
 
 TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
@@ -182,6 +186,7 @@ atomic_set_cmd(struct idpf_adapter *adapter, enum 
virtchnl_ops ops)
 struct idpf_adapter *idpf_find_adapter(struct rte_pci_device *pci_dev);
 void idpf_handle_virtchnl_msg(struct rte_eth_dev *dev);
 int idpf_vc_check_api_version(struct idpf_adapter *adapter);
+int idpf_get_pkt_type(struct idpf_adapter *adapter);
 int idpf_vc_get_caps(struct idpf_adapter *adapter);
 int idpf_vc_create_vport(struct idpf_adapter *adapter);
 int idpf_vc_destroy_vport(struct idpf_vport *vport);
@@ -193,6 +198,7 @@ int idpf_switch_queue(struct idpf_vport *vport, uint16_t 
qid,
  bool rx, bool on);
 int idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable);
 int idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable);
+int idpf_vc_query_ptype_info(struct idpf_adapter *adapter);
 int idpf_read_one_msg(struct idpf_adapter *adapter, uint32_t ops,
  uint16_t buf_len, uint8_t *buf);
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index f55d2143b9..a980714060 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1281,6 +1281,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
volatile struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
uint16_t pktlen_gen_bufq_id;
struct idpf_rx_queue *rxq;
+   const uint32_t *ptype_tbl;
struct rte_mbuf *rxm;
uint16_t rx_id_bufq1;
uint16_t rx_id_bufq2;
@@ -1300,6 +1301,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
rx_id_bufq1 = rxq->bufq1->rx_next_avail;
rx_id_bufq2 = rxq->bufq2->rx_next_avail;
rx_desc_ring = rxq->rx_ring;
+   ptype_tbl = rxq->adapter->ptype_tbl;
 
while (nb_rx < nb_pkts) {
rx_desc = &rx_desc_ring[rx_id];
@@ -1347,6 +1349,10 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
rxm->next = NULL;
rxm->nb_segs = 1;
rxm->port = rxq->port_id;
+   rxm->packet_type =
+   ptype_tbl[(rte_le_to_cpu_16(rx_desc->ptype_err_fflags0) 
&
+  VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M) >>
+ VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_S];
 
rx_pkts[nb_rx++] = rxm;
}
@@ -1533,6 +1539,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
volatile union virtchnl2_rx_desc *rxdp;
union virtchnl2_rx_desc rxd;
struct idpf_rx_queue *rxq;
+   const uint32_t *ptype_tbl;
uint16_t rx_id, nb_hold;
struct rte_eth_dev *dev;
uint16_t rx_packet_len;
@@ -1551,6 +1558,7 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
 
rx_id = rxq->rx_tail;
rx_ring = rxq->rx_ring;
+   ptype_tbl = rxq->adapter->ptype_tbl;
 
while (nb_rx < nb_pkts) {
rxdp = &rx_ring[rx_id];
@@ -1603,6 +1611,9 @@ idpf_singleq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
rxm->pkt_len = rx_packet_len;
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
+   rxm->packet_type =
+   

[PATCH v15 13/18] net/idpf: add support for write back based on ITR expire

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Enable write-back on ITR expire so that packets can be received one
by one.

Signed-off-by: Beilei Xing 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c | 120 +
 drivers/net/idpf/idpf_ethdev.h |  13 
 drivers/net/idpf/idpf_vchnl.c  | 113 +++
 3 files changed, 246 insertions(+)

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index d0821ec3f3..957cc10616 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -297,6 +297,90 @@ idpf_dev_configure(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+idpf_config_rx_queues_irqs(struct rte_eth_dev *dev)
+{
+   struct idpf_vport *vport = dev->data->dev_private;
+   struct idpf_adapter *adapter = vport->adapter;
+   struct virtchnl2_queue_vector *qv_map;
+   struct idpf_hw *hw = &adapter->hw;
+   uint32_t dynctl_reg_start;
+   uint32_t itrn_reg_start;
+   uint32_t dynctl_val, itrn_val;
+   uint16_t i;
+
+   qv_map = rte_zmalloc("qv_map",
+   dev->data->nb_rx_queues *
+   sizeof(struct virtchnl2_queue_vector), 0);
+   if (qv_map == NULL) {
+   PMD_DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+   dev->data->nb_rx_queues);
+   goto qv_map_alloc_err;
+   }
+
+   /* Rx interrupt disabled, Map interrupt only for writeback */
+
+   /* The capability flags adapter->caps->other_caps should be
+* compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+* condition should be updated when the FW can return the
+* correct flag bits.
+*/
+   dynctl_reg_start =
+   vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+   itrn_reg_start =
+   vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+   dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+   PMD_DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x",
+   dynctl_val);
+   itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+   PMD_DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+   /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+* register. WB_ON_ITR and INTENA are mutually exclusive
+* bits. Setting WB_ON_ITR bits means TX and RX Descs
+* are written back based on ITR expiration irrespective
+* of INTENA setting.
+*/
+   /* TBD: need to tune INTERVAL value for better performance. */
+   if (itrn_val != 0)
+   IDPF_WRITE_REG(hw,
+  dynctl_reg_start,
+  VIRTCHNL2_ITR_IDX_0  <<
+  PF_GLINT_DYN_CTL_ITR_INDX_S |
+  PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+  itrn_val <<
+  PF_GLINT_DYN_CTL_INTERVAL_S);
+   else
+   IDPF_WRITE_REG(hw,
+  dynctl_reg_start,
+  VIRTCHNL2_ITR_IDX_0  <<
+  PF_GLINT_DYN_CTL_ITR_INDX_S |
+  PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+  IDPF_DFLT_INTERVAL <<
+  PF_GLINT_DYN_CTL_INTERVAL_S);
+
+   for (i = 0; i < dev->data->nb_rx_queues; i++) {
+   /* map all queues to the same vector */
+   qv_map[i].queue_id = vport->chunks_info.rx_start_qid + i;
+   qv_map[i].vector_id =
+   vport->recv_vectors->vchunks.vchunks->start_vector_id;
+   }
+   vport->qv_map = qv_map;
+
+   if (idpf_vc_config_irq_map_unmap(vport, true) != 0) {
+   PMD_DRV_LOG(ERR, "config interrupt mapping failed");
+   goto config_irq_map_err;
+   }
+
+   return 0;
+
+config_irq_map_err:
+   rte_free(vport->qv_map);
+   vport->qv_map = NULL;
+
+qv_map_alloc_err:
+   return -1;
+}
+
 static int
 idpf_start_queues(struct rte_eth_dev *dev)
 {
@@ -334,6 +418,10 @@ static int
 idpf_dev_start(struct rte_eth_dev *dev)
 {
struct idpf_vport *vport = dev->data->dev_private;
+   struct idpf_adapter *adapter = vport->adapter;
+   uint16_t num_allocated_vectors =
+   adapter->caps->num_allocated_vectors;
+   uint16_t req_vecs_num;
int ret;
 
if (dev->data->mtu > vport->max_mtu) {
@@ -344,6 +432,27 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
+   req_vecs_num = IDPF_DFLT_Q_VEC_NUM;
+   if (req_vecs_num + adapter->used_vecs_num > num_allocated_vectors) {
+   PMD_DRV_LOG(ERR, "The accumulated request vectors' number 
should be less than %d",
+   num_allocated_vectors);
+   ret = -EINVAL;
+   goto err_mtu;
+   }
+
+   ret = idpf_vc_alloc_vectors(vpo

[PATCH v15 14/18] net/idpf: add support for RSS

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add RSS support.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 drivers/net/idpf/idpf_ethdev.c | 120 -
 drivers/net/idpf/idpf_ethdev.h |  26 +++
 drivers/net/idpf/idpf_vchnl.c  | 113 +++
 3 files changed, 258 insertions(+), 1 deletion(-)
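
On the application side, RSS is requested at configure time; when no key is supplied, the code added below derives a random key of rss_key_size and spreads the LUT across the configured Rx queues. A minimal sketch (helper name and hash selection are illustrative):

#include <rte_ethdev.h>

static int
configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {
                .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
                .rx_adv_conf = {
                        .rss_conf = {
                                .rss_key = NULL,  /* let the PMD pick a key */
                                .rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP |
                                          RTE_ETH_RSS_UDP,
                        },
                },
        };

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}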

diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 957cc10616..58560ea404 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -59,6 +59,8 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->max_mtu = dev_info->max_rx_pktlen - IDPF_ETH_OVERHEAD;
dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 
+   dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
+
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -169,6 +171,8 @@ idpf_parse_devarg_id(char *name)
return val;
 }
 
+#define IDPF_RSS_KEY_LEN 52
+
 static int
 idpf_init_vport(struct rte_eth_dev *dev)
 {
@@ -189,6 +193,10 @@ idpf_init_vport(struct rte_eth_dev *dev)
vport->max_mtu = vport_info->max_mtu;
rte_memcpy(vport->default_mac_addr,
   vport_info->default_mac_addr, ETH_ALEN);
+   vport->rss_algorithm = vport_info->rss_algorithm;
+   vport->rss_key_size = RTE_MIN(IDPF_RSS_KEY_LEN,
+vport_info->rss_key_size);
+   vport->rss_lut_size = vport_info->rss_lut_size;
vport->sw_idx = idx;
 
for (i = 0; i < vport_info->chunks.num_chunks; i++) {
@@ -246,17 +254,110 @@ idpf_init_vport(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+idpf_config_rss(struct idpf_vport *vport)
+{
+   int ret;
+
+   ret = idpf_vc_set_rss_key(vport);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to configure RSS key");
+   return ret;
+   }
+
+   ret = idpf_vc_set_rss_lut(vport);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to configure RSS lut");
+   return ret;
+   }
+
+   ret = idpf_vc_set_rss_hash(vport);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to configure RSS hash");
+   return ret;
+   }
+
+   return ret;
+}
+
+static int
+idpf_init_rss(struct idpf_vport *vport)
+{
+   struct rte_eth_rss_conf *rss_conf;
+   uint16_t i, nb_q, lut_size;
+   int ret = 0;
+
+   rss_conf = &vport->dev_data->dev_conf.rx_adv_conf.rss_conf;
+   nb_q = vport->dev_data->nb_rx_queues;
+
+   vport->rss_key = rte_zmalloc("rss_key",
+vport->rss_key_size, 0);
+   if (vport->rss_key == NULL) {
+   PMD_INIT_LOG(ERR, "Failed to allocate RSS key");
+   ret = -ENOMEM;
+   goto err_alloc_key;
+   }
+
+   lut_size = vport->rss_lut_size;
+   vport->rss_lut = rte_zmalloc("rss_lut",
+sizeof(uint32_t) * lut_size, 0);
+   if (vport->rss_lut == NULL) {
+   PMD_INIT_LOG(ERR, "Failed to allocate RSS lut");
+   ret = -ENOMEM;
+   goto err_alloc_lut;
+   }
+
+   if (rss_conf->rss_key == NULL) {
+   for (i = 0; i < vport->rss_key_size; i++)
+   vport->rss_key[i] = (uint8_t)rte_rand();
+   } else if (rss_conf->rss_key_len != vport->rss_key_size) {
+   PMD_INIT_LOG(ERR, "Invalid RSS key length in RSS configuration, 
should be %d",
+vport->rss_key_size);
+   ret = -EINVAL;
+   goto err_cfg_key;
+   } else {
+   rte_memcpy(vport->rss_key, rss_conf->rss_key,
+  vport->rss_key_size);
+   }
+
+   for (i = 0; i < lut_size; i++)
+   vport->rss_lut[i] = i % nb_q;
+
+   vport->rss_hf = IDPF_DEFAULT_RSS_HASH_EXPANDED;
+
+   ret = idpf_config_rss(vport);
+   if (ret != 0) {
+   PMD_INIT_LOG(ERR, "Failed to configure RSS");
+   goto err_cfg_key;
+   }
+
+   return ret;
+
+err_cfg_key:
+   rte_free(vport->rss_lut);
+   vport->rss_lut = NULL;
+err_alloc_lut:
+   rte_free(vport->rss_key);
+   vport->rss_key = NULL;
+err_alloc_key:
+   return ret;
+}
+
 static int
 idpf_dev_configure(struct rte_eth_dev *dev)
 {
+   struct idpf_vport *vport = dev->data->dev_private;
struct rte_eth_conf *conf = &dev->data->dev_conf;
+   struct idpf_adapter *adapter = vport->adapter;
+   int ret;
 
if (conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
PMD_INIT_LOG(ERR, "Setting link speed is not supported");
return -ENOTSUP;
}
 
-   if (dev->data->nb_rx_queues == 1 && conf->rxmode.mq_mode != 
RTE_ETH_MQ_RX_NONE) {
+   if ((dev->data->nb_rx_queues == 1 && conf->rxmode.mq_mode != 
R

[PATCH v15 15/18] net/idpf: add support for Rx offloading

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add Rx offloading support:
 - support CHKSUM and RSS offload for split queue model
 - support CHKSUM offload for single queue model

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/features/idpf.ini |   5 ++
 drivers/net/idpf/idpf_ethdev.c|   6 ++
 drivers/net/idpf/idpf_rxtx.c  | 123 ++
 drivers/net/idpf/idpf_vchnl.c |  18 +
 4 files changed, 152 insertions(+)
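
The checksum result ends up in the mbuf ol_flags filled by the new Rx helpers; requesting the offloads and consuming the per-packet flags looks roughly like this (helper names are illustrative):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static int
enable_rx_csum(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf = {
                .rxmode = {
                        .offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                                    RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                                    RTE_ETH_RX_OFFLOAD_TCP_CKSUM,
                },
        };

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}

static int
csum_is_bad(const struct rte_mbuf *m)
{
        return (m->ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD |
                               RTE_MBUF_F_RX_L4_CKSUM_BAD)) != 0;
}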

diff --git a/doc/guides/nics/features/idpf.ini 
b/doc/guides/nics/features/idpf.ini
index d722c49fde..868571654f 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -3,8 +3,13 @@
 ;
 ; Refer to default.ini for the full list of available PMD features.
 ;
+; A feature with "P" indicates only be supported when non-vector path
+; is selected.
+;
 [Features]
 MTU update   = Y
+L3 checksum offload  = P
+L4 checksum offload  = P
 Linux= Y
 x86-32   = Y
 x86-64   = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 58560ea404..a09f104425 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -61,6 +61,12 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
 
dev_info->flow_type_rss_offloads = IDPF_RSS_OFFLOAD_ALL;
 
+   dev_info->rx_offload_capa =
+   RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
+   RTE_ETH_RX_OFFLOAD_UDP_CKSUM|
+   RTE_ETH_RX_OFFLOAD_TCP_CKSUM|
+   RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+
dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
dev_info->default_txconf = (struct rte_eth_txconf) {
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index a980714060..f15e61a785 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1209,6 +1209,73 @@ idpf_stop_queues(struct rte_eth_dev *dev)
}
 }
 
+#define IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S   \
+   (RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S) | \
+RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S) | \
+RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S) |\
+RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S))
+
+static inline uint64_t
+idpf_splitq_rx_csum_offload(uint8_t err)
+{
+   uint64_t flags = 0;
+
+   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_S)) == 0))
+   return flags;
+
+   if (likely((err & IDPF_RX_FLEX_DESC_ADV_STATUS0_XSUM_S) == 0)) {
+   flags |= (RTE_MBUF_F_RX_IP_CKSUM_GOOD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD);
+   return flags;
+   }
+
+   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_S)) != 0))
+   flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
+   else
+   flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
+
+   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_S)) != 0))
+   flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
+   else
+   flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
+
+   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_S)) != 0))
+   flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
+
+   if (unlikely((err & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EUDPE_S)) != 0))
+   flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
+   else
+   flags |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD;
+
+   return flags;
+}
+
+#define IDPF_RX_FLEX_DESC_ADV_HASH1_S  0
+#define IDPF_RX_FLEX_DESC_ADV_HASH2_S  16
+#define IDPF_RX_FLEX_DESC_ADV_HASH3_S  24
+
+static inline uint64_t
+idpf_splitq_rx_rss_offload(struct rte_mbuf *mb,
+  volatile struct virtchnl2_rx_flex_desc_adv_nic_3 
*rx_desc)
+{
+   uint8_t status_err0_qw0;
+   uint64_t flags = 0;
+
+   status_err0_qw0 = rx_desc->status_err0_qw0;
+
+   if ((status_err0_qw0 & 
RTE_BIT32(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_RSS_VALID_S)) != 0) {
+   flags |= RTE_MBUF_F_RX_RSS_HASH;
+   mb->hash.rss = (rte_le_to_cpu_16(rx_desc->hash1) <<
+   IDPF_RX_FLEX_DESC_ADV_HASH1_S) |
+   ((uint32_t)(rx_desc->ff2_mirrid_hash2.hash2) <<
+IDPF_RX_FLEX_DESC_ADV_HASH2_S) |
+   ((uint32_t)(rx_desc->hash3) <<
+IDPF_RX_FLEX_DESC_ADV_HASH3_S);
+   }
+
+   return flags;
+}
+
 static void
 idpf_split_rx_bufq_refill(struct idpf_rx_queue *rx_bufq)
 {
@@ -1282,9 +1349,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
uint16_t pktlen_gen_bufq_id;
struct idpf_rx_queue *rxq;
const uint32_t *ptype_tbl;
+   uint8_t status_err0_qw1;
struct rte_mb

[PATCH v15 16/18] net/idpf: add support for Tx offloading

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add Tx offloading support:
 - support TSO for single queue model and split queue model.

Signed-off-by: Beilei Xing 
Signed-off-by: Xiaoyun Li 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c|   4 +-
 drivers/net/idpf/idpf_rxtx.c  | 128 +-
 drivers/net/idpf/idpf_rxtx.h  |  22 +
 4 files changed, 152 insertions(+), 3 deletions(-)
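
For TSO, the application marks the mbuf and provides the header lengths and MSS; the driver then prepends the flex TSO context descriptor built by idpf_set_splitq_tso_ctx(). A minimal sketch for a plain IPv4/TCP packet without options (helper name is illustrative):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>

static void
request_tso(struct rte_mbuf *m, uint16_t mss)
{
        m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IPV4 |
                       RTE_MBUF_F_TX_IP_CKSUM;
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->l4_len = sizeof(struct rte_tcp_hdr);
        m->tso_segsz = mss;
}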

diff --git a/doc/guides/nics/features/idpf.ini 
b/doc/guides/nics/features/idpf.ini
index 868571654f..d82b4aa0ff 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -8,6 +8,7 @@
 ;
 [Features]
 MTU update   = Y
+TSO  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Linux= Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index a09f104425..084426260c 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -67,7 +67,9 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
RTE_ETH_RX_OFFLOAD_TCP_CKSUM|
RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
 
-   dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+   dev_info->tx_offload_capa =
+   RTE_ETH_TX_OFFLOAD_TCP_TSO  |
+   RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index f15e61a785..cc296d7ab1 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -1506,6 +1506,49 @@ idpf_split_tx_free(struct idpf_tx_queue *cq)
cq->tx_tail = next;
 }
 
+/* Check if the context descriptor is needed for TX offloading */
+static inline uint16_t
+idpf_calc_context_desc(uint64_t flags)
+{
+   if ((flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+   return 1;
+
+   return 0;
+}
+
+/* set TSO context descriptor
+ */
+static inline void
+idpf_set_splitq_tso_ctx(struct rte_mbuf *mbuf,
+   union idpf_tx_offload tx_offload,
+   volatile union idpf_flex_tx_ctx_desc *ctx_desc)
+{
+   uint16_t cmd_dtype;
+   uint32_t tso_len;
+   uint8_t hdr_len;
+
+   if (tx_offload.l4_len == 0) {
+   PMD_TX_LOG(DEBUG, "L4 length set to 0");
+   return;
+   }
+
+   hdr_len = tx_offload.l2_len +
+   tx_offload.l3_len +
+   tx_offload.l4_len;
+   cmd_dtype = IDPF_TX_DESC_DTYPE_FLEX_TSO_CTX |
+   IDPF_TX_FLEX_CTX_DESC_CMD_TSO;
+   tso_len = mbuf->pkt_len - hdr_len;
+
+   ctx_desc->tso.qw1.cmd_dtype = rte_cpu_to_le_16(cmd_dtype);
+   ctx_desc->tso.qw0.hdr_len = hdr_len;
+   ctx_desc->tso.qw0.mss_rt =
+   rte_cpu_to_le_16((uint16_t)mbuf->tso_segsz &
+IDPF_TXD_FLEX_CTX_MSS_RT_M);
+   ctx_desc->tso.qw0.flex_tlen =
+   rte_cpu_to_le_32(tso_len &
+IDPF_TXD_FLEX_CTX_MSS_RT_M);
+}
+
 uint16_t
 idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
  uint16_t nb_pkts)
@@ -1514,11 +1557,14 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
volatile struct idpf_flex_tx_sched_desc *txr;
volatile struct idpf_flex_tx_sched_desc *txd;
struct idpf_tx_entry *sw_ring;
+   union idpf_tx_offload tx_offload = {0};
struct idpf_tx_entry *txe, *txn;
uint16_t nb_used, tx_id, sw_id;
struct rte_mbuf *tx_pkt;
uint16_t nb_to_clean;
uint16_t nb_tx = 0;
+   uint64_t ol_flags;
+   uint16_t nb_ctx;
 
if (unlikely(txq == NULL) || unlikely(!txq->q_started))
return nb_tx;
@@ -1548,7 +1594,29 @@ idpf_splitq_xmit_pkts(void *tx_queue, struct rte_mbuf 
**tx_pkts,
 
if (txq->nb_free < tx_pkt->nb_segs)
break;
-   nb_used = tx_pkt->nb_segs;
+
+   ol_flags = tx_pkt->ol_flags;
+   tx_offload.l2_len = tx_pkt->l2_len;
+   tx_offload.l3_len = tx_pkt->l3_len;
+   tx_offload.l4_len = tx_pkt->l4_len;
+   tx_offload.tso_segsz = tx_pkt->tso_segsz;
+   /* Calculate the number of context descriptors needed. */
+   nb_ctx = idpf_calc_context_desc(ol_flags);
+   nb_used = tx_pkt->nb_segs + nb_ctx;
+
+   /* context descriptor */
+   if (nb_ctx != 0) {
+   volatile union idpf_flex_tx_ctx_desc *ctx_desc =
+   (volatile union idpf_flex_tx_ctx_desc *)&txr[tx_id];
+
+   if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+   idpf_set_splitq_tso_ctx(tx_pkt, tx_offload,
+   

[PATCH v15 17/18] net/idpf: add AVX512 data path for single queue model

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for the AVX512 vector data path in the single queue model.

Signed-off-by: Wenjun Wu 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/idpf.rst|  19 +
 drivers/net/idpf/idpf_ethdev.c  |   3 +-
 drivers/net/idpf/idpf_ethdev.h  |   5 +
 drivers/net/idpf/idpf_rxtx.c| 145 
 drivers/net/idpf/idpf_rxtx.h|  21 +
 drivers/net/idpf/idpf_rxtx_vec_avx512.c | 871 
 drivers/net/idpf/idpf_rxtx_vec_common.h | 100 +++
 drivers/net/idpf/meson.build|  28 +
 8 files changed, 1191 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/idpf/idpf_rxtx_vec_avx512.c
 create mode 100644 drivers/net/idpf/idpf_rxtx_vec_common.h
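
The AVX512 path is only taken when both the build and the running CPU allow it; a rough sketch of the runtime conditions visible in the hunks below (the helper name is illustrative, build-time CC_AVX512_SUPPORT is also required, and the EAL option --force-max-simd-bitwidth can lower the reported SIMD width):

#include <rte_vect.h>
#include <rte_cpuflags.h>

static int
avx512_rx_path_allowed(void)
{
        return rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
               rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1;
}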

diff --git a/doc/guides/nics/idpf.rst b/doc/guides/nics/idpf.rst
index c1001d5d0c..3039c61748 100644
--- a/doc/guides/nics/idpf.rst
+++ b/doc/guides/nics/idpf.rst
@@ -64,3 +64,22 @@ Refer to the document :ref:`compiling and testing a PMD for 
a NIC tx_offload_capa =
RTE_ETH_TX_OFFLOAD_TCP_TSO  |
-   RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
+   RTE_ETH_TX_OFFLOAD_MULTI_SEGS   |
+   RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = IDPF_DEFAULT_TX_FREE_THRESH,
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 8d0804f603..7d54e5db60 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -162,6 +162,11 @@ struct idpf_adapter {
uint32_t max_txq_per_msg;
 
uint32_t ptype_tbl[IDPF_MAX_PKT_TYPE] __rte_cache_min_aligned;
+
+   bool rx_vec_allowed;
+   bool tx_vec_allowed;
+   bool rx_use_avx512;
+   bool tx_use_avx512;
 };
 
 TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index cc296d7ab1..9e20f2b9d3 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -4,9 +4,11 @@
 
 #include 
 #include 
+#include 
 
 #include "idpf_ethdev.h"
 #include "idpf_rxtx.h"
+#include "idpf_rxtx_vec_common.h"
 
 static int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
@@ -252,6 +254,8 @@ reset_single_rx_queue(struct idpf_rx_queue *rxq)
 
rxq->pkt_first_seg = NULL;
rxq->pkt_last_seg = NULL;
+   rxq->rxrearm_start = 0;
+   rxq->rxrearm_nb = 0;
 }
 
 static void
@@ -2073,25 +2077,166 @@ idpf_prep_pkts(__rte_unused void *tx_queue, struct 
rte_mbuf **tx_pkts,
return i;
 }
 
+static void __rte_cold
+release_rxq_mbufs_vec(struct idpf_rx_queue *rxq)
+{
+   const uint16_t mask = rxq->nb_rx_desc - 1;
+   uint16_t i;
+
+   if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+   return;
+
+   /* free all mbufs that are valid in the ring */
+   if (rxq->rxrearm_nb == 0) {
+   for (i = 0; i < rxq->nb_rx_desc; i++) {
+   if (rxq->sw_ring[i] != NULL)
+   rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+   }
+   } else {
+   for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & 
mask) {
+   if (rxq->sw_ring[i] != NULL)
+   rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+   }
+   }
+
+   rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+   /* set all entries to NULL */
+   memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
+static const struct idpf_rxq_ops def_singleq_rx_ops_vec = {
+   .release_mbufs = release_rxq_mbufs_vec,
+};
+
+static inline int
+idpf_singleq_rx_vec_setup_default(struct idpf_rx_queue *rxq)
+{
+   uintptr_t p;
+   struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
+
+   mb_def.nb_segs = 1;
+   mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+   mb_def.port = rxq->port_id;
+   rte_mbuf_refcnt_set(&mb_def, 1);
+
+   /* prevent compiler reordering: rearm_data covers previous fields */
+   rte_compiler_barrier();
+   p = (uintptr_t)&mb_def.rearm_data;
+   rxq->mbuf_initializer = *(uint64_t *)p;
+   return 0;
+}
+
+int __rte_cold
+idpf_singleq_rx_vec_setup(struct idpf_rx_queue *rxq)
+{
+   rxq->ops = &def_singleq_rx_ops_vec;
+   return idpf_singleq_rx_vec_setup_default(rxq);
+}
+
 void
 idpf_set_rx_function(struct rte_eth_dev *dev)
 {
struct idpf_vport *vport = dev->data->dev_private;
+#ifdef RTE_ARCH_X86
+   struct idpf_adapter *ad = vport->adapter;
+   struct idpf_rx_queue *rxq;
+   int i;
+
+   if (idpf_rx_vec_dev_check_default(dev) == IDPF_VECTOR_PATH &&
+   rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+   ad->rx_vec_allowed = true;
+
+   if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
+#ifdef CC_AVX512_SUPPORT
+   if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 
&&
+  

[PATCH v15 18/18] net/idpf: add support for timestamp offload

2022-10-28 Thread beilei . xing
From: Junfeng Guo 

Add support for timestamp offload.

Signed-off-by: Wenjing Qiao 
Signed-off-by: Junfeng Guo 
---
 doc/guides/nics/features/idpf.ini |  1 +
 drivers/net/idpf/idpf_ethdev.c|  5 +-
 drivers/net/idpf/idpf_ethdev.h|  3 ++
 drivers/net/idpf/idpf_rxtx.c  | 65 ++
 drivers/net/idpf/idpf_rxtx.h  | 90 +++
 5 files changed, 163 insertions(+), 1 deletion(-)
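
Applications read the timestamp back through the dynamic mbuf field that idpf_register_ts_mbuf() registers; a minimal sketch (helper name is illustrative, and RTE_ETH_RX_OFFLOAD_TIMESTAMP must be enabled on the port):

#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

static uint64_t
rx_timestamp(const struct rte_mbuf *m)
{
        static int offset = -2;         /* -2: not looked up yet */

        if (offset == -2)
                offset = rte_mbuf_dynfield_lookup(
                                RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
        if (offset < 0)
                return 0;               /* field was never registered */

        return *RTE_MBUF_DYNFIELD(m, offset, rte_mbuf_timestamp_t *);
}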

diff --git a/doc/guides/nics/features/idpf.ini 
b/doc/guides/nics/features/idpf.ini
index d82b4aa0ff..099fd7f216 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -11,6 +11,7 @@ MTU update   = Y
 TSO  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
+Timestamp offload= P
 Linux= Y
 x86-32   = Y
 x86-64   = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index cd4ebcc2c6..50aac65daf 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -22,6 +22,8 @@ rte_spinlock_t idpf_adapter_lock;
 struct idpf_adapter_list idpf_adapter_list;
 bool idpf_adapter_list_init;
 
+uint64_t idpf_timestamp_dynflag;
+
 static const char * const idpf_valid_args[] = {
IDPF_TX_SINGLE_Q,
IDPF_RX_SINGLE_Q,
@@ -65,7 +67,8 @@ idpf_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
RTE_ETH_RX_OFFLOAD_IPV4_CKSUM   |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM|
RTE_ETH_RX_OFFLOAD_TCP_CKSUM|
-   RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+   RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+   RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
dev_info->tx_offload_capa =
RTE_ETH_TX_OFFLOAD_TCP_TSO  |
diff --git a/drivers/net/idpf/idpf_ethdev.h b/drivers/net/idpf/idpf_ethdev.h
index 7d54e5db60..ccdf4abe40 100644
--- a/drivers/net/idpf/idpf_ethdev.h
+++ b/drivers/net/idpf/idpf_ethdev.h
@@ -167,6 +167,9 @@ struct idpf_adapter {
bool tx_vec_allowed;
bool rx_use_avx512;
bool tx_use_avx512;
+
+   /* For PTP */
+   uint64_t time_hw;
 };
 
 TAILQ_HEAD(idpf_adapter_list, idpf_adapter);
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index 9e20f2b9d3..bafa007faf 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -10,6 +10,8 @@
 #include "idpf_rxtx.h"
 #include "idpf_rxtx_vec_common.h"
 
+static int idpf_timestamp_dynfield_offset = -1;
+
 static int
 check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
 {
@@ -900,6 +902,24 @@ idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
return idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,
 socket_id, tx_conf);
 }
+
+static int
+idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)
+{
+   int err;
+   if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0) {
+   /* Register mbuf field and flag for Rx timestamp */
+   err = 
rte_mbuf_dyn_rx_timestamp_register(&idpf_timestamp_dynfield_offset,
+
&idpf_timestamp_dynflag);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR,
+   "Cannot register mbuf field/flag for 
timestamp");
+   return -EINVAL;
+   }
+   }
+   return 0;
+}
+
 static int
 idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
 {
@@ -993,6 +1013,13 @@ idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t 
rx_queue_id)
return -EINVAL;
}
 
+   err = idpf_register_ts_mbuf(rxq);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "fail to regidter timestamp mbuf %u",
+   rx_queue_id);
+   return -EIO;
+   }
+
if (rxq->bufq1 == NULL) {
/* Single queue */
err = idpf_alloc_single_rxq_mbufs(rxq);
@@ -1354,6 +1381,7 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
struct idpf_rx_queue *rxq;
const uint32_t *ptype_tbl;
uint8_t status_err0_qw1;
+   struct idpf_adapter *ad;
struct rte_mbuf *rxm;
uint16_t rx_id_bufq1;
uint16_t rx_id_bufq2;
@@ -1363,9 +1391,11 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
uint16_t gen_id;
uint16_t rx_id;
uint16_t nb_rx;
+   uint64_t ts_ns;
 
nb_rx = 0;
rxq = rx_queue;
+   ad = rxq->adapter;
 
if (unlikely(rxq == NULL) || unlikely(!rxq->q_started))
return nb_rx;
@@ -1376,6 +1406,9 @@ idpf_splitq_recv_pkts(void *rx_queue, struct rte_mbuf 
**rx_pkts,
rx_desc_ring = rxq->rx_ring;
ptype_tbl = rxq->adapter->ptype_tbl;
 
+   if ((rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
+   rxq->hw_register