[PATCH] net: increase the maximum of RX/TX descriptors

2024-10-29 Thread Lukas Sismis
Intel PMDs are capped by default to only 4096 RX/TX descriptors.
This can be limiting for applications that require larger buffering
capacity, as the cap prevents them from configuring more descriptors.
By buffering more packets in the RX/TX descriptor rings, applications
can better absorb processing peaks.
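
A minimal application-side sketch (not part of this patch; the helper
name is illustrative) of how a larger ring is requested through the
ethdev API, which clamps the request to the PMD's descriptor limit:

#include <rte_ethdev.h>

static int
setup_large_rx_ring(uint16_t port_id, uint16_t queue_id,
		    struct rte_mempool *mb_pool)
{
	uint16_t nb_rxd = 8192; /* desired RX ring size */
	uint16_t nb_txd = 8192;
	int ret;

	/*
	 * Clamp the request to the PMD's advertised limits; with the old
	 * 4096 cap this call would silently shrink nb_rxd/nb_txd.
	 */
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	return rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mb_pool);
}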

Signed-off-by: Lukas Sismis 
---
 doc/guides/nics/ixgbe.rst| 2 +-
 drivers/net/cpfl/cpfl_rxtx.h | 2 +-
 drivers/net/e1000/e1000_ethdev.h | 2 +-
 drivers/net/iavf/iavf_rxtx.h | 2 +-
 drivers/net/ice/ice_rxtx.h   | 2 +-
 drivers/net/idpf/idpf_rxtx.h | 2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c | 2 +-
 drivers/net/ixgbe/ixgbe_rxtx.h   | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 14573b542e..291b33d699 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -76,7 +76,7 @@ Scattered packets are not supported in this mode.
 If an incoming packet is greater than the maximum acceptable length of one 
"mbuf" data size (by default, the size is 2 KB),
 vPMD for RX would be disabled.
 
-By default, IXGBE_MAX_RING_DESC is set to 4096 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
+By default, IXGBE_MAX_RING_DESC is set to 32768 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
 
 Windows Prerequisites and Pre-conditions
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index aacd087b56..4db4025771 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -11,7 +11,7 @@
 /* In QLEN must be whole number of 32 descriptors. */
 #define CPFL_ALIGN_RING_DESC   32
 #define CPFL_MIN_RING_DESC 32
-#define CPFL_MAX_RING_DESC 4096
+#define CPFL_MAX_RING_DESC 32768
 #define CPFL_DMA_MEM_ALIGN 4096
 
 #define CPFL_MAX_HAIRPINQ_RX_2_TX  1
diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index 339ae1f4b6..e9046047f6 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -107,7 +107,7 @@
  * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
  */
 #define E1000_MIN_RING_DESC 32
-#define E1000_MAX_RING_DESC 4096
+#define E1000_MAX_RING_DESC 32768
 
 /*
  * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 7b56076d32..f9c129f0ef 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
@@ -8,7 +8,7 @@
 /* In QLEN must be whole number of 32 descriptors. */
 #define IAVF_ALIGN_RING_DESC  32
 #define IAVF_MIN_RING_DESC    64
-#define IAVF_MAX_RING_DESC    4096
+#define IAVF_MAX_RING_DESC    32768
 #define IAVF_DMA_MEM_ALIGN    4096
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define IAVF_RING_BASE_ALIGN  128
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f7276cfc9f..6d18fe908d 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -9,7 +9,7 @@
 
 #define ICE_ALIGN_RING_DESC  32
 #define ICE_MIN_RING_DESC    64
-#define ICE_MAX_RING_DESC    4096
+#define ICE_MAX_RING_DESC    32768
 #define ICE_DMA_MEM_ALIGN    4096
 #define ICE_RING_BASE_ALIGN  128
 
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index 41a7495083..0f78f7cba5 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -11,7 +11,7 @@
 /* In QLEN must be whole number of 32 descriptors. */
 #define IDPF_ALIGN_RING_DESC   32
 #define IDPF_MIN_RING_DESC 32
-#define IDPF_MAX_RING_DESC 4096
+#define IDPF_MAX_RING_DESC 32768
 #define IDPF_DMA_MEM_ALIGN 4096
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define IDPF_RING_BASE_ALIGN   128
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7da2ccf6a8..a2637f0a91 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -73,7 +73,7 @@
 
 #define IXGBE_MMW_SIZE_DEFAULT    0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
-#define IXGBE_MAX_RING_DESC   4096 /* replicate define from rxtx */
+#define IXGBE_MAX_RING_DESC   32768 /* replicate define from rxtx */
 
 /*
  *  Default values for RX/TX configuration
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ee89c89929..a28037b08a 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -25,7 +25,7 @@
  *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
 #define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
+#define IXGBE_MAX_RING_DESC 32768
 
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
-- 
2.34.1



[PATCH v3 1/1] net/bonding: make bonding functions stable

2024-10-29 Thread Lukas Sismis
Remove rte_experimental macros from the stable functions
as they have been part of the stable API since 23.11.

Signed-off-by: Lukas Sismis 
---
 drivers/net/bonding/rte_eth_bond.h|  4 
 drivers/net/bonding/rte_eth_bond_8023ad.h |  1 -
 drivers/net/bonding/version.map   | 15 +--
 3 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond.h 
b/drivers/net/bonding/rte_eth_bond.h
index e59ff8793e..4f79ff9b85 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -125,7 +125,6 @@ rte_eth_bond_free(const char *name);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -138,7 +137,6 @@ rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t 
member_port_id);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -199,7 +197,6 @@ rte_eth_bond_primary_get(uint16_t bonding_port_id);
  * Number of members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
@@ -216,7 +213,6 @@ rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t 
members[],
  * Number of active members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h 
b/drivers/net/bonding/rte_eth_bond_8023ad.h
index b2deb26e2e..5432eafcfe 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -193,7 +193,6 @@ rte_eth_bond_8023ad_setup(uint16_t port_id,
  *   -EINVAL if conf is NULL or member id is invalid (not a member of given
  *   bonding device or is not inactive).
  */
-__rte_experimental
 int
 rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id,
struct rte_eth_bond_8023ad_member_info *conf);
diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map
index a309469b1f..eb37dadf76 100644
--- a/drivers/net/bonding/version.map
+++ b/drivers/net/bonding/version.map
@@ -11,12 +11,17 @@ DPDK_25 {
rte_eth_bond_8023ad_ext_distrib;
rte_eth_bond_8023ad_ext_distrib_get;
rte_eth_bond_8023ad_ext_slowtx;
+   rte_eth_bond_8023ad_member_info;
rte_eth_bond_8023ad_setup;
+   rte_eth_bond_active_members_get;
rte_eth_bond_create;
rte_eth_bond_free;
rte_eth_bond_link_monitoring_set;
rte_eth_bond_mac_address_reset;
rte_eth_bond_mac_address_set;
+   rte_eth_bond_member_add;
+   rte_eth_bond_member_remove;
+   rte_eth_bond_members_get;
rte_eth_bond_mode_get;
rte_eth_bond_mode_set;
rte_eth_bond_primary_get;
@@ -26,13 +31,3 @@ DPDK_25 {
 
local: *;
 };
-
-EXPERIMENTAL {
-   # added in 23.11
-   global:
-   rte_eth_bond_8023ad_member_info;
-   rte_eth_bond_active_members_get;
-   rte_eth_bond_member_add;
-   rte_eth_bond_member_remove;
-   rte_eth_bond_members_get;
-};
-- 
2.34.1



[PATCH] net/bonding: make bonding functions stable

2024-10-29 Thread Lukas Sismis
Remove rte_experimental macros from the stable functions
as they have been part of the stable API since 23.11.

Signed-off-by: Lukas Sismis 
---
 drivers/net/bonding/rte_eth_bond.h|  4 
 drivers/net/bonding/rte_eth_bond_8023ad.h |  1 -
 drivers/net/bonding/version.map   | 10 --
 3 files changed, 15 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond.h 
b/drivers/net/bonding/rte_eth_bond.h
index e59ff8793e..4f79ff9b85 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -125,7 +125,6 @@ rte_eth_bond_free(const char *name);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -138,7 +137,6 @@ rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t 
member_port_id);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -199,7 +197,6 @@ rte_eth_bond_primary_get(uint16_t bonding_port_id);
  * Number of members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
@@ -216,7 +213,6 @@ rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t 
members[],
  * Number of active members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h 
b/drivers/net/bonding/rte_eth_bond_8023ad.h
index b2deb26e2e..5432eafcfe 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -193,7 +193,6 @@ rte_eth_bond_8023ad_setup(uint16_t port_id,
  *   -EINVAL if conf is NULL or member id is invalid (not a member of given
  *   bonding device or is not inactive).
  */
-__rte_experimental
 int
 rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id,
struct rte_eth_bond_8023ad_member_info *conf);
diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map
index a309469b1f..68b892d1e6 100644
--- a/drivers/net/bonding/version.map
+++ b/drivers/net/bonding/version.map
@@ -26,13 +26,3 @@ DPDK_25 {
 
local: *;
 };
-
-EXPERIMENTAL {
-   # added in 23.11
-   global:
-   rte_eth_bond_8023ad_member_info;
-   rte_eth_bond_active_members_get;
-   rte_eth_bond_member_add;
-   rte_eth_bond_member_remove;
-   rte_eth_bond_members_get;
-};
-- 
2.34.1



[PATCH] net/bonding: make bonding functions stable

2024-10-29 Thread Lukas Sismis
Remove rte_experimental macros from the stable functions
as they have been part of the stable API since 22.11.

Signed-off-by: Lukas Sismis 
---
 drivers/net/bonding/rte_eth_bond.h | 4 
 1 file changed, 4 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond.h 
b/drivers/net/bonding/rte_eth_bond.h
index e59ff8793e..4f79ff9b85 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -125,7 +125,6 @@ rte_eth_bond_free(const char *name);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -138,7 +137,6 @@ rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t 
member_port_id);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -199,7 +197,6 @@ rte_eth_bond_primary_get(uint16_t bonding_port_id);
  * Number of members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
@@ -216,7 +213,6 @@ rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t 
members[],
  * Number of active members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
-- 
2.34.1



[PATCH v2 2/2] net/ice: increase the maximum of RX/TX descriptors

2024-10-30 Thread Lukas Sismis
Intel PMDs are capped by default to only 4096 RX/TX descriptors.
This can be limiting for applications that require larger buffering
capacity. By buffering more packets in the RX/TX descriptor rings,
applications can better absorb processing peaks.

Setting ice max descriptors to 8192 - 32 as per datasheet:
Register name: QLEN (Rx-Queue)
Description: Receive Queue Length
Defines the size of the descriptor queue in descriptors units
from eight descriptors (QLEN=0x8) up to 8K descriptors minus
32 (QLEN=0x1FE0).
QLEN Restrictions: When the PXE_MODE flag in the
GLLAN_RCTL_0 register is cleared, the QLEN must be whole
number of 32 descriptors. When the PXE_MODE flag is set, the
QLEN can be one of the following options:
Up to 4 PFs, QLEN can be set to: 8, 16, 24 or 32 descriptors.
Up to 8 PFs, QLEN can be set to: 8 or 16 descriptors
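
A small standalone check (not from the patch) confirming the arithmetic
behind the quoted QLEN rules: 8K descriptors minus 32 is 8160 (0x1FE0)
and is a whole multiple of the 32-descriptor granularity.

_Static_assert((8192 - 32) == 0x1FE0,
	       "max QLEN is 8K descriptors minus 32");
_Static_assert((8192 - 32) % 32 == 0,
	       "QLEN must be a whole number of 32 descriptors");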

Signed-off-by: Lukas Sismis 
---
 drivers/net/ice/ice_rxtx.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f7276cfc9f..b43f9fcd1b 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -9,7 +9,7 @@
 
 #define ICE_ALIGN_RING_DESC  32
 #define ICE_MIN_RING_DESC    64
-#define ICE_MAX_RING_DESC    4096
+#define ICE_MAX_RING_DESC    8192 - 32
 #define ICE_DMA_MEM_ALIGN    4096
 #define ICE_RING_BASE_ALIGN  128
 
-- 
2.34.1



[PATCH v2 1/2] net/ixgbe: increase the maximum of RX/TX descriptors

2024-10-30 Thread Lukas Sismis
Intel PMDs are capped by default to only 4096 RX/TX descriptors.
This can be limiting for applications that require larger buffering
capacity. By buffering more packets in the RX/TX descriptor rings,
applications can better absorb processing peaks.

Setting ixgbe max descriptors to 8192 as per datasheet:
Register name: RDLEN
Description: Descriptor Ring Length.
This register sets the number of bytes
allocated for descriptors in the circular descriptor buffer.
It must be 128B aligned (7 LS bit must be set to zero).
** Note: validated Lengths up to 128K (8K descriptors). **
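
A small standalone check (not from the patch) of the arithmetic behind
the note above: ixgbe descriptors are 16 bytes each, so 8192 descriptors
give exactly the validated 128 KB ring length, which also keeps the 7
least-significant bits of RDLEN zero.

_Static_assert(8192 * 16 == 128 * 1024,
	       "8K 16-byte descriptors fill a 128 KB ring");
_Static_assert((8192 * 16) % 128 == 0,
	       "RDLEN must be 128-byte aligned (7 LS bits zero)");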

Signed-off-by: Lukas Sismis 
---
 doc/guides/nics/ixgbe.rst| 2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c | 2 +-
 drivers/net/ixgbe/ixgbe_rxtx.h   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 14573b542e..c5c6a6c34b 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -76,7 +76,7 @@ Scattered packets are not supported in this mode.
 If an incoming packet is greater than the maximum acceptable length of one 
"mbuf" data size (by default, the size is 2 KB),
 vPMD for RX would be disabled.
 
-By default, IXGBE_MAX_RING_DESC is set to 4096 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
+By default, IXGBE_MAX_RING_DESC is set to 8192 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
 
 Windows Prerequisites and Pre-conditions
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7da2ccf6a8..da9b3d7ca7 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -73,7 +73,7 @@
 
 #define IXGBE_MMW_SIZE_DEFAULT    0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
-#define IXGBE_MAX_RING_DESC   4096 /* replicate define from rxtx */
+#define IXGBE_MAX_RING_DESC   8192 /* replicate define from rxtx */
 
 /*
  *  Default values for RX/TX configuration
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ee89c89929..0550c1da60 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -25,7 +25,7 @@
  *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
 #define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
+#define IXGBE_MAX_RING_DESC 8192
 
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
-- 
2.34.1



[PATCH v3 1/1] net/bonding: make bonding functions stable

2024-10-30 Thread Lukas Sismis
Remove rte_experimental macros from the stable functions
as they have been part of the stable API since 23.11.

Signed-off-by: Lukas Sismis 
---
 drivers/net/bonding/rte_eth_bond.h|  4 
 drivers/net/bonding/rte_eth_bond_8023ad.h |  1 -
 drivers/net/bonding/version.map   | 15 +--
 3 files changed, 5 insertions(+), 15 deletions(-)

diff --git a/drivers/net/bonding/rte_eth_bond.h 
b/drivers/net/bonding/rte_eth_bond.h
index e59ff8793e..4f79ff9b85 100644
--- a/drivers/net/bonding/rte_eth_bond.h
+++ b/drivers/net/bonding/rte_eth_bond.h
@@ -125,7 +125,6 @@ rte_eth_bond_free(const char *name);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -138,7 +137,6 @@ rte_eth_bond_member_add(uint16_t bonding_port_id, uint16_t 
member_port_id);
  * @return
  * 0 on success, negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_member_remove(uint16_t bonding_port_id, uint16_t member_port_id);
 
@@ -199,7 +197,6 @@ rte_eth_bond_primary_get(uint16_t bonding_port_id);
  * Number of members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
@@ -216,7 +213,6 @@ rte_eth_bond_members_get(uint16_t bonding_port_id, uint16_t 
members[],
  * Number of active members associated with bonding device on success,
  * negative value otherwise
  */
-__rte_experimental
 int
 rte_eth_bond_active_members_get(uint16_t bonding_port_id, uint16_t members[],
uint16_t len);
diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h 
b/drivers/net/bonding/rte_eth_bond_8023ad.h
index b2deb26e2e..5432eafcfe 100644
--- a/drivers/net/bonding/rte_eth_bond_8023ad.h
+++ b/drivers/net/bonding/rte_eth_bond_8023ad.h
@@ -193,7 +193,6 @@ rte_eth_bond_8023ad_setup(uint16_t port_id,
  *   -EINVAL if conf is NULL or member id is invalid (not a member of given
  *   bonding device or is not inactive).
  */
-__rte_experimental
 int
 rte_eth_bond_8023ad_member_info(uint16_t port_id, uint16_t member_id,
struct rte_eth_bond_8023ad_member_info *conf);
diff --git a/drivers/net/bonding/version.map b/drivers/net/bonding/version.map
index a309469b1f..eb37dadf76 100644
--- a/drivers/net/bonding/version.map
+++ b/drivers/net/bonding/version.map
@@ -11,12 +11,17 @@ DPDK_25 {
rte_eth_bond_8023ad_ext_distrib;
rte_eth_bond_8023ad_ext_distrib_get;
rte_eth_bond_8023ad_ext_slowtx;
+   rte_eth_bond_8023ad_member_info;
rte_eth_bond_8023ad_setup;
+   rte_eth_bond_active_members_get;
rte_eth_bond_create;
rte_eth_bond_free;
rte_eth_bond_link_monitoring_set;
rte_eth_bond_mac_address_reset;
rte_eth_bond_mac_address_set;
+   rte_eth_bond_member_add;
+   rte_eth_bond_member_remove;
+   rte_eth_bond_members_get;
rte_eth_bond_mode_get;
rte_eth_bond_mode_set;
rte_eth_bond_primary_get;
@@ -26,13 +31,3 @@ DPDK_25 {
 
local: *;
 };
-
-EXPERIMENTAL {
-   # added in 23.11
-   global:
-   rte_eth_bond_8023ad_member_info;
-   rte_eth_bond_active_members_get;
-   rte_eth_bond_member_add;
-   rte_eth_bond_member_remove;
-   rte_eth_bond_members_get;
-};
-- 
2.34.1



[PATCH v3 2/2] net/ice: increase the maximum of RX/TX descriptors

2024-10-30 Thread Lukas Sismis
Intel PMDs are capped by default to only 4096 RX/TX descriptors.
This can be limiting for applications that require larger buffering
capacity. By buffering more packets in the RX/TX descriptor rings,
applications can better absorb processing peaks.

Setting ice max descriptors to 8192 - 32 as per datasheet:
Register name: QLEN (Rx-Queue)
Description: Receive Queue Length
Defines the size of the descriptor queue in descriptors units
from eight descriptors (QLEN=0x8) up to 8K descriptors minus
32 (QLEN=0x1FE0).
QLEN Restrictions: When the PXE_MODE flag in the
GLLAN_RCTL_0 register is cleared, the QLEN must be whole
number of 32 descriptors. When the PXE_MODE flag is set, the
QLEN can be one of the following options:
Up to 4 PFs, QLEN can be set to: 8, 16, 24 or 32 descriptors.
Up to 8 PFs, QLEN can be set to: 8 or 16 descriptors
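
For illustration only (the helper name is made up and this is not the
driver's actual code), the kind of bounds check a queue-setup path
applies against the ICE_* limits from ice_rxtx.h, which is why the
maximum must stay a whole multiple of 32:

#include <errno.h>
#include <stdint.h>

static int
ice_ring_size_ok(uint16_t nb_desc)
{
	if (nb_desc % ICE_ALIGN_RING_DESC != 0 ||
	    nb_desc < ICE_MIN_RING_DESC ||
	    nb_desc > ICE_MAX_RING_DESC)
		return -EINVAL; /* size the hardware cannot program */
	return 0;
}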

Signed-off-by: Lukas Sismis 
---
 drivers/net/ice/ice_rxtx.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index f7276cfc9f..45f25b3609 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -9,7 +9,7 @@
 
 #define ICE_ALIGN_RING_DESC  32
 #define ICE_MIN_RING_DESC    64
-#define ICE_MAX_RING_DESC    4096
+#define ICE_MAX_RING_DESC    (8192 - 32)
 #define ICE_DMA_MEM_ALIGN    4096
 #define ICE_RING_BASE_ALIGN  128
 
-- 
2.34.1



[PATCH v3 1/2] net/ixgbe: increase the maximum of RX/TX descriptors

2024-10-30 Thread Lukas Sismis
Intel PMDs are capped by default to only 4096 RX/TX descriptors.
This can be limiting for applications that require larger buffering
capacity. By buffering more packets in the RX/TX descriptor rings,
applications can better absorb processing peaks.

Setting ixgbe max descriptors to 8192 as per datasheet:
Register name: RDLEN
Description: Descriptor Ring Length.
This register sets the number of bytes
allocated for descriptors in the circular descriptor buffer.
It must be 128B aligned (7 LS bit must be set to zero).
** Note: validated Lengths up to 128K (8K descriptors). **
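
For illustration only (not part of the patch; the helper name is made
up), the raised cap is ultimately what ethdev reports back to
applications as the per-queue descriptor limit:

#include <rte_ethdev.h>

static uint16_t
max_rx_descriptors(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;
	/* For an ixgbe port this limit now reads 8192 instead of 4096. */
	return dev_info.rx_desc_lim.nb_max;
}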

Signed-off-by: Lukas Sismis 
---
 doc/guides/nics/ixgbe.rst| 2 +-
 drivers/net/ixgbe/ixgbe_ethdev.c | 2 +-
 drivers/net/ixgbe/ixgbe_rxtx.h   | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/ixgbe.rst b/doc/guides/nics/ixgbe.rst
index 14573b542e..c5c6a6c34b 100644
--- a/doc/guides/nics/ixgbe.rst
+++ b/doc/guides/nics/ixgbe.rst
@@ -76,7 +76,7 @@ Scattered packets are not supported in this mode.
 If an incoming packet is greater than the maximum acceptable length of one 
"mbuf" data size (by default, the size is 2 KB),
 vPMD for RX would be disabled.
 
-By default, IXGBE_MAX_RING_DESC is set to 4096 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
+By default, IXGBE_MAX_RING_DESC is set to 8192 and RTE_PMD_IXGBE_RX_MAX_BURST is set to 32.
 
 Windows Prerequisites and Pre-conditions
 
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 7da2ccf6a8..da9b3d7ca7 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -73,7 +73,7 @@
 
 #define IXGBE_MMW_SIZE_DEFAULT    0x4
 #define IXGBE_MMW_SIZE_JUMBO_FRAME    0x14
-#define IXGBE_MAX_RING_DESC   4096 /* replicate define from rxtx */
+#define IXGBE_MAX_RING_DESC   8192 /* replicate define from rxtx */
 
 /*
  *  Default values for RX/TX configuration
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h
index ee89c89929..0550c1da60 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.h
+++ b/drivers/net/ixgbe/ixgbe_rxtx.h
@@ -25,7 +25,7 @@
  *  (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
  */
 #define IXGBE_MIN_RING_DESC 32
-#define IXGBE_MAX_RING_DESC 4096
+#define IXGBE_MAX_RING_DESC 8192
 
 #define RTE_PMD_IXGBE_TX_MAX_BURST 32
 #define RTE_PMD_IXGBE_RX_MAX_BURST 32
-- 
2.34.1