Re: [dpdk-dev] [EXT] [PATCH] crypto/dpaa_sec: update release notes

2021-09-08 Thread Hemant Agrawal



On 9/8/2021 12:45 AM, Akhil Goyal wrote:

Update the release notes in DPAAx for the changes in recent patches.

Signed-off-by: Hemant Agrawal 
---
  doc/guides/rel_notes/release_21_11.rst | 4 
  1 file changed, 4 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst
b/doc/guides/rel_notes/release_21_11.rst
index 0afd21812f..c747a0fb11 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -75,10 +75,14 @@ New Features
  * **Updated NXP dpaa2_sec crypto PMD.**

* Added raw vector datapath API support
+  * Added PDCP short MAC-I support

  * **Updated NXP dpaa_sec crypto PMD.**

* Added raw vector datapath API support
+  * Added PDCP short MAC-I support
+  * Added non-HMAC, DES, AES-XCBC and AES-CMAC algo support
+


Please send a new version of each of the series, adding each item
separately to the release notes.
For example, the DES-CBC patch should update the release notes only for
DES-CBC, and subsequently the same for the other patches.
Please also remove the corresponding entry from the deprecation notices and
update the ABI changes section of the release notes in the particular patch.

I tried splitting it, but it would also need changes in the deprecation notices.
Please rebase the 3 series in the following order (for the 3rd series I am still
waiting for a review from Intel):
1. Algo support in dpaaX
2. short MAC
3. raw crypto

ok




[dpdk-dev] [PATCH v4 01/10] crypto/dpaa_sec: support DES-CBC

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

Add DES-CBC support and enable the available cipher-only
test cases.

Signed-off-by: Gagandeep Singh 
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |  1 +
 doc/guides/rel_notes/release_21_11.rst  |  3 +++
 drivers/crypto/dpaa_sec/dpaa_sec.c  | 13 +
 drivers/crypto/dpaa_sec/dpaa_sec.h  | 20 
 4 files changed, 37 insertions(+)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini 
b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 243f3e1d67..5d0d04d601 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -24,6 +24,7 @@ AES CBC (256) = Y
 AES CTR (128) = Y
 AES CTR (192) = Y
 AES CTR (256) = Y
+DES CBC   = Y
 3DES CBC  = Y
 SNOW3G UEA2   = Y
 ZUC EEA3  = Y
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 411fa9530a..4aa16d6915 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -73,6 +73,9 @@ New Features
 
   * Added event crypto adapter OP_FORWARD mode support.
 
+* **Updated NXP dpaa_sec crypto PMD.**
+
+  * Added DES-CBC algo support
 
 Removed Items
 -
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c 
b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 19d4684e24..af5c7c499c 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -454,6 +454,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_AES_CBC:
case RTE_CRYPTO_CIPHER_3DES_CBC:
+   case RTE_CRYPTO_CIPHER_DES_CBC:
case RTE_CRYPTO_CIPHER_AES_CTR:
case RTE_CRYPTO_CIPHER_3DES_CTR:
shared_desc_len = cnstr_shdsc_blkcipher(
@@ -2043,6 +2044,10 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev 
__rte_unused,
session->cipher_key.alg = OP_ALG_ALGSEL_AES;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
break;
+   case RTE_CRYPTO_CIPHER_DES_CBC:
+   session->cipher_key.alg = OP_ALG_ALGSEL_DES;
+   session->cipher_key.algmode = OP_ALG_AAI_CBC;
+   break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
@@ -2218,6 +2223,10 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev 
__rte_unused,
session->cipher_key.alg = OP_ALG_ALGSEL_AES;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
break;
+   case RTE_CRYPTO_CIPHER_DES_CBC:
+   session->cipher_key.alg = OP_ALG_ALGSEL_DES;
+   session->cipher_key.algmode = OP_ALG_AAI_CBC;
+   break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
@@ -2667,6 +2676,10 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform 
*cipher_xform,
session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
break;
+   case RTE_CRYPTO_CIPHER_DES_CBC:
+   session->cipher_key.alg = OP_PCL_IPSEC_DES;
+   session->cipher_key.algmode = OP_ALG_AAI_CBC;
+   break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
session->cipher_key.alg = OP_PCL_IPSEC_3DES;
session->cipher_key.algmode = OP_ALG_AAI_CBC;
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h 
b/drivers/crypto/dpaa_sec/dpaa_sec.h
index 368699678b..216e8c8b6f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -456,6 +456,26 @@ static const struct rte_cryptodev_capabilities 
dpaa_sec_capabilities[] = {
}, }
}, }
},
+   {   /* DES CBC */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+   .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+   {.cipher = {
+   .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+   .block_size = 8,
+   .key_size = {
+   .min = 8,
+   .max = 8,
+   .increment = 0
+   },
+   .iv_size = {
+   .min = 8,
+   .max = 8,
+   .increment = 0
+   }
+   }, }
+   }, }
+   },
{   /* 3DES CBC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
-- 
2.17.1
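For reference, a hypothetical application-side cipher transform exercising the
DES-CBC capability added above (8-byte key and 8-byte IV, per the capability
table) could look like the following sketch; the key buffer and IV offset are
application-defined and not part of this patch:

#include <rte_crypto.h>

/* Hypothetical key buffer and IV placement; IV_OFFSET is where the
 * application stores the IV inside the crypto op (not defined here).
 */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))
static uint8_t des_key[8]; /* 8-byte DES key, filled by the application */

static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_DES_CBC,
		.key = { .data = des_key, .length = sizeof(des_key) },
		.iv = { .offset = IV_OFFSET, .length = 8 },
	},
};

A session created from such an xform should now be accepted by dpaa_sec, which
is what the newly enabled cipher-only test cases exercise.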



[dpdk-dev] [PATCH v4 02/10] crypto/dpaa_sec: support non-HMAC auth algos

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

This patch adds support for the non-HMAC MD5 and SHA-x authentication algorithms.

Signed-off-by: Gagandeep Singh 
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |   8 +-
 doc/guides/rel_notes/release_21_11.rst  |   2 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c  |  55 +++--
 drivers/crypto/dpaa_sec/dpaa_sec.h  | 126 
 4 files changed, 181 insertions(+), 10 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini 
b/doc/guides/cryptodevs/features/dpaa_sec.ini
index 5d0d04d601..eab14da96c 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -33,11 +33,17 @@ ZUC EEA3  = Y
 ; Supported authentication algorithms of the 'dpaa_sec' crypto driver.
 ;
 [Auth]
+MD5  = Y
 MD5 HMAC = Y
+SHA1 = Y
 SHA1 HMAC= Y
+SHA224   = Y
 SHA224 HMAC  = Y
+SHA256   = Y
 SHA256 HMAC  = Y
+SHA384   = Y
 SHA384 HMAC  = Y
+SHA512   = Y
 SHA512 HMAC  = Y
 SNOW3G UIA2  = Y
 ZUC EIA3 = Y
@@ -53,4 +59,4 @@ AES GCM (256) = Y
 ;
 ; Supported Asymmetric algorithms of the 'dpaa_sec' crypto driver.
 ;
-[Asymmetric]
\ No newline at end of file
+[Asymmetric]
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 4aa16d6915..88c2a31d49 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -75,7 +75,7 @@ New Features
 
 * **Updated NXP dpaa_sec crypto PMD.**
 
-  * Added DES-CBC algo support
+  * Added DES-CBC and non-HMAC algo support
 
 Removed Items
 -
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c 
b/drivers/crypto/dpaa_sec/dpaa_sec.c
index af5c7c499c..95b9d7414f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -489,6 +489,18 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
alginfo_a.algtype = ses->auth_key.alg;
alginfo_a.algmode = ses->auth_key.algmode;
switch (ses->auth_alg) {
+   case RTE_CRYPTO_AUTH_MD5:
+   case RTE_CRYPTO_AUTH_SHA1:
+   case RTE_CRYPTO_AUTH_SHA224:
+   case RTE_CRYPTO_AUTH_SHA256:
+   case RTE_CRYPTO_AUTH_SHA384:
+   case RTE_CRYPTO_AUTH_SHA512:
+   shared_desc_len = cnstr_shdsc_hash(
+   cdb->sh_desc, true,
+   swap, SHR_NEVER, &alginfo_a,
+   !ses->dir,
+   ses->digest_length);
+   break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
case RTE_CRYPTO_AUTH_SHA1_HMAC:
case RTE_CRYPTO_AUTH_SHA224_HMAC:
@@ -2080,43 +2092,70 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev 
__rte_unused,
 {
session->ctxt = DPAA_SEC_AUTH;
session->auth_alg = xform->auth.algo;
-   session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+   session->auth_key.length = xform->auth.key.length;
+   if (xform->auth.key.length) {
+   session->auth_key.data =
+   rte_zmalloc(NULL, xform->auth.key.length,
 RTE_CACHE_LINE_SIZE);
-   if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
-   DPAA_SEC_ERR("No Memory for auth key");
-   return -ENOMEM;
+   if (session->auth_key.data == NULL) {
+   DPAA_SEC_ERR("No Memory for auth key");
+   return -ENOMEM;
+   }
+   memcpy(session->auth_key.data, xform->auth.key.data,
+   xform->auth.key.length);
+
}
-   session->auth_key.length = xform->auth.key.length;
session->digest_length = xform->auth.digest_length;
if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
session->iv.offset = xform->auth.iv.offset;
session->iv.length = xform->auth.iv.length;
}
 
-   memcpy(session->auth_key.data, xform->auth.key.data,
-  xform->auth.key.length);
-
switch (xform->auth.algo) {
+   case RTE_CRYPTO_AUTH_SHA1:
+   session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
+   session->auth_key.algmode = OP_ALG_AAI_HASH;
+   break;
case RTE_CRYPTO_AUTH_SHA1_HMAC:
session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
session->auth_key.algmode = OP_ALG_AAI_HMAC;
break;
+   case RTE_CRYPTO_AUTH_MD5:
+   session->auth_key.alg = OP_ALG_ALGSEL_MD5;
+   session->auth_key.algmode = OP_ALG_AAI_HASH;
+   break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
session->auth_key.alg = OP_ALG_ALGSEL_MD5;
session->auth_key.algmode = OP_ALG_AAI_HMAC;
break;
+   case RTE_CRYPTO_AUTH_SHA22
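For reference, the plain (non-HMAC) hashes enabled by this patch are keyless,
which is why dpaa_sec_auth_init above only allocates the key buffer when a key
length is given. A hypothetical application-side auth transform could be:

/* Hypothetical plain SHA-256 auth transform: no key, 32-byte digest. */
static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA256,
		.key = { .data = NULL, .length = 0 },
		.digest_length = 32,
	},
};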

[dpdk-dev] [PATCH v4 03/10] crypto/dpaa_sec: support AES-XCBC-MAC

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

This patch adds support for AES-XCBC-MAC algo.

Signed-off-by: Gagandeep Singh 
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |  1 +
 doc/guides/rel_notes/release_21_11.rst  |  2 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c  | 21 -
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini 
b/doc/guides/cryptodevs/features/dpaa_sec.ini
index eab14da96c..d7bc319373 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -47,6 +47,7 @@ SHA512   = Y
 SHA512 HMAC  = Y
 SNOW3G UIA2  = Y
 ZUC EIA3 = Y
+AES XCBC MAC = Y
 
 ;
 ; Supported AEAD algorithms of the 'dpaa_sec' crypto driver.
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 88c2a31d49..b0da7e3135 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -75,7 +75,7 @@ New Features
 
 * **Updated NXP dpaa_sec crypto PMD.**
 
-  * Added DES-CBC and non-HMAC algo support
+  * Added DES-CBC, AES-XCBC-MAC and non-HMAC algo support
 
 Removed Items
 -
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c 
b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 95b9d7414f..fc9c3a4c2c 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -527,6 +527,14 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
!ses->dir,
ses->digest_length);
break;
+   case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+   shared_desc_len = cnstr_shdsc_aes_mac(
+   cdb->sh_desc,
+   true, swap, SHR_NEVER,
+   &alginfo_a,
+   !ses->dir,
+   ses->digest_length);
+   break;
default:
DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
}
@@ -2168,6 +2176,10 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev 
__rte_unused,
session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
session->auth_key.algmode = OP_ALG_AAI_F9;
break;
+   case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+   session->auth_key.alg = OP_ALG_ALGSEL_AES;
+   session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
+   break;
default:
DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
  xform->auth.algo);
@@ -2249,6 +2261,10 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev 
__rte_unused,
session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
session->auth_key.algmode = OP_ALG_AAI_HMAC;
break;
+   case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+   session->auth_key.alg = OP_ALG_ALGSEL_AES;
+   session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
+   break;
default:
DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
  auth_xform->algo);
@@ -2688,8 +2704,11 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform 
*cipher_xform,
case RTE_CRYPTO_AUTH_NULL:
session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
break;
-   case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+   session->auth_key.alg = OP_PCL_IPSEC_AES_XCBC_MAC_96;
+   session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
+   break;
+   case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
-- 
2.17.1



[dpdk-dev] [PATCH v4 04/10] crypto/dpaa_sec: add support for AES CMAC integrity check

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

This patch adds support for AES_CMAC integrity in non-security mode.

Signed-off-by: Gagandeep Singh 
---
 doc/guides/cryptodevs/features/dpaa_sec.ini |  1 +
 doc/guides/rel_notes/release_21_11.rst  |  2 +-
 drivers/crypto/dpaa_sec/dpaa_sec.c  | 10 +
 drivers/crypto/dpaa_sec/dpaa_sec.h  | 43 +
 4 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/doc/guides/cryptodevs/features/dpaa_sec.ini 
b/doc/guides/cryptodevs/features/dpaa_sec.ini
index d7bc319373..6a8f77fb1d 100644
--- a/doc/guides/cryptodevs/features/dpaa_sec.ini
+++ b/doc/guides/cryptodevs/features/dpaa_sec.ini
@@ -48,6 +48,7 @@ SHA512 HMAC  = Y
 SNOW3G UIA2  = Y
 ZUC EIA3 = Y
 AES XCBC MAC = Y
+AES CMAC (128) = Y
 
 ;
 ; Supported AEAD algorithms of the 'dpaa_sec' crypto driver.
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index b0da7e3135..bf4f1c389b 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -75,7 +75,7 @@ New Features
 
 * **Updated NXP dpaa_sec crypto PMD.**
 
-  * Added DES-CBC, AES-XCBC-MAC and non-HMAC algo support
+  * Added DES-CBC, AES-XCBC-MAC, AES-CMAC and non-HMAC algo support
 
 Removed Items
 -
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c 
b/drivers/crypto/dpaa_sec/dpaa_sec.c
index fc9c3a4c2c..c5416df726 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -528,6 +528,7 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
ses->digest_length);
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+   case RTE_CRYPTO_AUTH_AES_CMAC:
shared_desc_len = cnstr_shdsc_aes_mac(
cdb->sh_desc,
true, swap, SHR_NEVER,
@@ -2180,6 +2181,10 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev 
__rte_unused,
session->auth_key.alg = OP_ALG_ALGSEL_AES;
session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
break;
+   case RTE_CRYPTO_AUTH_AES_CMAC:
+   session->auth_key.alg = OP_ALG_ALGSEL_AES;
+   session->auth_key.algmode = OP_ALG_AAI_CMAC;
+   break;
default:
DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
  xform->auth.algo);
@@ -2265,6 +2270,10 @@ dpaa_sec_chain_init(struct rte_cryptodev *dev 
__rte_unused,
session->auth_key.alg = OP_ALG_ALGSEL_AES;
session->auth_key.algmode = OP_ALG_AAI_XCBC_MAC;
break;
+   case RTE_CRYPTO_AUTH_AES_CMAC:
+   session->auth_key.alg = OP_ALG_ALGSEL_AES;
+   session->auth_key.algmode = OP_ALG_AAI_CMAC;
+   break;
default:
DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
  auth_xform->algo);
@@ -2700,6 +2709,7 @@ dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform 
*cipher_xform,
break;
case RTE_CRYPTO_AUTH_AES_CMAC:
session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
+   session->auth_key.algmode = OP_ALG_AAI_CMAC;
break;
case RTE_CRYPTO_AUTH_NULL:
session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h 
b/drivers/crypto/dpaa_sec/dpaa_sec.h
index d500a4c246..c94d78e046 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -712,6 +712,49 @@ static const struct rte_cryptodev_capabilities 
dpaa_sec_capabilities[] = {
}, }
}, }
},
+   {   /* AES CMAC */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+   .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+   {.auth = {
+   .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+   .block_size = 16,
+   .key_size = {
+   .min = 1,
+   .max = 16,
+   .increment = 1
+   },
+   .digest_size = {
+   .min = 12,
+   .max = 16,
+   .increment = 4
+   },
+   .iv_size = { 0 }
+   }, }
+   }, }
+   },
+   {   /* AES XCBC HMAC */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+   .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+   {.auth = {
+   .algo = RTE_CRYPTO_AUTH_AES_XCB
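For reference, a hypothetical auth transform within the AES-CMAC capability
range advertised above (16-byte key, digest between 12 and 16 bytes) could be:

/* Hypothetical AES-CMAC auth transform; cmac_key is filled by the app. */
static uint8_t cmac_key[16];

static struct rte_crypto_sym_xform cmac_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_CMAC,
		.key = { .data = cmac_key, .length = sizeof(cmac_key) },
		.digest_length = 16,
	},
};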

[dpdk-dev] [PATCH v4 05/10] common/dpaax: caamflib load correct HFN from DESCBUF

2021-09-08 Thread Hemant Agrawal
From: Franck LENORMAND 

The offset of the HFN word and the Bearer/Dir word differs
depending on the type of PDB.

The wrong value was used.

This patch addresses the issue.

Signed-off-by: Franck LENORMAND 
---
 drivers/common/dpaax/caamflib/desc/pdcp.h |  7 +-
 drivers/common/dpaax/caamflib/desc/sdap.h | 96 ++-
 2 files changed, 80 insertions(+), 23 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h 
b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 659e289a45..e97d58cbc1 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -270,6 +270,9 @@ enum pdb_type_e {
PDCP_PDB_TYPE_INVALID
 };
 
+#define REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET 4
+#define FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET 8
+
 /**
  * rta_inline_pdcp_query() - Provide indications if a key can be passed as
  *   immediate data or shall be referenced in a
@@ -2564,11 +2567,11 @@ insert_hfn_ov_op(struct program *p,
return 0;
 
case PDCP_PDB_TYPE_REDUCED_PDB:
-   hfn_pdb_offset = 4;
+   hfn_pdb_offset = REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
break;
 
case PDCP_PDB_TYPE_FULL_PDB:
-   hfn_pdb_offset = 8;
+   hfn_pdb_offset = FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
break;
 
default:
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h 
b/drivers/common/dpaax/caamflib/desc/sdap.h
index 6523db1733..f1c49ea3e6 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2020 NXP
+ * Copyright 2020-2021 NXP
  */
 
 #ifndef __DESC_SDAP_H__
@@ -109,12 +109,17 @@ static inline int pdcp_sdap_insert_no_int_op(struct 
program *p,
 bool swap __maybe_unused,
 struct alginfo *cipherdata,
 unsigned int dir,
-enum pdcp_sn_size sn_size)
+enum pdcp_sn_size sn_size,
+enum pdb_type_e pdb_type)
 {
int op;
uint32_t sn_mask = 0;
uint32_t length = 0;
uint32_t offset = 0;
+   int hfn_bearer_dir_offset_in_descbuf =
+   (pdb_type == PDCP_PDB_TYPE_FULL_PDB) ?
+   FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
+   REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
&sn_mask))
@@ -137,7 +142,8 @@ static inline int pdcp_sdap_insert_no_int_op(struct program 
*p,
SEQSTORE(p, MATH0, offset, length, 0);
 
MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
-   MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+   MOVEB(p, DESCBUF, hfn_bearer_dir_offset_in_descbuf,
+   MATH2, 0, 8, WAITCOMP | IMMED);
MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 
MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
@@ -190,9 +196,14 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap 
__maybe_unused,
 struct alginfo *cipherdata,
 struct alginfo *authdata __maybe_unused,
 unsigned int dir, enum pdcp_sn_size sn_size,
-unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+unsigned char era_2_sw_hfn_ovrd __maybe_unused,
+enum pdb_type_e pdb_type)
 {
uint32_t offset = 0, length = 0, sn_mask = 0;
+   int hfn_bearer_dir_offset_in_descbuf =
+   (pdb_type == PDCP_PDB_TYPE_FULL_PDB) ?
+   FULL_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET :
+   REDUCED_PDB_DESCBUF_HFN_BEARER_DIR_OFFSET;
 
if (pdcp_sdap_get_sn_parameters(sn_size, swap, &offset, &length,
&sn_mask))
@@ -217,7 +228,8 @@ pdcp_sdap_insert_enc_only_op(struct program *p, bool swap 
__maybe_unused,
/* Word (32 bit) swap */
MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
/* Load words from PDB: word 02 (HFN) + word 03 (bearer_dir)*/
-   MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+   MOVEB(p, DESCBUF, hfn_bearer_dir_offset_in_descbuf,
+   MATH2, 0, 8, WAITCOMP | IMMED);
/* Create basic IV */
MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
 
@@ -309,13 +321,18 @@ static inline int
 pdcp_sdap_insert_snoop_op(struct program *p, bool swap __maybe_unused,
  struct alginfo *cipherdata, struct alginfo *authdata,
  unsigned int dir, enum pdcp_sn_size sn_size,
- unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+

[dpdk-dev] [PATCH v4 06/10] common/dpaax: caamflib do not clear DPOVRD

2021-09-08 Thread Hemant Agrawal
From: Franck LENORMAND 

For SDAP, we are not using the protocol operation to perform the
4G/LTE operation, so the DPOVRD option is not used.

Removing it saves some space in the descriptor buffer and some
execution time.

Signed-off-by: Franck LENORMAND 
---
 drivers/common/dpaax/caamflib/desc/pdcp.h | 14 --
 drivers/common/dpaax/caamflib/desc/sdap.h |  2 +-
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h 
b/drivers/common/dpaax/caamflib/desc/pdcp.h
index e97d58cbc1..5b3d846099 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -2546,7 +2546,8 @@ static inline int
 insert_hfn_ov_op(struct program *p,
 uint32_t shift,
 enum pdb_type_e pdb_type,
-unsigned char era_2_sw_hfn_ovrd)
+unsigned char era_2_sw_hfn_ovrd,
+bool clear_dpovrd_at_end)
 {
uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
uint16_t hfn_pdb_offset;
@@ -2597,13 +2598,14 @@ insert_hfn_ov_op(struct program *p,
MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
 
-   if (rta_sec_era >= RTA_SEC_ERA_8)
+   if (clear_dpovrd_at_end && (rta_sec_era >= RTA_SEC_ERA_8)) {
/*
 * For ERA8, DPOVRD could be handled by the PROTOCOL command
 * itself. For now, this is not done. Thus, clear DPOVRD here
 * to alleviate any side-effects.
 */
MATHB(p, DPOVRD, AND, ZERO, DPOVRD, 4, STL);
+   }
 
SET_LABEL(p, keyjump);
PATCH_JUMP(p, pkeyjump, keyjump);
@@ -2989,7 +2991,7 @@ cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
SET_LABEL(p, pdb_end);
 
err = insert_hfn_ov_op(p, sn_size, pdb_type,
-  era_2_sw_hfn_ovrd);
+  era_2_sw_hfn_ovrd, true);
if (err)
return err;
 
@@ -3143,7 +3145,7 @@ cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
SET_LABEL(p, pdb_end);
 
err = insert_hfn_ov_op(p, sn_size, pdb_type,
-  era_2_sw_hfn_ovrd);
+  era_2_sw_hfn_ovrd, true);
if (err)
return err;
 
@@ -3319,7 +3321,7 @@ cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
}
SET_LABEL(p, pdb_end);
 
-   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd);
+   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
if (err)
return err;
 
@@ -3523,7 +3525,7 @@ cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
}
SET_LABEL(p, pdb_end);
 
-   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd);
+   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, true);
if (err)
return err;
 
diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h 
b/drivers/common/dpaax/caamflib/desc/sdap.h
index f1c49ea3e6..d5d5850b4f 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -990,7 +990,7 @@ cnstr_shdsc_pdcp_sdap_u_plane(uint32_t *descbuf,
SET_LABEL(p, pdb_end);
 
/* Inser the HFN override operation */
-   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd);
+   err = insert_hfn_ov_op(p, sn_size, pdb_type, era_2_sw_hfn_ovrd, false);
if (err)
return err;
 
-- 
2.17.1



[dpdk-dev] [PATCH v4 07/10] common/dpaax: enhance caamflib with inline keys

2021-09-08 Thread Hemant Agrawal
From: Franck LENORMAND 

Space in the descriptor buffer is scarce: it is limited to
64 words on all platforms except ERA10 (which has 128).

As the descriptors are processed with QI, some words are added
to the descriptor that is passed.

Some descriptors used for SDAP were using too many words and
reached the limit.

This patch reduces the number of words used by removing the inlining
of some keys (done for performance) in order to keep the
descriptors working.

Signed-off-by: Franck LENORMAND 
---
 drivers/common/dpaax/caamflib/desc/sdap.h   | 61 -
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 28 --
 2 files changed, 81 insertions(+), 8 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/sdap.h 
b/drivers/common/dpaax/caamflib/desc/sdap.h
index d5d5850b4f..b2497a5424 100644
--- a/drivers/common/dpaax/caamflib/desc/sdap.h
+++ b/drivers/common/dpaax/caamflib/desc/sdap.h
@@ -20,6 +20,63 @@
 #define SDAP_BITS_SIZE (SDAP_BYTE_SIZE * 8)
 #endif
 
+/**
+ * rta_inline_pdcp_query() - Provide indications if a key can be passed as
+ *   immediate data or shall be referenced in a
+ *   shared descriptor.
+ * Return: 0 if data can be inlined or 1 if referenced.
+ */
+static inline int
+rta_inline_pdcp_sdap_query(enum auth_type_pdcp auth_alg,
+ enum cipher_type_pdcp cipher_alg,
+ enum pdcp_sn_size sn_size,
+ int8_t hfn_ovd)
+{
+   int nb_key_to_inline = 0;
+
+   if ((cipher_alg != PDCP_CIPHER_TYPE_NULL) &&
+   (auth_alg != PDCP_AUTH_TYPE_NULL))
+   return 2;
+   else
+   return 0;
+
+   /**
+* Shared Descriptors for some of the cases does not fit in the
+* MAX_DESC_SIZE of the descriptor
+* The cases which exceed are for RTA_SEC_ERA=8 and HFN override
+* enabled and 12/18 bit uplane and either of following Algo combo.
+* - AES-SNOW
+* - AES-ZUC
+* - SNOW-SNOW
+* - SNOW-ZUC
+* - ZUC-SNOW
+* - ZUC-SNOW
+*
+* We cannot make inline for all cases, as this will impact performance
+* due to extra memory accesses for the keys.
+*/
+
+   /* Inline only the cipher key */
+   if ((rta_sec_era == RTA_SEC_ERA_8) && hfn_ovd &&
+   ((sn_size == PDCP_SN_SIZE_12) ||
+(sn_size == PDCP_SN_SIZE_18)) &&
+   (cipher_alg != PDCP_CIPHER_TYPE_NULL) &&
+   ((auth_alg == PDCP_AUTH_TYPE_SNOW) ||
+(auth_alg == PDCP_AUTH_TYPE_ZUC))) {
+
+   nb_key_to_inline++;
+
+   /* Sub case where inlining another key is required */
+   if ((cipher_alg == PDCP_CIPHER_TYPE_AES) &&
+   (auth_alg == PDCP_AUTH_TYPE_SNOW))
+   nb_key_to_inline++;
+   }
+
+   /* Inline both keys */
+
+   return nb_key_to_inline;
+}
+
 static inline void key_loading_opti(struct program *p,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -788,8 +845,8 @@ pdcp_sdap_insert_cplane_null_op(struct program *p,
   unsigned char era_2_sw_hfn_ovrd,
   enum pdb_type_e pdb_type __maybe_unused)
 {
-   return pdcp_insert_cplane_int_only_op(p, swap, cipherdata, authdata,
-   dir, sn_size, era_2_sw_hfn_ovrd);
+   return pdcp_insert_cplane_null_op(p, swap, cipherdata, authdata, dir,
+ sn_size, era_2_sw_hfn_ovrd);
 }
 
 static inline int
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 1ccead3641..6b6fee828b 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -3261,12 +3261,28 @@ dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
goto out;
}
 
-   if (rta_inline_pdcp_query(authdata.algtype,
-   cipherdata.algtype,
-   session->pdcp.sn_size,
-   session->pdcp.hfn_ovd)) {
-   cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
-   cipherdata.key_type = RTA_DATA_PTR;
+   if (pdcp_xform->sdap_enabled) {
+   int nb_keys_to_inline =
+   rta_inline_pdcp_sdap_query(authdata.algtype,
+   cipherdata.algtype,
+   session->pdcp.sn_size,
+   session->pdcp.hfn_ovd);
+   if (nb_keys_to_inline >= 1) {
+   cipherdata.key = DPAA2_VADDR_TO_IOVA(cipherdata.key);
+   cipherdata.key_type = RTA_DATA_PTR;
+   }
+   if (nb_keys_to_inline >= 2) {
+   authdata.key 

[dpdk-dev] [PATCH v4 08/10] common/dpaax: fix IV value for shortMAC-I for SNOW algo

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

The logic was incorrectly doing a conditional swap. It needs to
be a bit swap always.

Fixes: 73a24060cd70 ("crypto/dpaa2_sec: add sample PDCP descriptor APIs")
Cc: sta...@dpdk.org

Signed-off-by: Gagandeep Singh 
---
 drivers/common/dpaax/caamflib/desc/pdcp.h | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/common/dpaax/caamflib/desc/pdcp.h 
b/drivers/common/dpaax/caamflib/desc/pdcp.h
index 5b3d846099..8e8daf5ba8 100644
--- a/drivers/common/dpaax/caamflib/desc/pdcp.h
+++ b/drivers/common/dpaax/caamflib/desc/pdcp.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
  * Copyright 2008-2013 Freescale Semiconductor, Inc.
- * Copyright 2019-2020 NXP
+ * Copyright 2019-2021 NXP
  */
 
 #ifndef __DESC_PDCP_H__
@@ -3715,9 +3715,10 @@ cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
break;
 
case PDCP_AUTH_TYPE_SNOW:
+   /* IV calculation based on 3GPP specs. 36331, section:5.3.7.4 */
iv[0] = 0x;
-   iv[1] = swap ? swab32(0x0400) : 0x0400;
-   iv[2] = swap ? swab32(0xF800) : 0xF800;
+   iv[1] = swab32(0x0400);
+   iv[2] = swab32(0xF800);
 
KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
authdata->keylen, INLINE_KEY(authdata));
-- 
2.17.1



[dpdk-dev] [PATCH v4 09/10] crypto/dpaa_sec: force inline of the keys to save space

2021-09-08 Thread Hemant Agrawal
From: Gagandeep Singh 

This patch improves storage and performance by forcing inlining
of the keys.

Signed-off-by: Franck LENORMAND 
Signed-off-by: Gagandeep Singh 
---
 drivers/crypto/dpaa_sec/dpaa_sec.c | 35 ++
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c 
b/drivers/crypto/dpaa_sec/dpaa_sec.c
index c5416df726..10ef990886 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017-2019 NXP
+ *   Copyright 2017-2021 NXP
  *
  */
 
@@ -263,14 +263,31 @@ dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
p_authdata = &authdata;
}
 
-   if (rta_inline_pdcp_query(authdata.algtype,
-   cipherdata.algtype,
-   ses->pdcp.sn_size,
-   ses->pdcp.hfn_ovd)) {
-   cipherdata.key =
-   (size_t)rte_dpaa_mem_vtop((void *)
-   (size_t)cipherdata.key);
-   cipherdata.key_type = RTA_DATA_PTR;
+   if (ses->pdcp.sdap_enabled) {
+   int nb_keys_to_inline =
+   rta_inline_pdcp_sdap_query(authdata.algtype,
+   cipherdata.algtype,
+   ses->pdcp.sn_size,
+   ses->pdcp.hfn_ovd);
+   if (nb_keys_to_inline >= 1) {
+   cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
+   (size_t)cipherdata.key);
+   cipherdata.key_type = RTA_DATA_PTR;
+   }
+   if (nb_keys_to_inline >= 2) {
+   authdata.key = (size_t)rte_dpaa_mem_vtop((void *)
+   (size_t)authdata.key);
+   authdata.key_type = RTA_DATA_PTR;
+   }
+   } else {
+   if (rta_inline_pdcp_query(authdata.algtype,
+   cipherdata.algtype,
+   ses->pdcp.sn_size,
+   ses->pdcp.hfn_ovd)) {
+   cipherdata.key = (size_t)rte_dpaa_mem_vtop((void *)
+   (size_t)cipherdata.key);
+   cipherdata.key_type = RTA_DATA_PTR;
+   }
}
 
if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
-- 
2.17.1



[dpdk-dev] [PATCH v4 10/10] crypto/dpaa2_sec: add error packet counters

2021-09-08 Thread Hemant Agrawal
This patch adds an error packet counter per queue.
It also enhances a few related debug prints.

Signed-off-by: Hemant Agrawal 
---
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 6 --
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c 
b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 6b6fee828b..aac4e9806a 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1709,8 +1709,9 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op 
**ops,
 
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
-   DPAA2_SEC_ERR("SEC returned Error - %x",
+   DPAA2_SEC_DP_ERR("SEC returned Error - %x\n",
  fd->simple.frc);
+   dpaa2_qp->rx_vq.err_pkts += 1;
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1722,7 +1723,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op 
**ops,
 
dpaa2_qp->rx_vq.rx_pkts += num_rx;
 
-   DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
+   DPAA2_SEC_DP_DEBUG("SEC RX pkts %d err pkts %" PRIu64 "\n", num_rx,
+   dpaa2_qp->rx_vq.err_pkts);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
 }
-- 
2.17.1



Re: [dpdk-dev] [RFC V1] examples/l3fwd-power: fix memory leak for rte_pci_device

2021-09-08 Thread Thomas Monjalon
08/09/2021 04:01, Huisong Li:
> On 2021/9/7 16:53, Thomas Monjalon wrote:
> > 07/09/2021 05:41, Huisong Li:
> >> Calling rte_eth_dev_close() will release resources of eth device and close
> >> it. But rte_pci_device struct isn't released when app exit, which will lead
> >> to memory leak.
> > That's a PMD issue.
> > When the last port of a PCI device is closed, the device should be freed.
> 
> Why is this a PMD problem? I don't understand.

In the PMD close function, freeing of PCI device must be managed,
so the app doesn't have to bother.

> As far as I know, most apps or examples in the DPDK project have only 
> one port for a pci device.

The number of ports per PCI device is driver-specific.

> When the port is closed, the rte_pci_device should be freed. But none of 
> the apps seem to do this.

That's because from the app point of view, only ports should be managed.
The hardware device is managed by the PMD.
Only drivers (PMDs) have to handle the relation between class ports
and hardware devices.

> >> +  /* Retrieve device address in eth device before closing it. */
> >> +  eth_dev = &rte_eth_devices[portid];
> > You should not access this array, considered internal.
> 
> We have to save the address of rte_device to free rte_pci_device before 
> closing eth device.
> 
> Because the the device address in rte_eth_dev struct will be set to a 
> NULL after closing eth device.
> 
> It's also handled in OVS in this way.

No, you don't have to call rte_dev_remove() at all from an app.

> >> +  rte_dev = eth_dev->device;
> >>rte_eth_dev_close(portid);
> >> +  ret = rte_dev_remove(rte_dev);
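To illustrate the point, a minimal app-side teardown sketch along those lines
(assuming the PMD frees the underlying PCI device when its last port is closed)
would avoid touching rte_eth_devices[] or rte_dev_remove() entirely:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

/* Hypothetical application cleanup: the app only manages ports; freeing
 * the underlying rte_pci_device is left to the PMD on last-port close.
 */
static void
app_cleanup(void)
{
	uint16_t portid;

	RTE_ETH_FOREACH_DEV(portid) {
		if (rte_eth_dev_stop(portid) != 0)
			printf("Failed to stop port %u\n", portid);
		rte_eth_dev_close(portid); /* no rte_dev_remove() needed here */
	}
	rte_eal_cleanup();
}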





[dpdk-dev] [PATCH 0/3] add option to configure tunnel header verification

2021-09-08 Thread Tejasree Kondoj
Add an option to indicate whether outer header verification needs to be done
as part of inbound IPsec processing.
CNXK PMD support and unit tests are also added for the same.

Depends on
https://patches.dpdk.org/project/dpdk/list/?series=18743

Tejasree Kondoj (3):
  security: add option to configure tunnel header verification
  common/cnxk: add support for tunnel header verification
  test/crypto: add tunnel header verification tests

 app/test/test_cryptodev.c | 45 +-
 app/test/test_cryptodev_security_ipsec.c  | 25 +++-
 app/test/test_cryptodev_security_ipsec.h  |  1 +
 ...st_cryptodev_security_ipsec_test_vectors.h |  3 +
 doc/guides/rel_notes/release_21_11.rst|  5 ++
 drivers/common/cnxk/cnxk_security.c   | 60 +++
 drivers/common/cnxk/roc_ie_ot.h   |  6 +-
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c |  4 ++
 lib/security/rte_security.h   | 17 ++
 9 files changed, 162 insertions(+), 4 deletions(-)

-- 
2.27.0



[dpdk-dev] [PATCH 1/3] security: add option to configure tunnel header verification

2021-09-08 Thread Tejasree Kondoj
Add an option to indicate whether outer header verification
needs to be done as part of inbound IPsec processing.

With inline IPsec processing, SA lookup would be happening
in the Rx path of rte_ethdev. When rte_flow is configured to
support more than one SA, the SPI would be used to look up the SA.
In such cases, additional verification would be required to
ensure duplicate SPIs are not getting processed in the inline path.

For lookaside cases, the same option can be used by application
to offload tunnel verification to the PMD.

These verifications would help in averting possible DoS attacks.

Signed-off-by: Tejasree Kondoj 
---
 doc/guides/rel_notes/release_21_11.rst |  5 +
 lib/security/rte_security.h| 17 +
 2 files changed, 22 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 0e3ed28378..b0606cb542 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -136,6 +136,11 @@ ABI Changes
 soft and hard SA expiry limits. Limits can be either in units of packets or
 bytes.
 
+* security: add IPsec SA option to configure tunnel header verification
+
+  * Added SA option to indicate whether outer header verification need to be
+done as part of inbound IPsec processing.
+
 
 Known Issues
 
diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index 95c169d6cf..2a61cad885 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -55,6 +55,14 @@ enum rte_security_ipsec_tunnel_type {
/**< Outer header is IPv6 */
 };
 
+/**
+ * IPSEC tunnel header verification mode
+ *
+ * Controls how outer IP header is verified in inbound.
+ */
+#define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR 0x1
+#define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR 0x2
+
 /**
  * Security context for crypto/eth devices
  *
@@ -195,6 +203,15 @@ struct rte_security_ipsec_sa_options {
 * by the PMD.
 */
uint32_t iv_gen_disable : 1;
+
+   /** Verify tunnel header in inbound
+* * ``RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR``: Verify destination
+*   IP address.
+*
+* * ``RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR``: Verify both
+*   source and destination IP addresses.
+*/
+   uint32_t tunnel_hdr_verify : 2;
 };
 
 /** IPSec security association direction */
-- 
2.27.0
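As a hypothetical usage sketch (values chosen for illustration), an application
would request the new verification on an inbound tunnel SA through the IPsec
xform options; SPI, tunnel addresses and crypto xforms are omitted here:

#include <rte_security.h>

/* Hypothetical inbound tunnel SA asking the PMD to verify the outer
 * destination address.
 */
struct rte_security_ipsec_xform ipsec_xform = {
	.direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
	.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
	.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
	.options = {
		.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR,
	},
};

A PMD that does not support the requested level is expected to reflect that in
its security capabilities, which is what the capability check added in patch
3/3 verifies.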



[dpdk-dev] [PATCH 2/3] common/cnxk: add support for tunnel header verification

2021-09-08 Thread Tejasree Kondoj
Adding support to verify tunnel header in IPsec inbound.

Signed-off-by: Tejasree Kondoj 
---
 drivers/common/cnxk/cnxk_security.c   | 60 +++
 drivers/common/cnxk/roc_ie_ot.h   |  6 +-
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c |  4 ++
 3 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/drivers/common/cnxk/cnxk_security.c 
b/drivers/common/cnxk/cnxk_security.c
index 215d9fd4d1..cc5daf333c 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -199,6 +199,62 @@ ot_ipsec_inb_ctx_size(struct roc_ot_ipsec_inb_sa *sa)
return size;
 }
 
+static int
+ot_ipsec_inb_tunnel_hdr_fill(struct roc_ot_ipsec_inb_sa *sa,
+struct rte_security_ipsec_xform *ipsec_xfrm)
+{
+   struct rte_security_ipsec_tunnel_param *tunnel;
+
+   if (ipsec_xfrm->mode != RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+   return 0;
+
+   if (ipsec_xfrm->options.tunnel_hdr_verify == 0)
+   return 0;
+
+   tunnel = &ipsec_xfrm->tunnel;
+
+   switch (tunnel->type) {
+   case RTE_SECURITY_IPSEC_TUNNEL_IPV4:
+   sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_4;
+   memcpy(&sa->outer_hdr.ipv4.src_addr, &tunnel->ipv4.src_ip,
+  sizeof(struct in_addr));
+   memcpy(&sa->outer_hdr.ipv4.dst_addr, &tunnel->ipv4.dst_ip,
+  sizeof(struct in_addr));
+
+   /* IP Source and Dest are in LE/CPU endian */
+   sa->outer_hdr.ipv4.src_addr =
+   rte_be_to_cpu_32(sa->outer_hdr.ipv4.src_addr);
+   sa->outer_hdr.ipv4.dst_addr =
+   rte_be_to_cpu_32(sa->outer_hdr.ipv4.dst_addr);
+
+   break;
+   case RTE_SECURITY_IPSEC_TUNNEL_IPV6:
+   sa->w2.s.outer_ip_ver = ROC_IE_SA_IP_VERSION_6;
+   memcpy(&sa->outer_hdr.ipv6.src_addr, &tunnel->ipv6.src_addr,
+  sizeof(struct in6_addr));
+   memcpy(&sa->outer_hdr.ipv6.dst_addr, &tunnel->ipv6.dst_addr,
+  sizeof(struct in6_addr));
+
+   break;
+   default:
+   return -EINVAL;
+   }
+
+   switch (ipsec_xfrm->options.tunnel_hdr_verify) {
+   case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR:
+   sa->w2.s.ip_hdr_verify = ROC_IE_OT_SA_IP_HDR_VERIFY_DST_ADDR;
+   break;
+   case RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR:
+   sa->w2.s.ip_hdr_verify =
+   ROC_IE_OT_SA_IP_HDR_VERIFY_SRC_DST_ADDR;
+   break;
+   default:
+   return -ENOTSUP;
+   }
+
+   return 0;
+}
+
 int
 cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
  struct rte_security_ipsec_xform *ipsec_xfrm,
@@ -229,6 +285,10 @@ cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
sa->w0.s.ar_win = rte_log2_u32(replay_win_sz) - 5;
}
 
+   rc = ot_ipsec_inb_tunnel_hdr_fill(sa, ipsec_xfrm);
+   if (rc)
+   return rc;
+
/* Default options for pkt_out and pkt_fmt are with
 * second pass meta and no defrag.
 */
diff --git a/drivers/common/cnxk/roc_ie_ot.h b/drivers/common/cnxk/roc_ie_ot.h
index 1ff468841d..12c75afac2 100644
--- a/drivers/common/cnxk/roc_ie_ot.h
+++ b/drivers/common/cnxk/roc_ie_ot.h
@@ -180,7 +180,11 @@ union roc_ot_ipsec_sa_word2 {
uint64_t auth_type : 4;
 
uint64_t encap_type : 2;
-   uint64_t rsvd1 : 6;
+   uint64_t et_ovrwr_ddr_en : 1;
+   uint64_t esn_en : 1;
+   uint64_t tport_l4_incr_csum : 1;
+   uint64_t ip_hdr_verify : 2;
+   uint64_t rsvd5 : 1;
 
uint64_t rsvd2 : 7;
uint64_t async_mode : 1;
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c 
b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
index 4b97639e56..8a0cf289fd 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
@@ -920,6 +920,10 @@ cn10k_sec_caps_update(struct rte_security_capability 
*sec_cap)
 #ifdef LA_IPSEC_DEBUG
sec_cap->ipsec.options.iv_gen_disable = 1;
 #endif
+   } else {
+   if (sec_cap->ipsec.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
+   sec_cap->ipsec.options.tunnel_hdr_verify =
+   RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
}
 }
 
-- 
2.27.0



[dpdk-dev] [PATCH 3/3] test/crypto: add tunnel header verification tests

2021-09-08 Thread Tejasree Kondoj
Add test cases to verify tunnel header in IPsec inbound.

Signed-off-by: Tejasree Kondoj 
---
 app/test/test_cryptodev.c | 45 ++-
 app/test/test_cryptodev_security_ipsec.c  | 25 ++-
 app/test/test_cryptodev_security_ipsec.h  |  1 +
 ...st_cryptodev_security_ipsec_test_vectors.h |  3 ++
 4 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index e513f38765..ab7b63f37f 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -8876,6 +8876,7 @@ test_ipsec_proto_process(const struct ipsec_test_data 
td[],
int salt_len, i, ret = TEST_SUCCESS;
struct rte_security_ctx *ctx;
uint8_t *input_text;
+   uint32_t verify;
 
ut_params->type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
gbl_action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
@@ -8885,11 +8886,19 @@ test_ipsec_proto_process(const struct ipsec_test_data 
td[],
/* Copy IPsec xform */
memcpy(&ipsec_xform, &td[0].ipsec_xform, sizeof(ipsec_xform));
 
+   dir = ipsec_xform.direction;
+   verify = flags->tunnel_hdr_verify;
+
+   if ((dir == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) && verify) {
+   if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR)
+   src += 1;
+   else if (verify == RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR)
+   dst += 1;
+   }
+
memcpy(&ipsec_xform.tunnel.ipv4.src_ip, &src, sizeof(src));
memcpy(&ipsec_xform.tunnel.ipv4.dst_ip, &dst, sizeof(dst));
 
-   dir = ipsec_xform.direction;
-
ctx = rte_cryptodev_get_sec_ctx(dev_id);
 
sec_cap_idx.action = ut_params->type;
@@ -9181,6 +9190,30 @@ test_ipsec_proto_udp_encap(const void *data __rte_unused)
return test_ipsec_proto_all(&flags);
 }
 
+static int
+test_ipsec_proto_tunnel_src_dst_addr_verify(const void *data __rte_unused)
+{
+   struct ipsec_test_flags flags;
+
+   memset(&flags, 0, sizeof(flags));
+
+   flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
+
+   return test_ipsec_proto_all(&flags);
+}
+
+static int
+test_ipsec_proto_tunnel_dst_addr_verify(const void *data __rte_unused)
+{
+   struct ipsec_test_flags flags;
+
+   memset(&flags, 0, sizeof(flags));
+
+   flags.tunnel_hdr_verify = RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR;
+
+   return test_ipsec_proto_all(&flags);
+}
+
 static int
 test_PDCP_PROTO_all(void)
 {
@@ -14124,6 +14157,14 @@ static struct unit_test_suite ipsec_proto_testsuite  = 
{
"Negative test: ICV corruption",
ut_setup_security, ut_teardown,
test_ipsec_proto_err_icv_corrupt),
+   TEST_CASE_NAMED_ST(
+   "Tunnel dst addr verification",
+   ut_setup_security, ut_teardown,
+   test_ipsec_proto_tunnel_dst_addr_verify),
+   TEST_CASE_NAMED_ST(
+   "Tunnel src and dst addr verification",
+   ut_setup_security, ut_teardown,
+   test_ipsec_proto_tunnel_src_dst_addr_verify),
TEST_CASES_END() /**< NULL terminate unit test array */
}
 };
diff --git a/app/test/test_cryptodev_security_ipsec.c 
b/app/test/test_cryptodev_security_ipsec.c
index 046536cc9c..f040630655 100644
--- a/app/test/test_cryptodev_security_ipsec.c
+++ b/app/test/test_cryptodev_security_ipsec.c
@@ -86,6 +86,15 @@ test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform 
*ipsec_xform,
return -ENOTSUP;
}
 
+   if ((ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS) &&
+   (ipsec_xform->options.tunnel_hdr_verify >
+   sec_cap->ipsec.options.tunnel_hdr_verify)) {
+   if (!silent)
+   RTE_LOG(INFO, USER1,
+   "Tunnel header verify is not supported\n");
+   return -ENOTSUP;
+   }
+
return 0;
 }
 
@@ -207,6 +216,9 @@ test_ipsec_td_update(struct ipsec_test_data td_inb[],
if (flags->udp_encap)
td_inb[i].ipsec_xform.options.udp_encap = 1;
 
+   td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
+   flags->tunnel_hdr_verify;
+
/* Clear outbound specific flags */
td_inb[i].ipsec_xform.options.iv_gen_disable = 0;
}
@@ -292,7 +304,8 @@ test_ipsec_td_verify(struct rte_mbuf *m, const struct 
ipsec_test_data *td,
/* For tests with status as error for test success, skip verification */
if (td->ipsec_xform.direction == RTE_SECURITY_IPSEC_SA_DIR_INGRESS &&
(flags->icv_corrupt ||
-flags->sa_expiry_pkts_hard))
+flags->sa_expiry_pkts_hard ||
+flags->tunnel_hdr_verify))
return TEST_SUCCESS;
 
if

[dpdk-dev] [PATCH 0/3] add option to configure UDP ports verification

2021-09-08 Thread Tejasree Kondoj
Add an option to indicate whether UDP encapsulation ports verification
needs to be done as part of inbound IPsec processing.
CNXK PMD support and unit tests are also added for the same.

Depends on
https://patches.dpdk.org/project/dpdk/list/?series=18755

Tejasree Kondoj (3):
  security: add option to configure UDP ports verification
  common/cnxk: add support for UDP ports verification
  test/crypto: add UDP encapsulation ports verification tests

 app/test/test_cryptodev.c   | 17 +
 app/test/test_cryptodev_security_ipsec.c| 11 +++
 app/test/test_cryptodev_security_ipsec.h|  1 +
 doc/guides/rel_notes/release_21_11.rst  |  5 +
 drivers/common/cnxk/cnxk_security.c |  3 +++
 drivers/common/cnxk/roc_ie_ot.h |  4 ++--
 .../crypto/cnxk/cnxk_cryptodev_capabilities.c   |  1 +
 lib/security/rte_security.h |  7 +++
 8 files changed, 47 insertions(+), 2 deletions(-)

-- 
2.27.0



[dpdk-dev] [PATCH 1/3] security: add option to configure UDP ports verification

2021-09-08 Thread Tejasree Kondoj
Add an option to indicate whether UDP encapsulation ports
verification needs to be done as part of inbound
IPsec processing.

Signed-off-by: Tejasree Kondoj 
---
 doc/guides/rel_notes/release_21_11.rst | 5 +
 lib/security/rte_security.h| 7 +++
 2 files changed, 12 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index b0606cb542..afeba0105b 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -141,6 +141,11 @@ ABI Changes
   * Added SA option to indicate whether outer header verification need to be
 done as part of inbound IPsec processing.
 
+* security: add IPsec SA option to configure UDP ports verification
+
+  * Added SA option to indicate whether UDP ports verification need to be
+done as part of inbound IPsec processing.
+
 
 Known Issues
 
diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index 2a61cad885..18b0f02c44 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -139,6 +139,13 @@ struct rte_security_ipsec_sa_options {
 */
uint32_t udp_encap : 1;
 
+   /** Verify UDP encapsulation ports in inbound
+*
+* * 1: Match UDP source and destination ports
+* * 0: Do not match UDP ports
+*/
+   uint32_t udp_ports_verify : 1;
+
/** Copy DSCP bits
 *
 * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to
-- 
2.27.0
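As a hypothetical sketch, the new flag is set together with udp_encap in the SA
options of an inbound SA (other SA fields omitted):

#include <rte_security.h>

/* Hypothetical inbound SA options: UDP encapsulation with the new
 * source/destination port verification enabled.
 */
struct rte_security_ipsec_sa_options opts = {
	.udp_encap = 1,
	.udp_ports_verify = 1,
};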



[dpdk-dev] [PATCH 2/3] common/cnxk: add support for UDP ports verification

2021-09-08 Thread Tejasree Kondoj
Adding support to verify UDP encapsulation ports
in IPsec inbound.

Signed-off-by: Tejasree Kondoj 
---
 drivers/common/cnxk/cnxk_security.c   | 3 +++
 drivers/common/cnxk/roc_ie_ot.h   | 4 ++--
 drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c | 1 +
 3 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/drivers/common/cnxk/cnxk_security.c 
b/drivers/common/cnxk/cnxk_security.c
index cc5daf333c..13c4f128ae 100644
--- a/drivers/common/cnxk/cnxk_security.c
+++ b/drivers/common/cnxk/cnxk_security.c
@@ -303,6 +303,9 @@ cnxk_ot_ipsec_inb_sa_fill(struct roc_ot_ipsec_inb_sa *sa,
sa->w10.s.udp_dst_port = 4500;
}
 
+   if (ipsec_xfrm->options.udp_ports_verify)
+   sa->w2.s.udp_ports_verify = 1;
+
offset = offsetof(struct roc_ot_ipsec_inb_sa, ctx);
/* Word offset for HW managed SA field */
sa->w0.s.hw_ctx_off = offset / 8;
diff --git a/drivers/common/cnxk/roc_ie_ot.h b/drivers/common/cnxk/roc_ie_ot.h
index 12c75afac2..e8415cff3c 100644
--- a/drivers/common/cnxk/roc_ie_ot.h
+++ b/drivers/common/cnxk/roc_ie_ot.h
@@ -184,7 +184,7 @@ union roc_ot_ipsec_sa_word2 {
uint64_t esn_en : 1;
uint64_t tport_l4_incr_csum : 1;
uint64_t ip_hdr_verify : 2;
-   uint64_t rsvd5 : 1;
+   uint64_t udp_ports_verify : 1;
 
uint64_t rsvd2 : 7;
uint64_t async_mode : 1;
@@ -329,7 +329,7 @@ struct roc_ot_ipsec_inb_sa {
uint64_t esn_en : 1;
uint64_t tport_l4_incr_csum : 1;
uint64_t ip_hdr_verify : 2;
-   uint64_t rsvd5 : 1;
+   uint64_t udp_ports_verify : 1;
 
uint64_t rsvd6 : 7;
uint64_t async_mode : 1;
diff --git a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c 
b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
index 8a0cf289fd..ba4166c56d 100644
--- a/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
+++ b/drivers/crypto/cnxk/cnxk_cryptodev_capabilities.c
@@ -921,6 +921,7 @@ cn10k_sec_caps_update(struct rte_security_capability 
*sec_cap)
sec_cap->ipsec.options.iv_gen_disable = 1;
 #endif
} else {
+   sec_cap->ipsec.options.udp_ports_verify = 1;
if (sec_cap->ipsec.mode == RTE_SECURITY_IPSEC_SA_MODE_TUNNEL)
sec_cap->ipsec.options.tunnel_hdr_verify =
RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR;
-- 
2.27.0



[dpdk-dev] [PATCH 3/3] test/crypto: add UDP encapsulation ports verification tests

2021-09-08 Thread Tejasree Kondoj
Adding UDP encapsulation ports verification test cases.

Signed-off-by: Tejasree Kondoj 
---
 app/test/test_cryptodev.c| 17 +
 app/test/test_cryptodev_security_ipsec.c | 11 +++
 app/test/test_cryptodev_security_ipsec.h |  1 +
 3 files changed, 29 insertions(+)

diff --git a/app/test/test_cryptodev.c b/app/test/test_cryptodev.c
index ab7b63f37f..352d8f4360 100644
--- a/app/test/test_cryptodev.c
+++ b/app/test/test_cryptodev.c
@@ -9214,6 +9214,19 @@ test_ipsec_proto_tunnel_dst_addr_verify(const void *data 
__rte_unused)
return test_ipsec_proto_all(&flags);
 }
 
+static int
+test_ipsec_proto_udp_ports_verify(const void *data __rte_unused)
+{
+   struct ipsec_test_flags flags;
+
+   memset(&flags, 0, sizeof(flags));
+
+   flags.udp_encap = true;
+   flags.udp_ports_verify = true;
+
+   return test_ipsec_proto_all(&flags);
+}
+
 static int
 test_PDCP_PROTO_all(void)
 {
@@ -14145,6 +14158,10 @@ static struct unit_test_suite ipsec_proto_testsuite  = 
{
"UDP encapsulation",
ut_setup_security, ut_teardown,
test_ipsec_proto_udp_encap),
+   TEST_CASE_NAMED_ST(
+   "UDP encapsulation ports verification test",
+   ut_setup_security, ut_teardown,
+   test_ipsec_proto_udp_ports_verify),
TEST_CASE_NAMED_ST(
"SA expiry packets soft",
ut_setup_security, ut_teardown,
diff --git a/app/test/test_cryptodev_security_ipsec.c 
b/app/test/test_cryptodev_security_ipsec.c
index f040630655..764e77bbff 100644
--- a/app/test/test_cryptodev_security_ipsec.c
+++ b/app/test/test_cryptodev_security_ipsec.c
@@ -36,6 +36,14 @@ test_ipsec_sec_caps_verify(struct rte_security_ipsec_xform 
*ipsec_xform,
return -ENOTSUP;
}
 
+   if (ipsec_xform->options.udp_ports_verify == 1 &&
+   sec_cap->ipsec.options.udp_ports_verify == 0) {
+   if (!silent)
+   RTE_LOG(INFO, USER1, "UDP encapsulation ports "
+   "verification is not supported\n");
+   return -ENOTSUP;
+   }
+
if (ipsec_xform->options.copy_dscp == 1 &&
sec_cap->ipsec.options.copy_dscp == 0) {
if (!silent)
@@ -216,6 +224,9 @@ test_ipsec_td_update(struct ipsec_test_data td_inb[],
if (flags->udp_encap)
td_inb[i].ipsec_xform.options.udp_encap = 1;
 
+   if (flags->udp_ports_verify)
+   td_inb[i].ipsec_xform.options.udp_ports_verify = 1;
+
td_inb[i].ipsec_xform.options.tunnel_hdr_verify =
flags->tunnel_hdr_verify;
 
diff --git a/app/test/test_cryptodev_security_ipsec.h 
b/app/test/test_cryptodev_security_ipsec.h
index a65cb54eae..0416005520 100644
--- a/app/test/test_cryptodev_security_ipsec.h
+++ b/app/test/test_cryptodev_security_ipsec.h
@@ -55,6 +55,7 @@ struct ipsec_test_flags {
bool iv_gen;
uint32_t tunnel_hdr_verify;
bool udp_encap;
+   bool udp_ports_verify;
 };
 
 struct crypto_param {
-- 
2.27.0



Re: [dpdk-dev] [PATCH 1/3] security: add option to configure tunnel header verification

2021-09-08 Thread Hemant Agrawal



On 9/8/2021 1:51 PM, Tejasree Kondoj wrote:

Add option to indicate whether outer header verification
need to be done as part of inbound IPsec processing.

With inline IPsec processing, SA lookup would be happening
in the Rx path of rte_ethdev. When rte_flow is configured to
support more than one SA, SPI would be used to lookup SA.
In such cases, additional verification would be required to
ensure duplicate SPIs are not getting processed in the inline path.

For lookaside cases, the same option can be used by application
to offload tunnel verification to the PMD.

These verifications would help in averting possible DoS attacks.

Signed-off-by: Tejasree Kondoj 
---
  doc/guides/rel_notes/release_21_11.rst |  5 +
  lib/security/rte_security.h| 17 +
  2 files changed, 22 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 0e3ed28378..b0606cb542 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -136,6 +136,11 @@ ABI Changes
  soft and hard SA expiry limits. Limits can be either in units of packets 
or
  bytes.
  
+* security: add IPsec SA option to configure tunnel header verification

+
+  * Added SA option to indicate whether outer header verification need to be
+done as part of inbound IPsec processing.
+
  
  Known Issues

  
diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index 95c169d6cf..2a61cad885 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -55,6 +55,14 @@ enum rte_security_ipsec_tunnel_type {
/**< Outer header is IPv6 */
  };
  
+/**

+ * IPSEC tunnel header verification mode
+ *
+ * Controls how outer IP header is verified in inbound.
+ */
+#define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR 0x1
+#define RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR 0x2
+
  /**
   * Security context for crypto/eth devices
   *
@@ -195,6 +203,15 @@ struct rte_security_ipsec_sa_options {
 * by the PMD.
 */
uint32_t iv_gen_disable : 1;
+
+   /** Verify tunnel header in inbound
+* * ``RTE_SECURITY_IPSEC_TUNNEL_VERIFY_DST_ADDR``: Verify destination
+*   IP address.
+*
+* * ``RTE_SECURITY_IPSEC_TUNNEL_VERIFY_SRC_DST_ADDR``: Verify both
+*   source and destination IP addresses.
+*/
+   uint32_t tunnel_hdr_verify : 2;
  };
  
  /** IPSec security association direction */

Acked-by: Hemant Agrawal 


Re: [dpdk-dev] [PATCH 1/3] security: add option to configure UDP ports verification

2021-09-08 Thread Hemant Agrawal



On 9/8/2021 1:55 PM, Tejasree Kondoj wrote:

Add option to indicate whether UDP encapsulation ports
verification need to be done as part of inbound
IPsec processing.

Signed-off-by: Tejasree Kondoj 


Acked-by: Hemant Agrawal 



---
  doc/guides/rel_notes/release_21_11.rst | 5 +
  lib/security/rte_security.h| 7 +++
  2 files changed, 12 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index b0606cb542..afeba0105b 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -141,6 +141,11 @@ ABI Changes
* Added SA option to indicate whether outer header verification need to be
  done as part of inbound IPsec processing.
  
+* security: add IPsec SA option to configure UDP ports verification

+
+  * Added SA option to indicate whether UDP ports verification need to be
+done as part of inbound IPsec processing.
+
  
  Known Issues

  
diff --git a/lib/security/rte_security.h b/lib/security/rte_security.h
index 2a61cad885..18b0f02c44 100644
--- a/lib/security/rte_security.h
+++ b/lib/security/rte_security.h
@@ -139,6 +139,13 @@ struct rte_security_ipsec_sa_options {
 */
uint32_t udp_encap : 1;
  
+	/** Verify UDP encapsulation ports in inbound

+*
+* * 1: Match UDP source and destination ports
+* * 0: Do not match UDP ports
+*/
+   uint32_t udp_ports_verify : 1;
+
/** Copy DSCP bits
 *
 * * 1: Copy IPv4 or IPv6 DSCP bits from inner IP header to


Re: [dpdk-dev] [PATCH v3] eventdev: update crypto adapter metadata structures

2021-09-08 Thread Shijith Thotton
>>
>> >> In crypto adapter metadata, reserved bytes in request info structure
>> >> is a space holder for response info. It enforces an order of
>> >> operation if the structures are updated using memcpy to avoid
>> >> overwriting response info. It is logical to move the reserved space
>> >> out of request info. It also solves the ordering issue mentioned before.
>> >I would like to understand what kind of ordering issue you have faced
>> >with the current approach. Could you please give an example/sequence
>> and explain?
>> >
>>
>> I have seen this issue with crypto adapter autotest (#n215).
>>
>> Example:
>> rte_memcpy(&m_data.response_info, &response_info,
>> sizeof(response_info)); rte_memcpy(&m_data.request_info,
>> &request_info, sizeof(request_info));
>>
>> Here response info is getting overwritten by request info.
>> Above lines can reordered to fix the issue, but can be ignored with this 
>> patch.
>There is a reason for designing the metadata in this way.
>Right now, sizeof (union rte_event_crypto_metadata) is 16 bytes.
>So, the session based case needs just 16 bytes to store the data.
>Whereas, for sessionless case each crypto_ops requires another 16 bytes.
>
>By changing the struct in the following way you are doubling the memory
>requirement.
>With the changes, for sessionless case, each crypto op requires 32 bytes of 
>space
>instead of 16 bytes and the mempool will be bigger.
>This will have the perf impact too!
>
>You can just copy the individual members(cdev_id & queue_pair_id) after the
>response_info.
>OR You have a better way?
>
 
I missed the second word of the event structure. It adds an extra 8 bytes,
which is not required.
Let me know what you think of the below change to the metadata structure.

struct rte_event_crypto_metadata {
uint64_t response_info; // 8 bytes
struct rte_event_crypto_request request_info; // 8 bytes
};

Total structure size is 16 bytes.

Response info can be set like below in test app:
m_data.response_info = response_info.event;

The main aim of this patch is to decouple response info from request info.
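
To make that concrete, a small sketch of how the test app could fill the
metadata with the proposed layout (names follow the struct above and the
existing test variables; the final patch may differ):

#include <rte_event_crypto_adapter.h>

/* Sketch only: with response_info and request_info no longer
 * overlapping, the two stores can be done in either order.
 */
static void
sketch_fill_metadata(struct rte_event_crypto_metadata *m_data,
		     const struct rte_event *response_info,
		     const struct rte_event_crypto_request *request_info)
{
	m_data->response_info = response_info->event;
	m_data->request_info = *request_info;
}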

>>
>> >>
>> >> This patch removes the reserve field from request info and makes
>> >> event crypto metadata type to structure from union to make space for
>> >> response info.
>> >>
>> >> App and drivers are updated as per metadata change.
>> >>
>> >> Signed-off-by: Shijith Thotton 
>> >> Acked-by: Anoob Joseph 
>> >> ---
>> >> v3:
>> >> * Updated ABI section of release notes.
>> >>
>> >> v2:
>> >> * Updated deprecation notice.
>> >>
>> >> v1:
>> >> * Rebased.
>> >>
>> >>  app/test/test_event_crypto_adapter.c  | 14 +++---
>> >>  doc/guides/rel_notes/deprecation.rst  |  6 --
>> >>  doc/guides/rel_notes/release_21_11.rst|  2 ++
>> >>  drivers/crypto/octeontx/otx_cryptodev_ops.c   |  8 
>> >>  drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  4 ++--
>> >>  .../event/octeontx2/otx2_evdev_crypto_adptr_tx.h  |  4 ++--
>> >>  lib/eventdev/rte_event_crypto_adapter.c   |  8 
>> >>  lib/eventdev/rte_event_crypto_adapter.h   | 15 +--
>> >>  8 files changed, 26 insertions(+), 35 deletions(-)
>> >>
>> >> diff --git a/app/test/test_event_crypto_adapter.c
>> >> b/app/test/test_event_crypto_adapter.c
>> >> index 3ad20921e2..0d73694d3a 100644
>> >> --- a/app/test/test_event_crypto_adapter.c
>> >> +++ b/app/test/test_event_crypto_adapter.c
>> >> @@ -168,7 +168,7 @@ test_op_forward_mode(uint8_t session_less)  {
>> >>   struct rte_crypto_sym_xform cipher_xform;
>> >>   struct rte_cryptodev_sym_session *sess;
>> >> - union rte_event_crypto_metadata m_data;
>> >> + struct rte_event_crypto_metadata m_data;
>> >>   struct rte_crypto_sym_op *sym_op;
>> >>   struct rte_crypto_op *op;
>> >>   struct rte_mbuf *m;
>> >> @@ -368,7 +368,7 @@ test_op_new_mode(uint8_t session_less)  {
>> >>   struct rte_crypto_sym_xform cipher_xform;
>> >>   struct rte_cryptodev_sym_session *sess;
>> >> - union rte_event_crypto_metadata m_data;
>> >> + struct rte_event_crypto_metadata m_data;
>> >>   struct rte_crypto_sym_op *sym_op;
>> >>   struct rte_crypto_op *op;
>> >>   struct rte_mbuf *m;
>> >> @@ -406,7 +406,7 @@ test_op_new_mode(uint8_t session_less)
>> >>   if (cap &
>> >> RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA) {
>> >>   /* Fill in private user data information */
>> >>   rte_memcpy(&m_data.response_info,
>> &response_info,
>> >> -sizeof(m_data));
>> >> +sizeof(response_info));
>> >>   rte_cryptodev_sym_session_set_user_data(sess,
>> >>   &m_data, sizeof(m_data));
>> >>   }
>> >> @@ -426,7 +426,7 @@ test_op_new_mode(uint8_t session_less)
>> >>   op->private_data_offset = len;
>> >>   /* Fill in private data information */
>> >>   rte_memcpy(&m_data.response_info, &response_info,
>> >> -   

Re: [dpdk-dev] [PATCH v3] eventdev: update crypto adapter metadata structures

2021-09-08 Thread Gujjar, Abhinandan S
Hi Shijith,

> -Original Message-
> From: Shijith Thotton 
> Sent: Wednesday, September 8, 2021 1:13 PM
> To: Gujjar, Abhinandan S ; dev@dpdk.org
> Cc: Jerin Jacob Kollanukkaran ; Anoob Joseph
> ; Pavan Nikhilesh Bhagavatula
> ; Akhil Goyal ; Ray
> Kinsella ; Ankur Dwivedi 
> Subject: RE: [PATCH v3] eventdev: update crypto adapter metadata
> structures
> 
> >>
> >> >> In crypto adapter metadata, reserved bytes in request info
> >> >> structure is a space holder for response info. It enforces an
> >> >> order of operation if the structures are updated using memcpy to
> >> >> avoid overwriting response info. It is logical to move the
> >> >> reserved space out of request info. It also solves the ordering issue
> mentioned before.
> >> >I would like to understand what kind of ordering issue you have
> >> >faced with the current approach. Could you please give an
> >> >example/sequence
> >> and explain?
> >> >
> >>
> >> I have seen this issue with crypto adapter autotest (#n215).
> >>
> >> Example:
> >> rte_memcpy(&m_data.response_info, &response_info,
> >> sizeof(response_info)); rte_memcpy(&m_data.request_info,
> >> &request_info, sizeof(request_info));
> >>
> >> Here response info is getting overwritten by request info.
> >> Above lines can reordered to fix the issue, but can be ignored with this
> patch.
> >There is a reason for designing the metadata in this way.
> >Right now, sizeof (union rte_event_crypto_metadata) is 16 bytes.
> >So, the session based case needs just 16 bytes to store the data.
> >Whereas, for sessionless case each crypto_ops requires another 16 bytes.
> >
> >By changing the struct in the following way you are doubling the memory
> >requirement.
> >With the changes, for sessionless case, each crypto op requires 32
> >bytes of space instead of 16 bytes and the mempool will be bigger.
> >This will have the perf impact too!
> >
> >You can just copy the individual members(cdev_id & queue_pair_id) after
> >the response_info.
> >OR You have a better way?
> >
> 
> I missed the second word of event structure. It adds an extra 8 bytes, which
> is not required.
I guess you meant it is not adding anything; it is overlapping with the
request information.
> Let me know, what you think of below change to metadata structure.
> 
> struct rte_event_crypto_metadata {
>   uint64_t response_info; // 8 bytes
With this, it lacks clarity in indicating that it is event information.
>   struct rte_event_crypto_request request_info; // 8 bytes };
> 
> Total structure size is 16 bytes.
> 
> Response info can be set like below in test app:
>   m_data.response_info = response_info.event;
> 
> The main aim of this patch is to decouple response info from request info.
The decoupling was required because it was doing a memcpy.
If you copy the individual members of request info (after the response), you
don't require it.
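
For comparison, a rough sketch of what is being suggested here while keeping
the existing union layout (member names are the ones already discussed in
this thread; purely illustrative):

#include <rte_event_crypto_adapter.h>

/* Sketch only: fill the response event first, then copy just the two
 * request members that matter, so the overlapping reserved bytes of
 * request_info never clobber the response.
 */
static void
sketch_fill_metadata_union(union rte_event_crypto_metadata *m_data,
			   const struct rte_event *response_info,
			   const struct rte_event_crypto_request *request_info)
{
	m_data->response_info = *response_info;
	m_data->request_info.cdev_id = request_info->cdev_id;
	m_data->request_info.queue_pair_id = request_info->queue_pair_id;
}
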
> 
> >>
> >> >>
> >> >> This patch removes the reserve field from request info and makes
> >> >> event crypto metadata type to structure from union to make space
> >> >> for response info.
> >> >>
> >> >> App and drivers are updated as per metadata change.
> >> >>
> >> >> Signed-off-by: Shijith Thotton 
> >> >> Acked-by: Anoob Joseph 
> >> >> ---
> >> >> v3:
> >> >> * Updated ABI section of release notes.
> >> >>
> >> >> v2:
> >> >> * Updated deprecation notice.
> >> >>
> >> >> v1:
> >> >> * Rebased.
> >> >>
> >> >>  app/test/test_event_crypto_adapter.c  | 14 +++---
> >> >>  doc/guides/rel_notes/deprecation.rst  |  6 --
> >> >>  doc/guides/rel_notes/release_21_11.rst|  2 ++
> >> >>  drivers/crypto/octeontx/otx_cryptodev_ops.c   |  8 
> >> >>  drivers/crypto/octeontx2/otx2_cryptodev_ops.c |  4 ++--
> >> >>  .../event/octeontx2/otx2_evdev_crypto_adptr_tx.h  |  4 ++--
> >> >>  lib/eventdev/rte_event_crypto_adapter.c   |  8 
> >> >>  lib/eventdev/rte_event_crypto_adapter.h   | 15 +--
> >> >>  8 files changed, 26 insertions(+), 35 deletions(-)
> >> >>
> >> >> diff --git a/app/test/test_event_crypto_adapter.c
> >> >> b/app/test/test_event_crypto_adapter.c
> >> >> index 3ad20921e2..0d73694d3a 100644
> >> >> --- a/app/test/test_event_crypto_adapter.c
> >> >> +++ b/app/test/test_event_crypto_adapter.c
> >> >> @@ -168,7 +168,7 @@ test_op_forward_mode(uint8_t session_less)
> {
> >> >> struct rte_crypto_sym_xform cipher_xform;
> >> >> struct rte_cryptodev_sym_session *sess;
> >> >> -   union rte_event_crypto_metadata m_data;
> >> >> +   struct rte_event_crypto_metadata m_data;
> >> >> struct rte_crypto_sym_op *sym_op;
> >> >> struct rte_crypto_op *op;
> >> >> struct rte_mbuf *m;
> >> >> @@ -368,7 +368,7 @@ test_op_new_mode(uint8_t session_less)  {
> >> >> struct rte_crypto_sym_xform cipher_xform;
> >> >> struct rte_cryptodev_sym_session *sess;
> >> >> -   union rte_event_crypto_metadata m_data;
> >> >> +   struct rte_event_crypto_metadata m_data;
> >> >> s

Re: [dpdk-dev] [EXT] Re: [PATCH v4 1/4] test/crypto: remove illegal header include

2021-09-08 Thread Akhil Goyal
> 
> On 9/8/2021 12:52 AM, Akhil Goyal wrote:
> > rte_cryptodev_pmd.h is an interface between
> > driver and library and it is mentioned in the
> > file that application cannot use it directly.
> > Hence, removing the include.
> >
> > Signed-off-by: Akhil Goyal 
> > Acked-by: Matan Azrad 
> 
> Series-
> 
> Acked-by: Hemant Agrawal 

Series Applied to dpdk-next-crypto




Re: [dpdk-dev] [PATCH v3 1/3] eventdev: add rx queue info get api

2021-09-08 Thread Kundapura, Ganapati
Hi Jerin,

> -Original Message-
> From: Jerin Jacob 
> Sent: 07 September 2021 15:07
> To: Kundapura, Ganapati 
> Cc: Jayatheerthan, Jay ; dpdk-dev
> ; Pavan Nikhilesh 
> Subject: Re: [PATCH v3 1/3] eventdev: add rx queue info get api
> 
> On Tue, Sep 7, 2021 at 2:20 PM Kundapura, Ganapati
>  wrote:
> >
> >
> >
> > > -Original Message-
> > > From: Jerin Jacob 
> > > Sent: 07 September 2021 13:42
> > > To: Kundapura, Ganapati 
> > > Cc: Jayatheerthan, Jay ; dpdk-dev
> > > ; Pavan Nikhilesh 
> > > Subject: Re: [PATCH v3 1/3] eventdev: add rx queue info get api
> > >
> > >  in
> > >
> > > On Tue, Sep 7, 2021 at 12:15 PM Ganapati Kundapura
> > >  wrote:
> > > >
> > > > Added rte_event_eth_rx_adapter_queue_info_get() API to get rx
> > > > queue information - event queue identifier, flags for handling
> > > > received packets, schedular type, event priority, polling
> > > > frequency of the receive queue and flow identifier in
> > > > rte_event_eth_rx_adapter_queue_info structure
> > > >
> > > > Signed-off-by: Ganapati Kundapura 
> > > >
> > > > ---
> > > > v3:
> > > > * Split single patch into implementation, test and documentation
> > > >   update patches separately
> > >
> > > > +struct rte_event_eth_rx_adapter_queue_info;
> > > > +
> > > > +/**
> > > > + * Retrieve information about Rx queue. This callback is invoked
> > > > +if
> > > > + * the caps returned from the eventdev_eth_rx_adapter_caps_get(,
> > > > +eth_port_id)
> > > > + * has RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT set.
> > >
> > > It will useful for !RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT
> case
> > > too.
> > >
> 
> 
> 
> Missed this comment in v4
Sorry I missed these comments

rte_event_eth_rx_adapter_queue_info_get() calls the PMD callback if the
internal port capability is set; otherwise it fills in and returns the
queue_info itself.
The PMD callback covers the internal port case, and the queue_info_get() API
implementation covers the non-internal-port case.

> > > > diff --git a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > index 182dd2e..75c0010 100644
> > > > --- a/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > +++ b/lib/eventdev/rte_event_eth_rx_adapter.h
> > > > @@ -33,6 +33,7 @@
> > > >   *  - rte_event_eth_rx_adapter_stop()
> > > >   *  - rte_event_eth_rx_adapter_stats_get()
> > > >   *  - rte_event_eth_rx_adapter_stats_reset()
> > > > + *  - rte_event_eth_rx_adapter_queue_info_get()
> > > >   *
> > > >   * The application creates an ethernet to event adapter using
> > > >   * rte_event_eth_rx_adapter_create_ext() or
> > > > rte_event_eth_rx_adapter_create() @@ -140,6 +141,56 @@ typedef
> int
> > > (*rte_event_eth_rx_adapter_conf_cb) (uint8_t id, uint8_t dev_id,
> > > > void *arg);
> > > >
> > > >  /**
> > > > + * Rx queue info
> > > > + */
> > > > +struct rte_event_eth_rx_adapter_queue_info {
> > >
> > > Can we avoid the duplication of this structure and use
> > > rte_event_eth_rx_adapter_queue_conf instead.
> > >
Agree
> > > API can be rte_event_eth_rx_adapter_queue_conf_get() to align the
> > > structure.
Agree
> > >
> > > Also instead of every driver duplicating this code, How about
> > > - common code stores the config in
> > > rte_event_eth_rx_adapter_queue_add()
> > > - common code stores the config in
> > > rte_event_eth_rx_adapter_queue_conf_get()

queue_add() stores the config in dev_info and queue_conf_get() retrieves the
config from dev_info.
Could you please clarify what you mean by common code storing and retrieving
the queue conf?

> > > - Addtional PMD level API can be given incase, something needs to
> > > overridden by Adapter.
> 
Existing PMD callbacks like queue_add, queue_del, adapter_start, adapter_stop,
etc. don't have any additional PMD-level API.
The queue_info_get PMD callback is similar in that respect.

> 
> Missed addressing this comment in v4.


Re: [dpdk-dev] [PATCH v2] net/ixgbe: fix vf mac remains

2021-09-08 Thread Wang, Haiyue
> -Original Message-
> From: Qiming Chen 
> Sent: Monday, September 6, 2021 10:35
> To: dev@dpdk.org
> Cc: Wang, Haiyue ; Qiming Chen 
> ; sta...@dpdk.org
> Subject: [PATCH v2] net/ixgbe: fix vf mac remains
> 
> In the following two scenarios, the mac address residual problem
> will occur:
> 1) In the VF hard pass-through environment, after adding the mac
> address, the process restarts, and the previously added mac is
> still valid;
> 2) In the vf hard pass-through environment, after the mac address
> is issued, the port will start/stop, and the previously added mac
> is still valid;

How did you test it ?

From the ixgbe PF, it will clean the MACs:

ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
ixgbe_set_vf_macvlan(adapter, vf, 0, NULL);

> 
> The patch clears the mac address at the start and stop of the vf
> port. After the clear at start, the rte framework will restore the
> added mac addresses, which solves the problem of residual mac addresses.
> 
> Fixes: af75078fece3 ("first public release")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Qiming Chen 
> ---
> v2:
>   Modify fixes commit
> ---
>  drivers/net/ixgbe/ixgbe_ethdev.c | 6 ++
>  1 file changed, 6 insertions(+)
> 
> diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c 
> b/drivers/net/ixgbe/ixgbe_ethdev.c
> index 6a91f104e1..e40350d86e 100644
> --- a/drivers/net/ixgbe/ixgbe_ethdev.c
> +++ b/drivers/net/ixgbe/ixgbe_ethdev.c
> @@ -5410,6 +5410,9 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
>*/
>   ixgbevf_dev_link_update(dev, 0);
> 
> + /* Clear the mac address, the rte frame will be restored */
> + ixgbevf_set_uc_addr_vf(hw, 0, NULL);
> +
>   hw->adapter_stopped = false;
> 
>   return 0;
> @@ -5454,6 +5457,9 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
>   intr_handle->intr_vec = NULL;
>   }
> 
> + /* Clear the mac address */
> + ixgbevf_set_uc_addr_vf(hw, 0, NULL);
> +
>   adapter->rss_reta_updated = 0;
> 
>   return 0;
> --
> 2.30.1.windows.1



[dpdk-dev] [PATCH 00/32] net/ngbe: add many features

2021-09-08 Thread Jiawen Wu
This patch set adds a number of major features to complete the ngbe PMD.

Jiawen Wu (32):
  net/ngbe: add packet type
  net/ngbe: support scattered Rx
  net/ngbe: support Rx checksum offload
  net/ngbe: support TSO
  net/ngbe: support CRC offload
  net/ngbe: support jumbo frame
  net/ngbe: support VLAN and QinQ offload
  net/ngbe: support basic statistics
  net/ngbe: support device xstats
  net/ngbe: support MTU set
  net/ngbe: add device promiscuous and allmulticast mode
  net/ngbe: support getting FW version
  net/ngbe: add loopback mode
  net/ngbe: support Rx interrupt
  net/ngbe: support MAC filters
  net/ngbe: support VLAN filter
  net/ngbe: support RSS hash
  net/ngbe: support SRIOV
  net/ngbe: add mailbox process operations
  net/ngbe: support flow control
  net/ngbe: support device LED on and off
  net/ngbe: support EEPROM dump
  net/ngbe: support register dump
  net/ngbe: support timesync
  net/ngbe: add Rx and Tx queue info get
  net/ngbe: add Rx and Tx descriptor status
  net/ngbe: add Tx done cleanup
  net/ngbe: add IPsec context creation
  net/ngbe: create and destroy security session
  net/ngbe: support security operations
  net/ngbe: add security offload in Rx and Tx
  doc: update for ngbe

 doc/guides/nics/features/ngbe.ini  |   33 +
 doc/guides/nics/ngbe.rst   |   16 +
 doc/guides/rel_notes/release_21_11.rst |   10 +
 drivers/net/ngbe/base/meson.build  |1 +
 drivers/net/ngbe/base/ngbe.h   |4 +
 drivers/net/ngbe/base/ngbe_dummy.h |  131 ++
 drivers/net/ngbe/base/ngbe_eeprom.c|  133 ++
 drivers/net/ngbe/base/ngbe_eeprom.h|   10 +
 drivers/net/ngbe/base/ngbe_hw.c|  912 ++-
 drivers/net/ngbe/base/ngbe_hw.h|   24 +
 drivers/net/ngbe/base/ngbe_mbx.c   |  327 
 drivers/net/ngbe/base/ngbe_mbx.h   |   89 +
 drivers/net/ngbe/base/ngbe_mng.c   |   85 +
 drivers/net/ngbe/base/ngbe_mng.h   |   18 +
 drivers/net/ngbe/base/ngbe_phy.c   |9 +
 drivers/net/ngbe/base/ngbe_phy.h   |3 +
 drivers/net/ngbe/base/ngbe_phy_mvl.c   |   57 +
 drivers/net/ngbe/base/ngbe_phy_mvl.h   |4 +
 drivers/net/ngbe/base/ngbe_phy_rtl.c   |   42 +
 drivers/net/ngbe/base/ngbe_phy_rtl.h   |3 +
 drivers/net/ngbe/base/ngbe_phy_yt.c|   44 +
 drivers/net/ngbe/base/ngbe_phy_yt.h|6 +
 drivers/net/ngbe/base/ngbe_type.h  |  226 +++
 drivers/net/ngbe/meson.build   |7 +
 drivers/net/ngbe/ngbe_ethdev.c | 2077 ++-
 drivers/net/ngbe/ngbe_ethdev.h |  199 +++
 drivers/net/ngbe/ngbe_ipsec.c  |  702 
 drivers/net/ngbe/ngbe_ipsec.h  |   95 ++
 drivers/net/ngbe/ngbe_pf.c |  760 +
 drivers/net/ngbe/ngbe_ptypes.c |  300 
 drivers/net/ngbe/ngbe_ptypes.h |  240 +++
 drivers/net/ngbe/ngbe_regs_group.h |   54 +
 drivers/net/ngbe/ngbe_rxtx.c   | 2083 +++-
 drivers/net/ngbe/ngbe_rxtx.h   |   84 +-
 drivers/net/ngbe/rte_pmd_ngbe.h|   39 +
 35 files changed, 8799 insertions(+), 28 deletions(-)
 create mode 100644 drivers/net/ngbe/base/ngbe_mbx.c
 create mode 100644 drivers/net/ngbe/base/ngbe_mbx.h
 create mode 100644 drivers/net/ngbe/ngbe_ipsec.c
 create mode 100644 drivers/net/ngbe/ngbe_ipsec.h
 create mode 100644 drivers/net/ngbe/ngbe_pf.c
 create mode 100644 drivers/net/ngbe/ngbe_ptypes.c
 create mode 100644 drivers/net/ngbe/ngbe_ptypes.h
 create mode 100644 drivers/net/ngbe/ngbe_regs_group.h
 create mode 100644 drivers/net/ngbe/rte_pmd_ngbe.h

-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 01/32] net/ngbe: add packet type

2021-09-08 Thread Jiawen Wu
Add packet type macro definitions and convert ptype to ptid.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   1 +
 doc/guides/nics/ngbe.rst  |   1 +
 drivers/net/ngbe/meson.build  |   1 +
 drivers/net/ngbe/ngbe_ethdev.c|   9 +
 drivers/net/ngbe/ngbe_ethdev.h|   4 +
 drivers/net/ngbe/ngbe_ptypes.c| 300 ++
 drivers/net/ngbe/ngbe_ptypes.h| 240 
 drivers/net/ngbe/ngbe_rxtx.c  |  16 ++
 drivers/net/ngbe/ngbe_rxtx.h  |   2 +
 9 files changed, 574 insertions(+)
 create mode 100644 drivers/net/ngbe/ngbe_ptypes.c
 create mode 100644 drivers/net/ngbe/ngbe_ptypes.h

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 08d5f1b0dc..8b7588184a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -8,6 +8,7 @@ Speed capabilities   = Y
 Link status  = Y
 Link status event= Y
 Queue start/stop = Y
+Packet type parsing  = Y
 Multiprocess aware   = Y
 Linux= Y
 ARMv8= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 3ba3bb755f..d044397cd5 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -11,6 +11,7 @@ for Wangxun 1 Gigabit Ethernet NICs.
 Features
 
 
+- Packet type information
 - Link state information
 
 
diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build
index 815ef4da23..05f94fe7d6 100644
--- a/drivers/net/ngbe/meson.build
+++ b/drivers/net/ngbe/meson.build
@@ -12,6 +12,7 @@ objs = [base_objs]
 
 sources = files(
 'ngbe_ethdev.c',
+'ngbe_ptypes.c',
 'ngbe_rxtx.c',
 )
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3b5c6615ad..4388d93560 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -667,6 +667,15 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
return 0;
 }
 
+const uint32_t *
+ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+   if (dev->rx_pkt_burst == ngbe_recv_pkts)
+   return ngbe_get_supported_ptypes();
+
+   return NULL;
+}
+
 /* return 0 means link status changed, -1 means not changed */
 int
 ngbe_dev_link_update_share(struct rte_eth_dev *dev,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 7fb72f3f1f..486c6c3839 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -6,6 +6,8 @@
 #ifndef _NGBE_ETHDEV_H_
 #define _NGBE_ETHDEV_H_
 
+#include "ngbe_ptypes.h"
+
 /* need update link, bit flag */
 #define NGBE_FLAG_NEED_LINK_UPDATE  ((uint32_t)(1 << 0))
 #define NGBE_FLAG_MAILBOX   ((uint32_t)(1 << 1))
@@ -131,4 +133,6 @@ ngbe_dev_link_update_share(struct rte_eth_dev *dev,
 #define NGBE_DEFAULT_TX_HTHRESH  0
 #define NGBE_DEFAULT_TX_WTHRESH  0
 
+const uint32_t *ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
+
 #endif /* _NGBE_ETHDEV_H_ */
diff --git a/drivers/net/ngbe/ngbe_ptypes.c b/drivers/net/ngbe/ngbe_ptypes.c
new file mode 100644
index 00..d6d82105c9
--- /dev/null
+++ b/drivers/net/ngbe/ngbe_ptypes.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
+ */
+
+#include 
+#include 
+
+#include "base/ngbe_type.h"
+#include "ngbe_ptypes.h"
+
+/* The ngbe_ptype_lookup is used to convert from the 8-bit ptid in the
+ * hardware to a bit-field that can be used by SW to more easily determine the
+ * packet type.
+ *
+ * Macros are used to shorten the table lines and make this table human
+ * readable.
+ *
+ * We store the PTYPE in the top byte of the bit field - this is just so that
+ * we can check that the table doesn't have a row missing, as the index into
+ * the table should be the PTYPE.
+ */
+#define TPTE(ptid, l2, l3, l4, tun, el2, el3, el4) \
+   [ptid] = (RTE_PTYPE_L2_##l2 | \
+   RTE_PTYPE_L3_##l3 | \
+   RTE_PTYPE_L4_##l4 | \
+   RTE_PTYPE_TUNNEL_##tun | \
+   RTE_PTYPE_INNER_L2_##el2 | \
+   RTE_PTYPE_INNER_L3_##el3 | \
+   RTE_PTYPE_INNER_L4_##el4)
+
+#define RTE_PTYPE_L2_NONE   0
+#define RTE_PTYPE_L3_NONE   0
+#define RTE_PTYPE_L4_NONE   0
+#define RTE_PTYPE_TUNNEL_NONE   0
+#define RTE_PTYPE_INNER_L2_NONE 0
+#define RTE_PTYPE_INNER_L3_NONE 0
+#define RTE_PTYPE_INNER_L4_NONE 0
+
+static u32 ngbe_ptype_lookup[NGBE_PTID_MAX] __rte_cache_aligned = {
+   /* L2:0-3 L3:4-7 L4:8-11 TUN:12-15 EL2:16-19 EL3:20-23 EL2:24-27 */
+   /* L2: ETH */
+   TPTE(0x10, ETHER,  NONE, NONE, NONE, NONE, NONE, NONE),
+   TPTE(0x11, ETHER,  NONE, NONE, NONE, NONE, NONE, NONE),
+   TPTE(0x12, ETHER_TIMESYNC, NONE, NONE, NONE, NONE, NONE, NONE),
+   TPTE(0x13, ETHER_FIP,  NONE, NONE, NONE, NONE, NONE, NONE),
+ 

[dpdk-dev] [PATCH 03/32] net/ngbe: support Rx checksum offload

2021-09-08 Thread Jiawen Wu
Support IP/L4 checksum offload on Rx, and convert the checksum status to mbuf flags.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  2 +
 doc/guides/nics/ngbe.rst  |  1 +
 drivers/net/ngbe/ngbe_rxtx.c  | 75 +--
 3 files changed, 75 insertions(+), 3 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index f85754eb7a..2777ed5a62 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -9,6 +9,8 @@ Link status  = Y
 Link status event= Y
 Queue start/stop = Y
 Scattered Rx = Y
+L3 checksum offload  = P
+L4 checksum offload  = P
 Packet type parsing  = Y
 Multiprocess aware   = Y
 Linux= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 463452ce8c..0a14252ff2 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -12,6 +12,7 @@ Features
 
 
 - Packet type information
+- Checksum offload
 - Link state information
 - Scattered for RX
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 49fa978853..1661ecafa5 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -263,6 +263,31 @@ ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t 
ptid_mask)
return ngbe_decode_ptype(ptid);
 }
 
+static inline uint64_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+   uint64_t pkt_flags = 0;
+
+   /* checksum offload can't be disabled */
+   if (rx_status & NGBE_RXD_STAT_IPCS) {
+   pkt_flags |= (rx_status & NGBE_RXD_ERR_IPCS
+   ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
+   }
+
+   if (rx_status & NGBE_RXD_STAT_L4CS) {
+   pkt_flags |= (rx_status & NGBE_RXD_ERR_L4CS
+   ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
+   }
+
+   if (rx_status & NGBE_RXD_STAT_EIPCS &&
+   rx_status & NGBE_RXD_ERR_EIPCS) {
+   pkt_flags |= PKT_RX_OUTER_IP_CKSUM_BAD;
+   }
+
+
+   return pkt_flags;
+}
+
 /*
  * LOOK_AHEAD defines how many desc statuses to check beyond the
  * current descriptor.
@@ -281,6 +306,7 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
struct ngbe_rx_entry *rxep;
struct rte_mbuf *mb;
uint16_t pkt_len;
+   uint64_t pkt_flags;
int nb_dd;
uint32_t s[LOOK_AHEAD];
uint32_t pkt_info[LOOK_AHEAD];
@@ -325,6 +351,9 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
 
+   /* convert descriptor fields to rte mbuf flags */
+   pkt_flags = rx_desc_error_to_pkt_flags(s[j]);
+   mb->ol_flags = pkt_flags;
mb->packet_type =
ngbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
rxq->pkt_type_mask);
@@ -519,6 +548,7 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t rx_id;
uint16_t nb_rx;
uint16_t nb_hold;
+   uint64_t pkt_flags;
 
nb_rx = 0;
nb_hold = 0;
@@ -611,11 +641,14 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
/*
 * Initialize the returned mbuf.
-* setup generic mbuf fields:
+* 1) setup generic mbuf fields:
 *- number of segments,
 *- next segment,
 *- packet length,
 *- Rx port identifier.
+* 2) integrate hardware offload data, if any:
+*- IP checksum flag,
+*- error flags.
 */
pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));
rxm->data_off = RTE_PKTMBUF_HEADROOM;
@@ -627,6 +660,8 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rxm->port = rxq->port_id;
 
pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
+   pkt_flags = rx_desc_error_to_pkt_flags(staterr);
+   rxm->ol_flags = pkt_flags;
rxm->packet_type = ngbe_rxd_pkt_info_to_pkt_type(pkt_info,
   rxq->pkt_type_mask);
 
@@ -663,16 +698,30 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
return nb_rx;
 }
 
+/**
+ * ngbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
+ *
+ * Fill the following info in the HEAD buffer of the Rx cluster:
+ *- RX port identifier
+ *- hardware offload data, if any:
+ *  - IP checksum flag
+ *  - error flags
+ * @head HEAD of the packet cluster
+ * @desc HW descriptor to get data from
+ * @rxq Pointer to the Rx queue
+ */
 static inline void
 ngbe_fill_cluster_head_buf(struct rte_mbuf *head, struct ngbe_rx_desc *desc,
struct ngbe_rx_queue *rxq

[dpdk-dev] [PATCH 04/32] net/ngbe: support TSO

2021-09-08 Thread Jiawen Wu
Add transmit datapath with offloads, and support TCP segmentation
offload.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   3 +
 doc/guides/nics/ngbe.rst  |   3 +-
 drivers/net/ngbe/ngbe_ethdev.c|  19 +-
 drivers/net/ngbe/ngbe_ethdev.h|   6 +
 drivers/net/ngbe/ngbe_rxtx.c  | 678 ++
 drivers/net/ngbe/ngbe_rxtx.h  |  58 +++
 6 files changed, 765 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 2777ed5a62..32f74a3084 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -9,8 +9,11 @@ Link status  = Y
 Link status event= Y
 Queue start/stop = Y
 Scattered Rx = Y
+TSO  = Y
 L3 checksum offload  = P
 L4 checksum offload  = P
+Inner L3 checksum= P
+Inner L4 checksum= P
 Packet type parsing  = Y
 Multiprocess aware   = Y
 Linux= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 0a14252ff2..6a6ae39243 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -13,8 +13,9 @@ Features
 
 - Packet type information
 - Checksum offload
+- TSO offload
 - Link state information
-- Scattered for RX
+- Scattered and gather for TX and RX
 
 
 Prerequisites
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index fba0a2dcfd..e7d63f1b14 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -138,7 +138,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
 
eth_dev->dev_ops = &ngbe_eth_dev_ops;
eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
-   eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
+   eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
+   eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
 
/*
 * For secondary processes, we don't initialise any further as primary
@@ -146,6 +147,20 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
 * Rx and Tx function.
 */
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+   struct ngbe_tx_queue *txq;
+   /* Tx queue function in primary, set by last queue initialized
+* Tx queue may not initialized by primary process
+*/
+   if (eth_dev->data->tx_queues) {
+   uint16_t nb_tx_queues = eth_dev->data->nb_tx_queues;
+   txq = eth_dev->data->tx_queues[nb_tx_queues - 1];
+   ngbe_set_tx_function(eth_dev, txq);
+   } else {
+   /* Use default Tx function if we get here */
+   PMD_INIT_LOG(NOTICE,
+   "No Tx queues configured yet. Using default Tx 
function.");
+   }
+
ngbe_set_rx_function(eth_dev);
 
return 0;
@@ -641,6 +656,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->max_rx_pktlen = 15872;
dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
 dev_info->rx_queue_offload_capa);
+   dev_info->tx_queue_offload_capa = 0;
+   dev_info->tx_offload_capa = ngbe_get_tx_port_offloads(dev);
 
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index e7fe9a03b7..cbf3ab558f 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -114,9 +114,15 @@ uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
 uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
 
+uint16_t ngbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+   uint16_t nb_pkts);
+
 uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
 
+uint16_t ngbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+   uint16_t nb_pkts);
+
 void ngbe_set_ivar_map(struct ngbe_hw *hw, int8_t direction,
   uint8_t queue, uint8_t msix_vector);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 1661ecafa5..21f5808787 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -9,11 +9,24 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "ngbe_logs.h"
 #include "base/ngbe.h"
 #include "ngbe_ethdev.h"
 #include "ngbe_rxtx.h"
+/* Bit Mask to indicate what bits required for building Tx context */
+static const u64 NGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
+   PKT_TX_OUTER_IPV6 |
+   PKT_TX_OUTER_IPV4 |
+   PKT_TX_IPV6 |
+   PKT_TX_IPV4 |
+   PKT_TX_L4_MASK |
+   PKT_TX_TCP_SEG |
+   PKT_TX_TUNNEL_MASK |
+  

[dpdk-dev] [PATCH 05/32] net/ngbe: support CRC offload

2021-09-08 Thread Jiawen Wu
Support stripping or keeping the CRC in the Rx path.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  1 +
 drivers/net/ngbe/ngbe_rxtx.c  | 53 +--
 drivers/net/ngbe/ngbe_rxtx.h  |  1 +
 3 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 32f74a3084..2a472d9434 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -10,6 +10,7 @@ Link status event= Y
 Queue start/stop = Y
 Scattered Rx = Y
 TSO  = Y
+CRC offload  = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Inner L3 checksum= P
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 21f5808787..f9d8cf9d19 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -968,7 +968,8 @@ ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
/* Translate descriptor info to mbuf format */
for (j = 0; j < nb_dd; ++j) {
mb = rxep[j].mbuf;
-   pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len);
+   pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
+ rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
 
@@ -1271,7 +1272,8 @@ ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 *- IP checksum flag,
 *- error flags.
 */
-   pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len));
+   pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
+ rxq->crc_len);
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
rxm->nb_segs = 1;
@@ -1521,6 +1523,22 @@ ngbe_recv_pkts_sc(void *rx_queue, struct rte_mbuf 
**rx_pkts, uint16_t nb_pkts,
/* Initialize the first mbuf of the returned packet */
ngbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
 
+   /* Deal with the case, when HW CRC srip is disabled. */
+   first_seg->pkt_len -= rxq->crc_len;
+   if (unlikely(rxm->data_len <= rxq->crc_len)) {
+   struct rte_mbuf *lp;
+
+   for (lp = first_seg; lp->next != rxm; lp = lp->next)
+   ;
+
+   first_seg->nb_segs--;
+   lp->data_len -= rxq->crc_len - rxm->data_len;
+   lp->next = NULL;
+   rte_pktmbuf_free_seg(rxm);
+   } else {
+   rxm->data_len -= rxq->crc_len;
+   }
+
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch((char *)first_seg->buf_addr +
first_seg->data_off);
@@ -1989,6 +2007,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev 
__rte_unused)
offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
   DEV_RX_OFFLOAD_UDP_CKSUM   |
   DEV_RX_OFFLOAD_TCP_CKSUM   |
+  DEV_RX_OFFLOAD_KEEP_CRC|
   DEV_RX_OFFLOAD_SCATTER;
 
return offloads;
@@ -2032,6 +2051,10 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
rxq->queue_id = queue_idx;
rxq->reg_idx = queue_idx;
rxq->port_id = dev->data->port_id;
+   if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+   rxq->crc_len = RTE_ETHER_CRC_LEN;
+   else
+   rxq->crc_len = 0;
rxq->drop_en = rx_conf->rx_drop_en;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
@@ -2259,6 +2282,7 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
uint32_t fctrl;
uint32_t hlreg0;
uint32_t srrctl;
+   uint32_t rdrxctl;
uint32_t rxcsum;
uint16_t buf_size;
uint16_t i;
@@ -2279,7 +2303,14 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
fctrl |= NGBE_PSRCTL_BCA;
wr32(hw, NGBE_PSRCTL, fctrl);
 
+   /*
+* Configure CRC stripping, if any.
+*/
hlreg0 = rd32(hw, NGBE_SECRXCTL);
+   if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+   hlreg0 &= ~NGBE_SECRXCTL_CRCSTRIP;
+   else
+   hlreg0 |= NGBE_SECRXCTL_CRCSTRIP;
hlreg0 &= ~NGBE_SECRXCTL_XDSA;
wr32(hw, NGBE_SECRXCTL, hlreg0);
 
@@ -2290,6 +2321,15 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
 
+   /*
+* Reset crc_len in case it was changed after queue setup by a
+* call to configure.
+*/
+   if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CR

[dpdk-dev] [PATCH 06/32] net/ngbe: support jumbo frame

2021-09-08 Thread Jiawen Wu
Add support for Rx jumbo frames.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  1 +
 doc/guides/nics/ngbe.rst  |  1 +
 drivers/net/ngbe/ngbe_rxtx.c  | 11 ++-
 3 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 2a472d9434..30fdfe62c7 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -8,6 +8,7 @@ Speed capabilities   = Y
 Link status  = Y
 Link status event= Y
 Queue start/stop = Y
+Jumbo frame  = Y
 Scattered Rx = Y
 TSO  = Y
 CRC offload  = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 6a6ae39243..702a455041 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -14,6 +14,7 @@ Features
 - Packet type information
 - Checksum offload
 - TSO offload
+- Jumbo frames
 - Link state information
 - Scattered and gather for TX and RX
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index f9d8cf9d19..4238fbe3b8 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -2008,6 +2008,7 @@ ngbe_get_rx_port_offloads(struct rte_eth_dev *dev 
__rte_unused)
   DEV_RX_OFFLOAD_UDP_CKSUM   |
   DEV_RX_OFFLOAD_TCP_CKSUM   |
   DEV_RX_OFFLOAD_KEEP_CRC|
+  DEV_RX_OFFLOAD_JUMBO_FRAME |
   DEV_RX_OFFLOAD_SCATTER;
 
return offloads;
@@ -2314,8 +2315,16 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
hlreg0 &= ~NGBE_SECRXCTL_XDSA;
wr32(hw, NGBE_SECRXCTL, hlreg0);
 
-   wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+   /*
+* Configure jumbo frame support, if any.
+*/
+   if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+   wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+   NGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
+   } else {
+   wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
+   }
 
/* Setup Rx queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 02/32] net/ngbe: support scattered Rx

2021-09-08 Thread Jiawen Wu
Add scattered Rx function to support receiving segmented mbufs.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   1 +
 doc/guides/nics/ngbe.rst  |   1 +
 drivers/net/ngbe/ngbe_ethdev.c|  20 +-
 drivers/net/ngbe/ngbe_ethdev.h|   8 +
 drivers/net/ngbe/ngbe_rxtx.c  | 541 ++
 drivers/net/ngbe/ngbe_rxtx.h  |   5 +
 6 files changed, 574 insertions(+), 2 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 8b7588184a..f85754eb7a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -8,6 +8,7 @@ Speed capabilities   = Y
 Link status  = Y
 Link status event= Y
 Queue start/stop = Y
+Scattered Rx = Y
 Packet type parsing  = Y
 Multiprocess aware   = Y
 Linux= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index d044397cd5..463452ce8c 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -13,6 +13,7 @@ Features
 
 - Packet type information
 - Link state information
+- Scattered for RX
 
 
 Prerequisites
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 4388d93560..fba0a2dcfd 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -140,8 +140,16 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
eth_dev->tx_pkt_burst = &ngbe_xmit_pkts_simple;
 
-   if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+   /*
+* For secondary processes, we don't initialise any further as primary
+* has already done this work. Only check we don't need a different
+* Rx and Tx function.
+*/
+   if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+   ngbe_set_rx_function(eth_dev);
+
return 0;
+   }
 
rte_eth_copy_pci_info(eth_dev, pci_dev);
 
@@ -528,6 +536,9 @@ ngbe_dev_stop(struct rte_eth_dev *dev)
 
ngbe_dev_clear_queues(dev);
 
+   /* Clear stored conf */
+   dev->data->scattered_rx = 0;
+
/* Clear recorded link status */
memset(&link, 0, sizeof(link));
rte_eth_linkstatus_set(dev, &link);
@@ -628,6 +639,8 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024;
dev_info->max_rx_pktlen = 15872;
+   dev_info->rx_offload_capa = (ngbe_get_rx_port_offloads(dev) |
+dev_info->rx_queue_offload_capa);
 
dev_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_thresh = {
@@ -670,7 +683,10 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
 const uint32_t *
 ngbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 {
-   if (dev->rx_pkt_burst == ngbe_recv_pkts)
+   if (dev->rx_pkt_burst == ngbe_recv_pkts ||
+   dev->rx_pkt_burst == ngbe_recv_pkts_sc_single_alloc ||
+   dev->rx_pkt_burst == ngbe_recv_pkts_sc_bulk_alloc ||
+   dev->rx_pkt_burst == ngbe_recv_pkts_bulk_alloc)
return ngbe_get_supported_ptypes();
 
return NULL;
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 486c6c3839..e7fe9a03b7 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -106,6 +106,14 @@ int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);
 uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
 
+uint16_t ngbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
+   uint16_t nb_pkts);
+
+uint16_t ngbe_recv_pkts_sc_single_alloc(void *rx_queue,
+   struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+uint16_t ngbe_recv_pkts_sc_bulk_alloc(void *rx_queue,
+   struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
 uint16_t ngbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index a3ef0f7577..49fa978853 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -263,6 +263,243 @@ ngbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t 
ptid_mask)
return ngbe_decode_ptype(ptid);
 }
 
+/*
+ * LOOK_AHEAD defines how many desc statuses to check beyond the
+ * current descriptor.
+ * It must be a pound define for optimal performance.
+ * Do not change the value of LOOK_AHEAD, as the ngbe_rx_scan_hw_ring
+ * function only works with LOOK_AHEAD=8.
+ */
+#define LOOK_AHEAD 8
+#if (LOOK_AHEAD != 8)
+#error "PMD NGBE: LOOK_AHEAD must be 8\n"
+#endif
+static inline int
+ngbe_rx_scan_hw_ring(struct ngbe_rx_queue *rxq)
+{
+   volatile struct ngbe_rx_desc *rxdp;
+   

[dpdk-dev] [PATCH 07/32] net/ngbe: support VLAN and QinQ offload

2021-09-08 Thread Jiawen Wu
Support setting VLAN and QinQ offloads.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   2 +
 doc/guides/nics/ngbe.rst  |   1 +
 drivers/net/ngbe/ngbe_ethdev.c| 273 ++
 drivers/net/ngbe/ngbe_ethdev.h|  42 +
 drivers/net/ngbe/ngbe_rxtx.c  | 119 -
 drivers/net/ngbe/ngbe_rxtx.h  |   3 +
 6 files changed, 434 insertions(+), 6 deletions(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 30fdfe62c7..4ae2d66d15 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -12,6 +12,8 @@ Jumbo frame  = Y
 Scattered Rx = Y
 TSO  = Y
 CRC offload  = P
+VLAN offload = P
+QinQ offload = P
 L3 checksum offload  = P
 L4 checksum offload  = P
 Inner L3 checksum= P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 702a455041..9518a59443 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -13,6 +13,7 @@ Features
 
 - Packet type information
 - Checksum offload
+- VLAN/QinQ stripping and inserting
 - TSO offload
 - Jumbo frames
 - Link state information
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index e7d63f1b14..3903eb0a2c 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -17,6 +17,9 @@
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
+static void ngbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
+static void ngbe_vlan_hw_strip_disable(struct rte_eth_dev *dev,
+   uint16_t queue);
 
 static void ngbe_dev_link_status_print(struct rte_eth_dev *dev);
 static int ngbe_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
@@ -27,6 +30,24 @@ static void ngbe_dev_interrupt_handler(void *param);
 static void ngbe_dev_interrupt_delayed_handler(void *param);
 static void ngbe_configure_msix(struct rte_eth_dev *dev);
 
+#define NGBE_SET_HWSTRIP(h, q) do {\
+   uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+   uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+   (h)->bitmap[idx] |= 1 << bit;\
+   } while (0)
+
+#define NGBE_CLEAR_HWSTRIP(h, q) do {\
+   uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+   uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+   (h)->bitmap[idx] &= ~(1 << bit);\
+   } while (0)
+
+#define NGBE_GET_HWSTRIP(h, q, r) do {\
+   uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+   uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
+   (r) = (h)->bitmap[idx] >> bit & 1;\
+   } while (0)
+
 /*
  * The set of PCI devices this driver supports
  */
@@ -129,6 +150,8 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
 {
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
struct ngbe_hw *hw = ngbe_dev_hw(eth_dev);
+   struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(eth_dev);
+   struct ngbe_hwstrip *hwstrip = NGBE_DEV_HWSTRIP(eth_dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
const struct rte_memzone *mz;
uint32_t ctrl_ext;
@@ -242,6 +265,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
return -ENOMEM;
}
 
+   /* initialize the vfta */
+   memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+   /* initialize the hw strip bitmap*/
+   memset(hwstrip, 0, sizeof(*hwstrip));
+
ctrl_ext = rd32(hw, NGBE_PORTCTL);
/* let hardware know driver is loaded */
ctrl_ext |= NGBE_PORTCTL_DRVLOAD;
@@ -311,6 +340,237 @@ static struct rte_pci_driver rte_ngbe_pmd = {
.remove = eth_ngbe_pci_remove,
 };
 
+void
+ngbe_vlan_hw_filter_disable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t vlnctrl;
+
+   PMD_INIT_FUNC_TRACE();
+
+   /* Filter Table Disable */
+   vlnctrl = rd32(hw, NGBE_VLANCTL);
+   vlnctrl &= ~NGBE_VLANCTL_VFE;
+   wr32(hw, NGBE_VLANCTL, vlnctrl);
+}
+
+void
+ngbe_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   struct ngbe_vfta *shadow_vfta = NGBE_DEV_VFTA(dev);
+   uint32_t vlnctrl;
+   uint16_t i;
+
+   PMD_INIT_FUNC_TRACE();
+
+   /* Filter Table Enable */
+   vlnctrl = rd32(hw, NGBE_VLANCTL);
+   vlnctrl &= ~NGBE_VLANCTL_CFIENA;
+   vlnctrl |= NGBE_VLANCTL_VFE;
+   wr32(hw, NGBE_VLANCTL, vlnctrl);
+
+   /* write whatever is in local vfta copy */
+   for (i = 0; i < NGBE_VFTA_SIZE; i++)
+   wr32(hw, NGBE_VLANTBL(i), shadow_vfta->vfta[i]);
+}
+
+void
+ngbe_vlan_hw_strip_bitmap_set(struct 

[dpdk-dev] [PATCH 08/32] net/ngbe: support basic statistics

2021-09-08 Thread Jiawen Wu
Support reading and clearing basic statistics, and configuring per-queue
stats counter mapping.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini  |   2 +
 doc/guides/nics/ngbe.rst   |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h |   5 +
 drivers/net/ngbe/base/ngbe_hw.c| 101 ++
 drivers/net/ngbe/base/ngbe_hw.h|   1 +
 drivers/net/ngbe/base/ngbe_type.h  | 134 +
 drivers/net/ngbe/ngbe_ethdev.c | 300 +
 drivers/net/ngbe/ngbe_ethdev.h |  19 ++
 8 files changed, 563 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 4ae2d66d15..f310fb102a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -19,6 +19,8 @@ L4 checksum offload  = P
 Inner L3 checksum= P
 Inner L4 checksum= P
 Packet type parsing  = Y
+Basic stats  = Y
+Stats per queue  = Y
 Multiprocess aware   = Y
 Linux= Y
 ARMv8= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 9518a59443..64c07e4741 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -15,6 +15,7 @@ Features
 - Checksum offload
 - VLAN/QinQ stripping and inserting
 - TSO offload
+- Port hardware statistics
 - Jumbo frames
 - Link state information
 - Scattered and gather for TX and RX
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 8863acef0d..0def116c53 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -55,6 +55,10 @@ static inline s32 ngbe_mac_stop_hw_dummy(struct ngbe_hw 
*TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_hw_cntrs_dummy(struct ngbe_hw *TUP0)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_get_mac_addr_dummy(struct ngbe_hw *TUP0, u8 *TUP1)
 {
return NGBE_ERR_OPS_DUMMY;
@@ -178,6 +182,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
hw->mac.start_hw = ngbe_mac_start_hw_dummy;
hw->mac.stop_hw = ngbe_mac_stop_hw_dummy;
+   hw->mac.clear_hw_cntrs = ngbe_mac_clear_hw_cntrs_dummy;
hw->mac.get_mac_addr = ngbe_mac_get_mac_addr_dummy;
hw->mac.enable_rx_dma = ngbe_mac_enable_rx_dma_dummy;
hw->mac.disable_sec_rx_path = ngbe_mac_disable_sec_rx_path_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 6b575fc67b..f302df5d9d 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -19,6 +19,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
DEBUGFUNC("ngbe_start_hw");
 
+   /* Clear statistics registers */
+   hw->mac.clear_hw_cntrs(hw);
+
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
 
@@ -159,6 +162,7 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
msec_delay(50);
 
ngbe_reset_misc_em(hw);
+   hw->mac.clear_hw_cntrs(hw);
 
msec_delay(50);
 
@@ -175,6 +179,102 @@ s32 ngbe_reset_hw_em(struct ngbe_hw *hw)
return status;
 }
 
+/**
+ *  ngbe_clear_hw_cntrs - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware
+ *  Statistics counters are clear on read.
+ **/
+s32 ngbe_clear_hw_cntrs(struct ngbe_hw *hw)
+{
+   u16 i = 0;
+
+   DEBUGFUNC("ngbe_clear_hw_cntrs");
+
+   /* QP Stats */
+   /* don't write clear queue stats */
+   for (i = 0; i < NGBE_MAX_QP; i++) {
+   hw->qp_last[i].rx_qp_packets = 0;
+   hw->qp_last[i].tx_qp_packets = 0;
+   hw->qp_last[i].rx_qp_bytes = 0;
+   hw->qp_last[i].tx_qp_bytes = 0;
+   hw->qp_last[i].rx_qp_mc_packets = 0;
+   hw->qp_last[i].tx_qp_mc_packets = 0;
+   hw->qp_last[i].rx_qp_bc_packets = 0;
+   hw->qp_last[i].tx_qp_bc_packets = 0;
+   }
+
+   /* PB Stats */
+   rd32(hw, NGBE_PBRXLNKXON);
+   rd32(hw, NGBE_PBRXLNKXOFF);
+   rd32(hw, NGBE_PBTXLNKXON);
+   rd32(hw, NGBE_PBTXLNKXOFF);
+
+   /* DMA Stats */
+   rd32(hw, NGBE_DMARXPKT);
+   rd32(hw, NGBE_DMATXPKT);
+
+   rd64(hw, NGBE_DMARXOCTL);
+   rd64(hw, NGBE_DMATXOCTL);
+
+   /* MAC Stats */
+   rd64(hw, NGBE_MACRXERRCRCL);
+   rd64(hw, NGBE_MACRXMPKTL);
+   rd64(hw, NGBE_MACTXMPKTL);
+
+   rd64(hw, NGBE_MACRXPKTL);
+   rd64(hw, NGBE_MACTXPKTL);
+   rd64(hw, NGBE_MACRXGBOCTL);
+
+   rd64(hw, NGBE_MACRXOCTL);
+   rd32(hw, NGBE_MACTXOCTL);
+
+   rd64(hw, NGBE_MACRX1TO64L);
+   rd64(hw, NGBE_MACRX65TO127L);
+   rd64(hw, NGBE_MACRX128TO255L);
+   rd64(hw, NGBE_MACRX256TO511L);
+   rd64(hw, NGBE_MACRX512TO1023L);
+   rd64(hw, NGBE_MACRX1024TOMAXL);
+   rd64(hw, NGBE_MACTX1TO64L);
+   rd64(hw, NGBE_MACTX65TO127L);
+   rd64(hw, NGBE

[dpdk-dev] [PATCH 10/32] net/ngbe: support MTU set

2021-09-08 Thread Jiawen Wu
Support updating port MTU.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  1 +
 drivers/net/ngbe/base/ngbe_type.h |  3 +++
 drivers/net/ngbe/ngbe_ethdev.c| 41 +++
 3 files changed, 45 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 42101020dd..bdb06916e1 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -8,6 +8,7 @@ Speed capabilities   = Y
 Link status  = Y
 Link status event= Y
 Queue start/stop = Y
+MTU update   = Y
 Jumbo frame  = Y
 Scattered Rx = Y
 TSO  = Y
diff --git a/drivers/net/ngbe/base/ngbe_type.h 
b/drivers/net/ngbe/base/ngbe_type.h
index c13f0208fd..78fb0da7fa 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -8,6 +8,7 @@
 
 #define NGBE_LINK_UP_TIME  90 /* 9.0 Seconds */
 
+#define NGBE_FRAME_SIZE_MAX   (9728) /* Maximum frame size, +FCS */
 #define NGBE_FRAME_SIZE_DFT   (1522) /* Default frame size, +FCS */
 #define NGBE_MAX_QP   (8)
 
@@ -316,6 +317,8 @@ struct ngbe_hw {
u16 nb_rx_queues;
u16 nb_tx_queues;
 
+   u32 mode;
+
u32 q_rx_regs[8 * 4];
u32 q_tx_regs[8 * 4];
bool offset_loaded;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 45d7c48011..29f35d9e8d 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -1970,6 +1970,46 @@ ngbe_dev_interrupt_handler(void *param)
ngbe_dev_interrupt_action(dev);
 }
 
+static int
+ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   struct rte_eth_dev_info dev_info;
+   uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
+   struct rte_eth_dev_data *dev_data = dev->data;
+   int ret;
+
+   ret = ngbe_dev_info_get(dev, &dev_info);
+   if (ret != 0)
+   return ret;
+
+   /* check that mtu is within the allowed range */
+   if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+   return -EINVAL;
+
+   /* If device is started, refuse mtu that requires the support of
+* scattered packets when this feature has not been enabled before.
+*/
+   if (dev_data->dev_started && !dev_data->scattered_rx &&
+   (frame_size + 2 * NGBE_VLAN_TAG_SIZE >
+dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+   PMD_INIT_LOG(ERR, "Stop port first.");
+   return -EINVAL;
+   }
+
+   /* update max frame size */
+   dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+   if (hw->mode)
+   wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+   NGBE_FRAME_SIZE_MAX);
+   else
+   wr32m(hw, NGBE_FRMSZ, NGBE_FRMSZ_MAX_MASK,
+   NGBE_FRMSZ_MAX(frame_size));
+
+   return 0;
+}
+
 /**
  * Set the IVAR registers, mapping interrupt causes to vectors
  * @param hw
@@ -2078,6 +2118,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.xstats_get_names   = ngbe_dev_xstats_get_names,
.xstats_get_names_by_id = ngbe_dev_xstats_get_names_by_id,
.queue_stats_mapping_set= ngbe_dev_queue_stats_mapping_set,
+   .mtu_set= ngbe_dev_mtu_set,
.vlan_offload_set   = ngbe_vlan_offload_set,
.rx_queue_start = ngbe_dev_rx_queue_start,
.rx_queue_stop  = ngbe_dev_rx_queue_stop,
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 11/32] net/ngbe: add device promiscuous and allmulticast mode

2021-09-08 Thread Jiawen Wu
Support enabling/disabling promiscuous and allmulticast mode for a port.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  2 +
 doc/guides/nics/ngbe.rst  |  2 +
 drivers/net/ngbe/ngbe_ethdev.c| 63 +++
 3 files changed, 67 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index bdb06916e1..2f38f1e843 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -12,6 +12,8 @@ MTU update   = Y
 Jumbo frame  = Y
 Scattered Rx = Y
 TSO  = Y
+Promiscuous mode = Y
+Allmulticast mode= Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 64c07e4741..8333fba9cd 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -15,6 +15,8 @@ Features
 - Checksum offload
 - VLAN/QinQ stripping and inserting
 - TSO offload
+- Promiscuous mode
+- Multicast mode
 - Port hardware statistics
 - Jumbo frames
 - Link state information
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 29f35d9e8d..ce71edd6d8 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -1674,6 +1674,65 @@ ngbe_dev_link_update(struct rte_eth_dev *dev, int 
wait_to_complete)
return ngbe_dev_link_update_share(dev, wait_to_complete);
 }
 
+static int
+ngbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t fctrl;
+
+   fctrl = rd32(hw, NGBE_PSRCTL);
+   fctrl |= (NGBE_PSRCTL_UCP | NGBE_PSRCTL_MCP);
+   wr32(hw, NGBE_PSRCTL, fctrl);
+
+   return 0;
+}
+
+static int
+ngbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t fctrl;
+
+   fctrl = rd32(hw, NGBE_PSRCTL);
+   fctrl &= (~NGBE_PSRCTL_UCP);
+   if (dev->data->all_multicast == 1)
+   fctrl |= NGBE_PSRCTL_MCP;
+   else
+   fctrl &= (~NGBE_PSRCTL_MCP);
+   wr32(hw, NGBE_PSRCTL, fctrl);
+
+   return 0;
+}
+
+static int
+ngbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t fctrl;
+
+   fctrl = rd32(hw, NGBE_PSRCTL);
+   fctrl |= NGBE_PSRCTL_MCP;
+   wr32(hw, NGBE_PSRCTL, fctrl);
+
+   return 0;
+}
+
+static int
+ngbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t fctrl;
+
+   if (dev->data->promiscuous == 1)
+   return 0; /* must remain in all_multicast mode */
+
+   fctrl = rd32(hw, NGBE_PSRCTL);
+   fctrl &= (~NGBE_PSRCTL_MCP);
+   wr32(hw, NGBE_PSRCTL, fctrl);
+
+   return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during NIC initialized.
@@ -2109,6 +2168,10 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.dev_stop   = ngbe_dev_stop,
.dev_close  = ngbe_dev_close,
.dev_reset  = ngbe_dev_reset,
+   .promiscuous_enable = ngbe_dev_promiscuous_enable,
+   .promiscuous_disable= ngbe_dev_promiscuous_disable,
+   .allmulticast_enable= ngbe_dev_allmulticast_enable,
+   .allmulticast_disable   = ngbe_dev_allmulticast_disable,
.link_update= ngbe_dev_link_update,
.stats_get  = ngbe_dev_stats_get,
.xstats_get = ngbe_dev_xstats_get,
-- 
2.21.0.windows.1
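
As a usage illustration (not part of the patch), the new dev ops are reached through the standard ethdev calls; port_id is hypothetical:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: open up the Rx filters on one port. */
static void
open_rx_filters(uint16_t port_id)
{
        if (rte_eth_promiscuous_enable(port_id) != 0)
                printf("port %u: promiscuous enable failed\n", port_id);
        if (rte_eth_allmulticast_enable(port_id) != 0)
                printf("port %u: allmulticast enable failed\n", port_id);
}

Note that, as in the disable paths above, leaving promiscuous mode keeps MCP set while allmulticast is still on, and leaving allmulticast mode is a no-op while promiscuous mode is active.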





[dpdk-dev] [PATCH 12/32] net/ngbe: support getting FW version

2021-09-08 Thread Jiawen Wu
Add firmware version get operation.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini   |  1 +
 doc/guides/nics/ngbe.rst|  1 +
 drivers/net/ngbe/base/ngbe_dummy.h  |  6 
 drivers/net/ngbe/base/ngbe_eeprom.c | 56 +
 drivers/net/ngbe/base/ngbe_eeprom.h |  5 +++
 drivers/net/ngbe/base/ngbe_hw.c |  3 ++
 drivers/net/ngbe/base/ngbe_mng.c| 44 +++
 drivers/net/ngbe/base/ngbe_mng.h|  5 +++
 drivers/net/ngbe/base/ngbe_type.h   |  2 ++
 drivers/net/ngbe/ngbe_ethdev.c  | 21 +++
 10 files changed, 144 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 2f38f1e843..1006c3935b 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -25,6 +25,7 @@ Packet type parsing  = Y
 Basic stats  = Y
 Extended stats   = Y
 Stats per queue  = Y
+FW version   = Y
 Multiprocess aware   = Y
 Linux= Y
 ARMv8= Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 8333fba9cd..50a6e85c49 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -21,6 +21,7 @@ Features
 - Jumbo frames
 - Link state information
 - Scattered and gather for TX and RX
+- FW version
 
 
 Prerequisites
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 0def116c53..689480cc9a 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -33,6 +33,11 @@ static inline s32 ngbe_rom_init_params_dummy(struct ngbe_hw 
*TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_rom_read32_dummy(struct ngbe_hw *TUP0, u32 TUP1,
+   u32 *TUP2)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_rom_validate_checksum_dummy(struct ngbe_hw *TUP0,
u16 *TUP1)
 {
@@ -177,6 +182,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 {
hw->bus.set_lan_id = ngbe_bus_set_lan_id_dummy;
hw->rom.init_params = ngbe_rom_init_params_dummy;
+   hw->rom.read32 = ngbe_rom_read32_dummy;
hw->rom.validate_checksum = ngbe_rom_validate_checksum_dummy;
hw->mac.init_hw = ngbe_mac_init_hw_dummy;
hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_eeprom.c 
b/drivers/net/ngbe/base/ngbe_eeprom.c
index 3dcd5c2f6c..9ae2f0badb 100644
--- a/drivers/net/ngbe/base/ngbe_eeprom.c
+++ b/drivers/net/ngbe/base/ngbe_eeprom.c
@@ -161,6 +161,30 @@ void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
ngbe_flush(hw);
 }
 
+/**
+ *  ngbe_ee_read32 - Read EEPROM word using a host interface cmd
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 32 bit word from the EEPROM using the hostif.
+ **/
+s32 ngbe_ee_read32(struct ngbe_hw *hw, u32 addr, u32 *data)
+{
+   const u32 mask = NGBE_MNGSEM_SWMBX | NGBE_MNGSEM_SWFLASH;
+   int err;
+
+   err = hw->mac.acquire_swfw_sync(hw, mask);
+   if (err)
+   return err;
+
+   err = ngbe_hic_sr_read(hw, addr, (u8 *)data, 4);
+
+   hw->mac.release_swfw_sync(hw, mask);
+
+   return err;
+}
+
 /**
  *  ngbe_validate_eeprom_checksum_em - Validate EEPROM checksum
  *  @hw: pointer to hardware structure
@@ -201,3 +225,35 @@ s32 ngbe_validate_eeprom_checksum_em(struct ngbe_hw *hw,
return err;
 }
 
+/**
+ * ngbe_save_eeprom_version
+ * @hw: pointer to hardware structure
+ *
+ * Save off EEPROM version number and Option Rom version which
+ * together make a unique identify for the eeprom
+ */
+s32 ngbe_save_eeprom_version(struct ngbe_hw *hw)
+{
+   u32 eeprom_verl = 0;
+   u32 etrack_id = 0;
+   u32 offset = (hw->rom.sw_addr + NGBE_EEPROM_VERSION_L) << 1;
+
+   DEBUGFUNC("ngbe_save_eeprom_version");
+
+   if (hw->bus.lan_id == 0) {
+   hw->rom.read32(hw, offset, &eeprom_verl);
+   etrack_id = eeprom_verl;
+   wr32(hw, NGBE_EEPROM_VERSION_STORE_REG, etrack_id);
+   wr32(hw, NGBE_CALSUM_CAP_STATUS,
+   hw->rom.cksum_devcap | 0x1);
+   } else if (hw->rom.cksum_devcap) {
+   etrack_id = hw->rom.saved_version;
+   } else {
+   hw->rom.read32(hw, offset, &eeprom_verl);
+   etrack_id = eeprom_verl;
+   }
+
+   hw->eeprom_id = etrack_id;
+
+   return 0;
+}
diff --git a/drivers/net/ngbe/base/ngbe_eeprom.h 
b/drivers/net/ngbe/base/ngbe_eeprom.h
index b433077629..5f27425913 100644
--- a/drivers/net/ngbe/base/ngbe_eeprom.h
+++ b/drivers/net/ngbe/base/ngbe_eeprom.h
@@ -6,6 +6,8 @@
 #ifndef _NGBE_EEPROM_H_
 #define _NGBE_EEPROM_H_
 
+#define NGBE_EEPROM_VERSION_L  0x1D
+#define NGBE_EEPROM_VERSION_H  0x1E
 #define NGBE_CALSUM_CAP_STATUS 0x10224
 #define NGBE_
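
As a usage note (not part of the patch), the version saved by ngbe_save_eeprom_version() is what an application sees through the generic firmware query; a minimal hypothetical sketch:

#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: print the firmware version string reported by
 * the new fw_version_get support. */
static void
print_fw_version(uint16_t port_id)
{
        char fw_version[32];

        if (rte_eth_dev_fw_version_get(port_id, fw_version,
                                       sizeof(fw_version)) == 0)
                printf("port %u firmware: %s\n", port_id, fw_version);
}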

[dpdk-dev] [PATCH 13/32] net/ngbe: add loopback mode

2021-09-08 Thread Jiawen Wu
Support loopback operation mode.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ethdev.c |  6 ++
 drivers/net/ngbe/ngbe_rxtx.c   | 28 
 2 files changed, 34 insertions(+)

diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 5566bf26a9..9caca55df3 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -850,6 +850,10 @@ ngbe_dev_start(struct rte_eth_dev *dev)
goto error;
}
 
+   /* Skip link setup if loopback mode is enabled. */
+   if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
+   goto skip_link_setup;
+
err = hw->mac.check_link(hw, &speed, &link_up, 0);
if (err != 0)
goto error;
@@ -893,6 +897,8 @@ ngbe_dev_start(struct rte_eth_dev *dev)
if (err != 0)
goto error;
 
+skip_link_setup:
+
if (rte_intr_allow_others(intr_handle)) {
ngbe_dev_misc_interrupt_setup(dev);
/* check if lsc interrupt is enabled */
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 1151173b02..22693c144a 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -2420,6 +2420,17 @@ ngbe_dev_rx_init(struct rte_eth_dev *dev)
NGBE_FRMSZ_MAX(NGBE_FRAME_SIZE_DFT));
}
 
+   /*
+* If loopback mode is configured, set LPBK bit.
+*/
+   hlreg0 = rd32(hw, NGBE_PSRCTL);
+   if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
+   hlreg0 |= NGBE_PSRCTL_LBENA;
+   else
+   hlreg0 &= ~NGBE_PSRCTL_LBENA;
+
+   wr32(hw, NGBE_PSRCTL, hlreg0);
+
/*
 * Assume no header split and no VLAN strip support
 * on any Rx queue first .
@@ -2538,6 +2549,19 @@ ngbe_dev_tx_init(struct rte_eth_dev *dev)
}
 }
 
+/*
+ * Set up link loopback mode Tx->Rx.
+ */
+static inline void
+ngbe_setup_loopback_link(struct ngbe_hw *hw)
+{
+   PMD_INIT_FUNC_TRACE();
+
+   wr32m(hw, NGBE_MACRXCFG, NGBE_MACRXCFG_LB, NGBE_MACRXCFG_LB);
+
+   msec_delay(50);
+}
+
 /*
  * Start Transmit and Receive Units.
  */
@@ -2592,6 +2616,10 @@ ngbe_dev_rxtx_start(struct rte_eth_dev *dev)
rxctrl |= NGBE_PBRXCTL_ENA;
hw->mac.enable_rx_dma(hw, rxctrl);
 
+   /* If loopback mode is enabled, set up the link accordingly */
+   if (hw->is_pf && dev->data->dev_conf.lpbk_mode)
+   ngbe_setup_loopback_link(hw);
+
return 0;
 }
 
-- 
2.21.0.windows.1
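
A configuration sketch (not part of the patch): loopback is requested per port at configure time; the assumption below is that any non-zero lpbk_mode selects the Tx->Rx MAC loopback programmed above.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: configure a port in loopback mode. */
static int
configure_loopback(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        conf.lpbk_mode = 1;     /* non-zero: Tx->Rx loopback (assumed) */

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}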





[dpdk-dev] [PATCH 09/32] net/ngbe: support device xstats

2021-09-08 Thread Jiawen Wu
Add device extended stats, retrieved by reading hardware registers.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   1 +
 drivers/net/ngbe/ngbe_ethdev.c| 316 ++
 drivers/net/ngbe/ngbe_ethdev.h|   6 +
 3 files changed, 323 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index f310fb102a..42101020dd 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -20,6 +20,7 @@ Inner L3 checksum= P
 Inner L4 checksum= P
 Packet type parsing  = Y
 Basic stats  = Y
+Extended stats   = Y
 Stats per queue  = Y
 Multiprocess aware   = Y
 Linux= Y
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 3d459718b1..45d7c48011 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -84,6 +84,104 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
 
 static const struct eth_dev_ops ngbe_eth_dev_ops;
 
+#define HW_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, m)}
+#define HW_XSTAT_NAME(m, n) {n, offsetof(struct ngbe_hw_stats, m)}
+static const struct rte_ngbe_xstats_name_off rte_ngbe_stats_strings[] = {
+   /* MNG RxTx */
+   HW_XSTAT(mng_bmc2host_packets),
+   HW_XSTAT(mng_host2bmc_packets),
+   /* Basic RxTx */
+   HW_XSTAT(rx_packets),
+   HW_XSTAT(tx_packets),
+   HW_XSTAT(rx_bytes),
+   HW_XSTAT(tx_bytes),
+   HW_XSTAT(rx_total_bytes),
+   HW_XSTAT(rx_total_packets),
+   HW_XSTAT(tx_total_packets),
+   HW_XSTAT(rx_total_missed_packets),
+   HW_XSTAT(rx_broadcast_packets),
+   HW_XSTAT(rx_multicast_packets),
+   HW_XSTAT(rx_management_packets),
+   HW_XSTAT(tx_management_packets),
+   HW_XSTAT(rx_management_dropped),
+
+   /* Basic Error */
+   HW_XSTAT(rx_crc_errors),
+   HW_XSTAT(rx_illegal_byte_errors),
+   HW_XSTAT(rx_error_bytes),
+   HW_XSTAT(rx_mac_short_packet_dropped),
+   HW_XSTAT(rx_length_errors),
+   HW_XSTAT(rx_undersize_errors),
+   HW_XSTAT(rx_fragment_errors),
+   HW_XSTAT(rx_oversize_errors),
+   HW_XSTAT(rx_jabber_errors),
+   HW_XSTAT(rx_l3_l4_xsum_error),
+   HW_XSTAT(mac_local_errors),
+   HW_XSTAT(mac_remote_errors),
+
+   /* MACSEC */
+   HW_XSTAT(tx_macsec_pkts_untagged),
+   HW_XSTAT(tx_macsec_pkts_encrypted),
+   HW_XSTAT(tx_macsec_pkts_protected),
+   HW_XSTAT(tx_macsec_octets_encrypted),
+   HW_XSTAT(tx_macsec_octets_protected),
+   HW_XSTAT(rx_macsec_pkts_untagged),
+   HW_XSTAT(rx_macsec_pkts_badtag),
+   HW_XSTAT(rx_macsec_pkts_nosci),
+   HW_XSTAT(rx_macsec_pkts_unknownsci),
+   HW_XSTAT(rx_macsec_octets_decrypted),
+   HW_XSTAT(rx_macsec_octets_validated),
+   HW_XSTAT(rx_macsec_sc_pkts_unchecked),
+   HW_XSTAT(rx_macsec_sc_pkts_delayed),
+   HW_XSTAT(rx_macsec_sc_pkts_late),
+   HW_XSTAT(rx_macsec_sa_pkts_ok),
+   HW_XSTAT(rx_macsec_sa_pkts_invalid),
+   HW_XSTAT(rx_macsec_sa_pkts_notvalid),
+   HW_XSTAT(rx_macsec_sa_pkts_unusedsa),
+   HW_XSTAT(rx_macsec_sa_pkts_notusingsa),
+
+   /* MAC RxTx */
+   HW_XSTAT(rx_size_64_packets),
+   HW_XSTAT(rx_size_65_to_127_packets),
+   HW_XSTAT(rx_size_128_to_255_packets),
+   HW_XSTAT(rx_size_256_to_511_packets),
+   HW_XSTAT(rx_size_512_to_1023_packets),
+   HW_XSTAT(rx_size_1024_to_max_packets),
+   HW_XSTAT(tx_size_64_packets),
+   HW_XSTAT(tx_size_65_to_127_packets),
+   HW_XSTAT(tx_size_128_to_255_packets),
+   HW_XSTAT(tx_size_256_to_511_packets),
+   HW_XSTAT(tx_size_512_to_1023_packets),
+   HW_XSTAT(tx_size_1024_to_max_packets),
+
+   /* Flow Control */
+   HW_XSTAT(tx_xon_packets),
+   HW_XSTAT(rx_xon_packets),
+   HW_XSTAT(tx_xoff_packets),
+   HW_XSTAT(rx_xoff_packets),
+
+   HW_XSTAT_NAME(tx_xon_packets, "tx_flow_control_xon_packets"),
+   HW_XSTAT_NAME(rx_xon_packets, "rx_flow_control_xon_packets"),
+   HW_XSTAT_NAME(tx_xoff_packets, "tx_flow_control_xoff_packets"),
+   HW_XSTAT_NAME(rx_xoff_packets, "rx_flow_control_xoff_packets"),
+};
+
+#define NGBE_NB_HW_STATS (sizeof(rte_ngbe_stats_strings) / \
+  sizeof(rte_ngbe_stats_strings[0]))
+
+/* Per-queue statistics */
+#define QP_XSTAT(m) {#m, offsetof(struct ngbe_hw_stats, qp[0].m)}
+static const struct rte_ngbe_xstats_name_off rte_ngbe_qp_strings[] = {
+   QP_XSTAT(rx_qp_packets),
+   QP_XSTAT(tx_qp_packets),
+   QP_XSTAT(rx_qp_bytes),
+   QP_XSTAT(tx_qp_bytes),
+   QP_XSTAT(rx_qp_mc_packets),
+};
+
+#define NGBE_NB_QP_STATS (sizeof(rte_ngbe_qp_strings) / \
+  sizeof(rte_ngbe_qp_strings[0]))
+
 static inline int32_t
 ngbe_pf_reset_hw(struct ngbe_hw *hw)
 {
@@ -1213,6 +1311,219 @@ ngbe_dev_stats_reset(struct rte_eth_dev *dev)
return 0;
 }
 
+/* This function calculates the number of xstats based on 
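
A retrieval sketch (not part of the patch): the counters in the tables above are exported through the standard xstats API; names and sizes are discovered at run time.

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: dump every extended statistic of a port. */
static void
dump_xstats(uint16_t port_id)
{
        int n = rte_eth_xstats_get(port_id, NULL, 0);
        struct rte_eth_xstat *xstats;
        struct rte_eth_xstat_name *names;
        int i;

        if (n <= 0)
                return;
        xstats = calloc(n, sizeof(*xstats));
        names = calloc(n, sizeof(*names));
        if (xstats != NULL && names != NULL &&
            rte_eth_xstats_get(port_id, xstats, n) == n &&
            rte_eth_xstats_get_names(port_id, names, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %" PRIu64 "\n",
                               names[i].name, xstats[i].value);
        }
        free(xstats);
        free(names);
}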

[dpdk-dev] [PATCH 14/32] net/ngbe: support Rx interrupt

2021-09-08 Thread Jiawen Wu
Support Rx queue interrupt.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  1 +
 doc/guides/nics/ngbe.rst  |  1 +
 drivers/net/ngbe/ngbe_ethdev.c| 35 +++
 3 files changed, 37 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 1006c3935b..d14469eb43 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -7,6 +7,7 @@
 Speed capabilities   = Y
 Link status  = Y
 Link status event= Y
+Rx interrupt = Y
 Queue start/stop = Y
 MTU update   = Y
 Jumbo frame  = Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 50a6e85c49..2783c4a3c4 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -20,6 +20,7 @@ Features
 - Port hardware statistics
 - Jumbo frames
 - Link state information
+- Interrupt mode for RX
 - Scattered and gather for TX and RX
 - FW version
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 9caca55df3..52642161b7 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -2095,6 +2095,39 @@ ngbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
return 0;
 }
 
+static int
+ngbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+   struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+   struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+   uint32_t mask;
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+   if (queue_id < 32) {
+   mask = rd32(hw, NGBE_IMS(0));
+   mask &= (1 << queue_id);
+   wr32(hw, NGBE_IMS(0), mask);
+   }
+   rte_intr_enable(intr_handle);
+
+   return 0;
+}
+
+static int
+ngbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+   uint32_t mask;
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+   if (queue_id < 32) {
+   mask = rd32(hw, NGBE_IMS(0));
+   mask &= ~(1 << queue_id);
+   wr32(hw, NGBE_IMS(0), mask);
+   }
+
+   return 0;
+}
+
 /**
  * Set the IVAR registers, mapping interrupt causes to vectors
  * @param hw
@@ -2215,6 +2248,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.tx_queue_start = ngbe_dev_tx_queue_start,
.tx_queue_stop  = ngbe_dev_tx_queue_stop,
.rx_queue_setup = ngbe_dev_rx_queue_setup,
+   .rx_queue_intr_enable   = ngbe_dev_rx_queue_intr_enable,
+   .rx_queue_intr_disable  = ngbe_dev_rx_queue_intr_disable,
.rx_queue_release   = ngbe_dev_rx_queue_release,
.tx_queue_setup = ngbe_dev_tx_queue_setup,
.tx_queue_release   = ngbe_dev_tx_queue_release,
-- 
2.21.0.windows.1
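
A usage sketch (not part of the patch): Rx interrupts must be requested at configure time, after which a polling thread can arm and disarm them around its wait; the queue id and the epoll handling are assumptions.

#include <rte_ethdev.h>

/* Hypothetical sketch: arm the Rx interrupt for one queue, wait for
 * traffic, then disarm it again.  Assumes the port was configured
 * with intr_conf.rxq = 1 and the queue's fd registered with epoll. */
static void
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        /* ... sleep in rte_epoll_wait() until the queue fires ... */
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
}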





[dpdk-dev] [PATCH 15/32] net/ngbe: support MAC filters

2021-09-08 Thread Jiawen Wu
Add MAC addresses to filter incoming packets, support setting
multicast addresses for filtering, and support setting the unicast
table array.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini  |   2 +
 doc/guides/nics/ngbe.rst   |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h |   6 +
 drivers/net/ngbe/base/ngbe_hw.c| 135 +-
 drivers/net/ngbe/base/ngbe_hw.h|   4 +
 drivers/net/ngbe/base/ngbe_type.h  |  11 ++
 drivers/net/ngbe/ngbe_ethdev.c | 175 +
 drivers/net/ngbe/ngbe_ethdev.h |  13 +++
 8 files changed, 346 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index d14469eb43..4b22dc683a 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -15,6 +15,8 @@ Scattered Rx = Y
 TSO  = Y
 Promiscuous mode = Y
 Allmulticast mode= Y
+Unicast MAC filter   = Y
+Multicast MAC filter = Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 2783c4a3c4..4d01c27064 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -11,6 +11,7 @@ for Wangxun 1 Gigabit Ethernet NICs.
 Features
 
 
+- MAC filtering
 - Packet type information
 - Checksum offload
 - VLAN/QinQ stripping and inserting
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 689480cc9a..fe2d53f312 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -127,6 +127,11 @@ static inline s32 ngbe_mac_init_rx_addrs_dummy(struct 
ngbe_hw *TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_update_mc_addr_list_dummy(struct ngbe_hw *TUP0,
+   u8 *TUP1, u32 TUP2, ngbe_mc_addr_itr TUP3, bool TUP4)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_init_thermal_ssth_dummy(struct ngbe_hw *TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
@@ -203,6 +208,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.set_vmdq = ngbe_mac_set_vmdq_dummy;
hw->mac.clear_vmdq = ngbe_mac_clear_vmdq_dummy;
hw->mac.init_rx_addrs = ngbe_mac_init_rx_addrs_dummy;
+   hw->mac.update_mc_addr_list = ngbe_mac_update_mc_addr_list_dummy;
hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
hw->mac.check_overtemp = ngbe_mac_check_overtemp_dummy;
hw->phy.identify = ngbe_phy_identify_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 0dabb6c1c7..897baf179d 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -567,6 +567,138 @@ s32 ngbe_init_rx_addrs(struct ngbe_hw *hw)
return 0;
 }
 
+/**
+ *  ngbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits, from a multicast address, to determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ *  incoming rx multicast addresses, to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ *  by the MO field of the PSRCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 ngbe_mta_vector(struct ngbe_hw *hw, u8 *mc_addr)
+{
+   u32 vector = 0;
+
+   DEBUGFUNC("ngbe_mta_vector");
+
+   switch (hw->mac.mc_filter_type) {
+   case 0:   /* use bits [47:36] of the address */
+   vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+   break;
+   case 1:   /* use bits [46:35] of the address */
+   vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+   break;
+   case 2:   /* use bits [45:34] of the address */
+   vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+   break;
+   case 3:   /* use bits [43:32] of the address */
+   vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+   break;
+   default:  /* Invalid mc_filter_type */
+   DEBUGOUT("MC filter type param set incorrectly\n");
+   ASSERT(0);
+   break;
+   }
+
+   /* vector can only be 12-bits or boundary will be exceeded */
+   vector &= 0xFFF;
+   return vector;
+}
+
+/**
+ *  ngbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: Multicast address
+ *
+ *  Sets the bit-vector in the multicast table.
+ **/
+void ngbe_set_mta(struct ngbe_hw *hw, u8 *mc_addr)
+{
+   u32 vector;
+   u32 vector_bit;
+   u32 vector_reg;
+
+   DEBUGFUNC("ngbe_set_mta");
+
+   hw->addr_ctrl.mta_in_use++;
+
+   vector = ngbe_mta_vector(hw, mc_addr);
+   DEBUGOUT(" bit-vector = 0x%03X\n", vector);
+
+  
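
A usage sketch (not part of the patch): the new RAR/MTA handling is driven through the generic filtering calls; the addresses below are examples only.

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Hypothetical sketch: add one extra unicast MAC and a one-entry
 * multicast list on a port. */
static int
add_mac_filters(uint16_t port_id)
{
        struct rte_ether_addr ucast = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
        struct rte_ether_addr mcast[1] = { {
                .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } } };
        int ret;

        ret = rte_eth_dev_mac_addr_add(port_id, &ucast, 0);
        if (ret != 0)
                return ret;
        return rte_eth_dev_set_mc_addr_list(port_id, mcast, 1);
}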

[dpdk-dev] [PATCH 17/32] net/ngbe: support RSS hash

2021-09-08 Thread Jiawen Wu
Support RSS hashing on Rx, and configuration of RSS hash computation.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   3 +
 doc/guides/nics/ngbe.rst  |   2 +
 drivers/net/ngbe/meson.build  |   2 +
 drivers/net/ngbe/ngbe_ethdev.c|  99 +
 drivers/net/ngbe/ngbe_ethdev.h|  27 
 drivers/net/ngbe/ngbe_rxtx.c  | 235 ++
 6 files changed, 368 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 265edba361..70d731a695 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -17,6 +17,9 @@ Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
+RSS hash = Y
+RSS key update   = Y
+RSS reta update  = Y
 VLAN filter  = Y
 CRC offload  = P
 VLAN offload = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 3683862fd1..ce160e832c 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -11,6 +11,8 @@ for Wangxun 1 Gigabit Ethernet NICs.
 Features
 
 
+- Multiple queues for Tx and Rx
+- Receiver Side Scaling (RSS)
 - MAC/VLAN filtering
 - Packet type information
 - Checksum offload
diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build
index 05f94fe7d6..c55e6c20e8 100644
--- a/drivers/net/ngbe/meson.build
+++ b/drivers/net/ngbe/meson.build
@@ -16,4 +16,6 @@ sources = files(
 'ngbe_rxtx.c',
 )
 
+deps += ['hash']
+
 includes += include_directories('base')
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index acc018c811..0bc1400aea 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -856,6 +856,9 @@ ngbe_dev_configure(struct rte_eth_dev *dev)
 
PMD_INIT_FUNC_TRACE();
 
+   if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+   dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* set flag to update link status after init */
intr->flags |= NGBE_FLAG_NEED_LINK_UPDATE;
 
@@ -1082,6 +1085,7 @@ static int
 ngbe_dev_stop(struct rte_eth_dev *dev)
 {
struct rte_eth_link link;
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
struct ngbe_hw *hw = ngbe_dev_hw(dev);
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
@@ -1129,6 +1133,8 @@ ngbe_dev_stop(struct rte_eth_dev *dev)
intr_handle->intr_vec = NULL;
}
 
+   adapter->rss_reta_updated = 0;
+
hw->adapter_stopped = true;
dev->data->dev_started = 0;
 
@@ -1718,6 +1724,10 @@ ngbe_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->rx_desc_lim = rx_desc_lim;
dev_info->tx_desc_lim = tx_desc_lim;
 
+   dev_info->hash_key_size = NGBE_HKEY_MAX_INDEX * sizeof(uint32_t);
+   dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
+   dev_info->flow_type_rss_offloads = NGBE_RSS_OFFLOAD_ALL;
+
dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_100M |
ETH_LINK_SPEED_10M;
 
@@ -2184,6 +2194,91 @@ ngbe_dev_interrupt_handler(void *param)
ngbe_dev_interrupt_action(dev);
 }
 
+int
+ngbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+   uint8_t i, j, mask;
+   uint32_t reta;
+   uint16_t idx, shift;
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+
+   PMD_INIT_FUNC_TRACE();
+
+   if (!hw->is_pf) {
+   PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+   "NIC.");
+   return -ENOTSUP;
+   }
+
+   if (reta_size != ETH_RSS_RETA_SIZE_128) {
+   PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+   "(%d) doesn't match the number hardware can supported "
+   "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+   return -EINVAL;
+   }
+
+   for (i = 0; i < reta_size; i += 4) {
+   idx = i / RTE_RETA_GROUP_SIZE;
+   shift = i % RTE_RETA_GROUP_SIZE;
+   mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+   if (!mask)
+   continue;
+
+   reta = rd32a(hw, NGBE_REG_RSSTBL, i >> 2);
+   for (j = 0; j < 4; j++) {
+   if (RS8(mask, j, 0x1)) {
+   reta  &= ~(MS32(8 * j, 0xFF));
+   reta |= LS32(reta_conf[idx].reta[shift + j],
+   8 * j, 0xFF);
+   }
+   }
+   wr32a(hw, NGBE_REG_RSSTBL, i >> 2, reta);
+   }
+   ada
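
A usage sketch (not part of the patch): the 128-entry redirection table updated above can be populated from an application as follows; nb_queues is hypothetical and assumed non-zero.

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: spread all RETA entries round-robin over the
 * first nb_queues Rx queues. */
static int
spread_reta(uint16_t port_id, uint16_t nb_queues)
{
        struct rte_eth_rss_reta_entry64 reta_conf[ETH_RSS_RETA_SIZE_128 /
                                                  RTE_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
                uint16_t idx = i / RTE_RETA_GROUP_SIZE;
                uint16_t shift = i % RTE_RETA_GROUP_SIZE;

                reta_conf[idx].mask |= 1ULL << shift;
                reta_conf[idx].reta[shift] = i % nb_queues;
        }
        return rte_eth_dev_rss_reta_update(port_id, reta_conf,
                                           ETH_RSS_RETA_SIZE_128);
}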

[dpdk-dev] [PATCH 16/32] net/ngbe: support VLAN filter

2021-09-08 Thread Jiawen Wu
Support filtering on a VLAN tag identifier.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini  |   1 +
 doc/guides/nics/ngbe.rst   |   2 +-
 drivers/net/ngbe/base/ngbe_dummy.h |   5 ++
 drivers/net/ngbe/base/ngbe_hw.c|  29 +++
 drivers/net/ngbe/base/ngbe_hw.h|   2 +
 drivers/net/ngbe/base/ngbe_type.h  |   3 +
 drivers/net/ngbe/ngbe_ethdev.c | 128 +
 7 files changed, 169 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 4b22dc683a..265edba361 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -17,6 +17,7 @@ Promiscuous mode = Y
 Allmulticast mode= Y
 Unicast MAC filter   = Y
 Multicast MAC filter = Y
+VLAN filter  = Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 4d01c27064..3683862fd1 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -11,7 +11,7 @@ for Wangxun 1 Gigabit Ethernet NICs.
 Features
 
 
-- MAC filtering
+- MAC/VLAN filtering
 - Packet type information
 - Checksum offload
 - VLAN/QinQ stripping and inserting
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index fe2d53f312..7814fd6226 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -132,6 +132,10 @@ static inline s32 
ngbe_mac_update_mc_addr_list_dummy(struct ngbe_hw *TUP0,
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_clear_vfta_dummy(struct ngbe_hw *TUP0)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_init_thermal_ssth_dummy(struct ngbe_hw *TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
@@ -209,6 +213,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.clear_vmdq = ngbe_mac_clear_vmdq_dummy;
hw->mac.init_rx_addrs = ngbe_mac_init_rx_addrs_dummy;
hw->mac.update_mc_addr_list = ngbe_mac_update_mc_addr_list_dummy;
+   hw->mac.clear_vfta = ngbe_mac_clear_vfta_dummy;
hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
hw->mac.check_overtemp = ngbe_mac_check_overtemp_dummy;
hw->phy.identify = ngbe_phy_identify_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 897baf179d..ce0867575a 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -19,6 +19,9 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
DEBUGFUNC("ngbe_start_hw");
 
+   /* Clear the VLAN filter table */
+   hw->mac.clear_vfta(hw);
+
/* Clear statistics registers */
hw->mac.clear_hw_cntrs(hw);
 
@@ -910,6 +913,30 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw)
return 0;
 }
 
+/**
+ *  ngbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+s32 ngbe_clear_vfta(struct ngbe_hw *hw)
+{
+   u32 offset;
+
+   DEBUGFUNC("ngbe_clear_vfta");
+
+   for (offset = 0; offset < hw->mac.vft_size; offset++)
+   wr32(hw, NGBE_VLANTBL(offset), 0);
+
+   for (offset = 0; offset < NGBE_NUM_POOL; offset++) {
+   wr32(hw, NGBE_PSRVLANIDX, offset);
+   wr32(hw, NGBE_PSRVLAN, 0);
+   wr32(hw, NGBE_PSRVLANPLM(0), 0);
+   }
+
+   return 0;
+}
+
 /**
  *  ngbe_check_mac_link_em - Determine link and speed status
  *  @hw: pointer to hardware structure
@@ -1238,6 +1265,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
mac->update_mc_addr_list = ngbe_update_mc_addr_list;
mac->set_vmdq = ngbe_set_vmdq;
mac->clear_vmdq = ngbe_clear_vmdq;
+   mac->clear_vfta = ngbe_clear_vfta;
 
/* Link */
mac->get_link_capabilities = ngbe_get_link_capabilities_em;
@@ -1254,6 +1282,7 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
rom->validate_checksum = ngbe_validate_eeprom_checksum_em;
 
mac->mcft_size  = NGBE_EM_MC_TBL_SIZE;
+   mac->vft_size   = NGBE_EM_VFT_TBL_SIZE;
mac->num_rar_entries= NGBE_EM_RAR_ENTRIES;
mac->max_rx_queues  = NGBE_EM_MAX_RX_QUEUES;
mac->max_tx_queues  = NGBE_EM_MAX_TX_QUEUES;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index f06baa4395..a27bd3e650 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -12,6 +12,7 @@
 #define NGBE_EM_MAX_RX_QUEUES 8
 #define NGBE_EM_RAR_ENTRIES   32
 #define NGBE_EM_MC_TBL_SIZE   32
+#define NGBE_EM_VFT_TBL_SIZE  128
 
 s32 ngbe_init_hw(struct ngbe_hw *hw);
 s32 ngbe_start_hw(struct ngbe_hw *hw);
@@ -48,6 +49,7 @@ void ngbe_release_swfw_sync(struct ngbe_hw *hw, u32 mask);
 s32 ngbe_set_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
 s32 ngbe_clear_vmdq(struct ngbe_hw *hw, u32 rar, u32 vmdq);
 s
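
A usage sketch (not part of the patch): once DEV_RX_OFFLOAD_VLAN_FILTER is enabled in rxmode.offloads, individual VLAN IDs are admitted through the generic call below; the VLAN ID is an example.

#include <rte_ethdev.h>

/* Hypothetical sketch: accept only VLAN 100 on the port.  The call is
 * routed to the PMD's VLAN filter handling, which programs the VFTA
 * cleared by ngbe_clear_vfta() above. */
static int
allow_vlan_100(uint16_t port_id)
{
        return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}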

[dpdk-dev] [PATCH 18/32] net/ngbe: support SRIOV

2021-09-08 Thread Jiawen Wu
Initialize and configure the PF module to support SR-IOV.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini  |   1 +
 drivers/net/ngbe/base/meson.build  |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h |  17 +++
 drivers/net/ngbe/base/ngbe_hw.c|  47 ++-
 drivers/net/ngbe/base/ngbe_mbx.c   |  30 +
 drivers/net/ngbe/base/ngbe_mbx.h   |  11 ++
 drivers/net/ngbe/base/ngbe_type.h  |  22 
 drivers/net/ngbe/meson.build   |   1 +
 drivers/net/ngbe/ngbe_ethdev.c |  32 -
 drivers/net/ngbe/ngbe_ethdev.h |  19 +++
 drivers/net/ngbe/ngbe_pf.c | 196 +
 drivers/net/ngbe/ngbe_rxtx.c   |  26 ++--
 12 files changed, 390 insertions(+), 13 deletions(-)
 create mode 100644 drivers/net/ngbe/base/ngbe_mbx.c
 create mode 100644 drivers/net/ngbe/base/ngbe_mbx.h
 create mode 100644 drivers/net/ngbe/ngbe_pf.c

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 70d731a695..9a497ccae6 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -20,6 +20,7 @@ Multicast MAC filter = Y
 RSS hash = Y
 RSS key update   = Y
 RSS reta update  = Y
+SR-IOV   = Y
 VLAN filter  = Y
 CRC offload  = P
 VLAN offload = P
diff --git a/drivers/net/ngbe/base/meson.build 
b/drivers/net/ngbe/base/meson.build
index 6081281135..390b0f9c12 100644
--- a/drivers/net/ngbe/base/meson.build
+++ b/drivers/net/ngbe/base/meson.build
@@ -4,6 +4,7 @@
 sources = [
 'ngbe_eeprom.c',
 'ngbe_hw.c',
+'ngbe_mbx.c',
 'ngbe_mng.c',
 'ngbe_phy.c',
 'ngbe_phy_rtl.c',
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 7814fd6226..5cb09bfcaa 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -136,6 +136,14 @@ static inline s32 ngbe_mac_clear_vfta_dummy(struct ngbe_hw 
*TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline void ngbe_mac_set_mac_anti_spoofing_dummy(struct ngbe_hw *TUP0,
+   bool TUP1, int TUP2)
+{
+}
+static inline void ngbe_mac_set_vlan_anti_spoofing_dummy(struct ngbe_hw *TUP0,
+   bool TUP1, int TUP2)
+{
+}
 static inline s32 ngbe_mac_init_thermal_ssth_dummy(struct ngbe_hw *TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
@@ -187,6 +195,12 @@ static inline s32 ngbe_phy_check_link_dummy(struct ngbe_hw 
*TUP0, u32 *TUP1,
 {
return NGBE_ERR_OPS_DUMMY;
 }
+
+/* struct ngbe_mbx_operations */
+static inline void ngbe_mbx_init_params_dummy(struct ngbe_hw *TUP0)
+{
+}
+
 static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 {
hw->bus.set_lan_id = ngbe_bus_set_lan_id_dummy;
@@ -214,6 +228,8 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.init_rx_addrs = ngbe_mac_init_rx_addrs_dummy;
hw->mac.update_mc_addr_list = ngbe_mac_update_mc_addr_list_dummy;
hw->mac.clear_vfta = ngbe_mac_clear_vfta_dummy;
+   hw->mac.set_mac_anti_spoofing = ngbe_mac_set_mac_anti_spoofing_dummy;
+   hw->mac.set_vlan_anti_spoofing = ngbe_mac_set_vlan_anti_spoofing_dummy;
hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
hw->mac.check_overtemp = ngbe_mac_check_overtemp_dummy;
hw->phy.identify = ngbe_phy_identify_dummy;
@@ -225,6 +241,7 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->phy.write_reg_unlocked = ngbe_phy_write_reg_unlocked_dummy;
hw->phy.setup_link = ngbe_phy_setup_link_dummy;
hw->phy.check_link = ngbe_phy_check_link_dummy;
+   hw->mbx.init_params = ngbe_mbx_init_params_dummy;
 }
 
 #endif /* _NGBE_TYPE_DUMMY_H_ */
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index ce0867575a..8b45a91f78 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -4,6 +4,7 @@
  */
 
 #include "ngbe_type.h"
+#include "ngbe_mbx.h"
 #include "ngbe_phy.h"
 #include "ngbe_eeprom.h"
 #include "ngbe_mng.h"
@@ -1008,6 +1009,44 @@ s32 ngbe_setup_mac_link_em(struct ngbe_hw *hw,
return status;
 }
 
+/**
+ *  ngbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for MAC anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
+ *
+ **/
+void ngbe_set_mac_anti_spoofing(struct ngbe_hw *hw, bool enable, int vf)
+{
+   u32 pfvfspoof;
+
+   pfvfspoof = rd32(hw, NGBE_POOLTXASMAC);
+   if (enable)
+   pfvfspoof |= (1 << vf);
+   else
+   pfvfspoof &= ~(1 << vf);
+   wr32(hw, NGBE_POOLTXASMAC, pfvfspoof);
+}
+
+/**
+ *  ngbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for VLAN anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for VLAN

[dpdk-dev] [PATCH 20/32] net/ngbe: support flow control

2021-09-08 Thread Jiawen Wu
Support getting and setting flow control.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini|   1 +
 doc/guides/nics/ngbe.rst |   1 +
 drivers/net/ngbe/base/ngbe_dummy.h   |  31 +++
 drivers/net/ngbe/base/ngbe_hw.c  | 334 +++
 drivers/net/ngbe/base/ngbe_hw.h  |   6 +
 drivers/net/ngbe/base/ngbe_phy.c |   9 +
 drivers/net/ngbe/base/ngbe_phy.h |   3 +
 drivers/net/ngbe/base/ngbe_phy_mvl.c |  57 +
 drivers/net/ngbe/base/ngbe_phy_mvl.h |   4 +
 drivers/net/ngbe/base/ngbe_phy_rtl.c |  42 
 drivers/net/ngbe/base/ngbe_phy_rtl.h |   3 +
 drivers/net/ngbe/base/ngbe_phy_yt.c  |  44 
 drivers/net/ngbe/base/ngbe_phy_yt.h  |   6 +
 drivers/net/ngbe/base/ngbe_type.h|  32 +++
 drivers/net/ngbe/ngbe_ethdev.c   | 111 +
 drivers/net/ngbe/ngbe_ethdev.h   |   8 +
 16 files changed, 692 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 9a497ccae6..00150282cb 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -22,6 +22,7 @@ RSS key update   = Y
 RSS reta update  = Y
 SR-IOV   = Y
 VLAN filter  = Y
+Flow control = Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index ce160e832c..09175e83cd 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -23,6 +23,7 @@ Features
 - Port hardware statistics
 - Jumbo frames
 - Link state information
+- Link flow control
 - Interrupt mode for RX
 - Scattered and gather for TX and RX
 - FW version
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 940b448734..0baabcbae7 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -154,6 +154,17 @@ static inline void 
ngbe_mac_set_vlan_anti_spoofing_dummy(struct ngbe_hw *TUP0,
bool TUP1, int TUP2)
 {
 }
+static inline s32 ngbe_mac_fc_enable_dummy(struct ngbe_hw *TUP0)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_setup_fc_dummy(struct ngbe_hw *TUP0)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline void ngbe_mac_fc_autoneg_dummy(struct ngbe_hw *TUP0)
+{
+}
 static inline s32 ngbe_mac_init_thermal_ssth_dummy(struct ngbe_hw *TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
@@ -205,6 +216,20 @@ static inline s32 ngbe_phy_check_link_dummy(struct ngbe_hw 
*TUP0, u32 *TUP1,
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_get_phy_advertised_pause_dummy(struct ngbe_hw *TUP0,
+   u8 *TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_get_phy_lp_advertised_pause_dummy(struct ngbe_hw *TUP0,
+   u8 *TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_set_phy_pause_adv_dummy(struct ngbe_hw *TUP0, u16 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 
 /* struct ngbe_mbx_operations */
 static inline void ngbe_mbx_init_params_dummy(struct ngbe_hw *TUP0)
@@ -264,6 +289,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.set_vlvf = ngbe_mac_set_vlvf_dummy;
hw->mac.set_mac_anti_spoofing = ngbe_mac_set_mac_anti_spoofing_dummy;
hw->mac.set_vlan_anti_spoofing = ngbe_mac_set_vlan_anti_spoofing_dummy;
+   hw->mac.fc_enable = ngbe_mac_fc_enable_dummy;
+   hw->mac.setup_fc = ngbe_mac_setup_fc_dummy;
+   hw->mac.fc_autoneg = ngbe_mac_fc_autoneg_dummy;
hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
hw->mac.check_overtemp = ngbe_mac_check_overtemp_dummy;
hw->phy.identify = ngbe_phy_identify_dummy;
@@ -275,6 +303,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->phy.write_reg_unlocked = ngbe_phy_write_reg_unlocked_dummy;
hw->phy.setup_link = ngbe_phy_setup_link_dummy;
hw->phy.check_link = ngbe_phy_check_link_dummy;
+   hw->phy.get_adv_pause = ngbe_get_phy_advertised_pause_dummy;
+   hw->phy.get_lp_adv_pause = ngbe_get_phy_lp_advertised_pause_dummy;
+   hw->phy.set_pause_adv = ngbe_set_phy_pause_adv_dummy;
hw->mbx.init_params = ngbe_mbx_init_params_dummy;
hw->mbx.read = ngbe_mbx_read_dummy;
hw->mbx.write = ngbe_mbx_write_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index afde58a89e..35351a2702 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -18,6 +18,8 @@
  **/
 s32 ngbe_start_hw(struct ngbe_hw *hw)
 {
+   s32 err;
+
DEBUGFUNC("ngbe_start_hw");
 
/* Clear the VLAN filter table */
@@ -26,6 +28,13 @@ s32 ngbe_start_hw(struct ngbe_hw *hw)
/* Clear statistics registers */
hw->mac.clear_hw_cntrs(hw);
 
+   /* Setup flow control */
+   err = hw->mac.setup_fc(hw);
+   if (err != 0 && er
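
A usage sketch (not part of the patch): link flow control is driven through the ethdev flow-control API; the watermark and pause-time values below are purely illustrative.

#include <rte_ethdev.h>

/* Hypothetical sketch: switch a port to full (Rx+Tx) flow control. */
static int
enable_flow_ctrl(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0)
                return ret;

        fc_conf.mode = RTE_FC_FULL;
        fc_conf.high_water = 0x1000;    /* example watermark */
        fc_conf.low_water = 0x400;      /* example watermark */
        fc_conf.pause_time = 0x680;     /* example pause time */
        fc_conf.autoneg = 1;

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}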

[dpdk-dev] [PATCH 21/32] net/ngbe: support device LED on and off

2021-09-08 Thread Jiawen Wu
Support turning the device LED on and off.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/base/ngbe_dummy.h | 10 +++
 drivers/net/ngbe/base/ngbe_hw.c| 48 ++
 drivers/net/ngbe/base/ngbe_hw.h|  3 ++
 drivers/net/ngbe/base/ngbe_type.h  |  4 +++
 drivers/net/ngbe/ngbe_ethdev.c | 16 ++
 5 files changed, 81 insertions(+)

diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 0baabcbae7..9930a3a1d6 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -104,6 +104,14 @@ static inline s32 
ngbe_mac_get_link_capabilities_dummy(struct ngbe_hw *TUP0,
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_led_on_dummy(struct ngbe_hw *TUP0, u32 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_led_off_dummy(struct ngbe_hw *TUP0, u32 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_mac_set_rar_dummy(struct ngbe_hw *TUP0, u32 TUP1,
u8 *TUP2, u32 TUP3, u32 TUP4)
 {
@@ -278,6 +286,8 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.setup_link = ngbe_mac_setup_link_dummy;
hw->mac.check_link = ngbe_mac_check_link_dummy;
hw->mac.get_link_capabilities = ngbe_mac_get_link_capabilities_dummy;
+   hw->mac.led_on = ngbe_mac_led_on_dummy;
+   hw->mac.led_off = ngbe_mac_led_off_dummy;
hw->mac.set_rar = ngbe_mac_set_rar_dummy;
hw->mac.clear_rar = ngbe_mac_clear_rar_dummy;
hw->mac.set_vmdq = ngbe_mac_set_vmdq_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 35351a2702..476e5f25cf 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -390,6 +390,50 @@ s32 ngbe_stop_hw(struct ngbe_hw *hw)
return 0;
 }
 
+/**
+ *  ngbe_led_on - Turns on the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ **/
+s32 ngbe_led_on(struct ngbe_hw *hw, u32 index)
+{
+   u32 led_reg = rd32(hw, NGBE_LEDCTL);
+
+   DEBUGFUNC("ngbe_led_on");
+
+   if (index > 3)
+   return NGBE_ERR_PARAM;
+
+   /* To turn on the LED, set mode to ON. */
+   led_reg |= NGBE_LEDCTL_100M;
+   wr32(hw, NGBE_LEDCTL, led_reg);
+   ngbe_flush(hw);
+
+   return 0;
+}
+
+/**
+ *  ngbe_led_off - Turns off the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ **/
+s32 ngbe_led_off(struct ngbe_hw *hw, u32 index)
+{
+   u32 led_reg = rd32(hw, NGBE_LEDCTL);
+
+   DEBUGFUNC("ngbe_led_off");
+
+   if (index > 3)
+   return NGBE_ERR_PARAM;
+
+   /* To turn off the LED, set mode to OFF. */
+   led_reg &= ~NGBE_LEDCTL_100M;
+   wr32(hw, NGBE_LEDCTL, led_reg);
+   ngbe_flush(hw);
+
+   return 0;
+}
+
 /**
  *  ngbe_validate_mac_addr - Validate MAC address
  *  @mac_addr: pointer to MAC address.
@@ -1836,6 +1880,10 @@ s32 ngbe_init_ops_pf(struct ngbe_hw *hw)
mac->disable_sec_rx_path = ngbe_disable_sec_rx_path;
mac->enable_sec_rx_path = ngbe_enable_sec_rx_path;
 
+   /* LEDs */
+   mac->led_on = ngbe_led_on;
+   mac->led_off = ngbe_led_off;
+
/* RAR, Multicast, VLAN */
mac->set_rar = ngbe_set_rar;
mac->clear_rar = ngbe_clear_rar;
diff --git a/drivers/net/ngbe/base/ngbe_hw.h b/drivers/net/ngbe/base/ngbe_hw.h
index a84ddca6ac..ad7e8fc2d9 100644
--- a/drivers/net/ngbe/base/ngbe_hw.h
+++ b/drivers/net/ngbe/base/ngbe_hw.h
@@ -32,6 +32,9 @@ s32 ngbe_setup_mac_link_em(struct ngbe_hw *hw,
   u32 speed,
   bool autoneg_wait_to_complete);
 
+s32 ngbe_led_on(struct ngbe_hw *hw, u32 index);
+s32 ngbe_led_off(struct ngbe_hw *hw, u32 index);
+
 s32 ngbe_set_rar(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
  u32 enable_addr);
 s32 ngbe_clear_rar(struct ngbe_hw *hw, u32 index);
diff --git a/drivers/net/ngbe/base/ngbe_type.h 
b/drivers/net/ngbe/base/ngbe_type.h
index 310d32ecfa..886dffc0db 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -265,6 +265,10 @@ struct ngbe_mac_info {
s32 (*get_link_capabilities)(struct ngbe_hw *hw,
  u32 *speed, bool *autoneg);
 
+   /* LED */
+   s32 (*led_on)(struct ngbe_hw *hw, u32 index);
+   s32 (*led_off)(struct ngbe_hw *hw, u32 index);
+
/* RAR */
s32 (*set_rar)(struct ngbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
  u32 enable_addr);
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index e950146f42..6ed836df9e 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -2239,6 +2239,20 @@ ngbe_dev_interrupt_handler(void *param)
ngbe_dev_interrupt_action(dev);
 }
 
+static int
+ngbe_dev_led_on(s
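
A usage sketch (not part of the patch): the LED ops are typically used to identify a physical port; the two-second delay is arbitrary.

#include <rte_cycles.h>
#include <rte_ethdev.h>

/* Hypothetical sketch: blink the port LED for identification. */
static void
identify_port(uint16_t port_id)
{
        rte_eth_led_on(port_id);
        rte_delay_ms(2000);     /* keep the LED lit for two seconds */
        rte_eth_led_off(port_id);
}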

[dpdk-dev] [PATCH 22/32] net/ngbe: support EEPROM dump

2021-09-08 Thread Jiawen Wu
Support getting and setting device EEPROM data.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini   |  1 +
 drivers/net/ngbe/base/ngbe_dummy.h  | 12 +
 drivers/net/ngbe/base/ngbe_eeprom.c | 77 +
 drivers/net/ngbe/base/ngbe_eeprom.h |  5 ++
 drivers/net/ngbe/base/ngbe_hw.c |  2 +
 drivers/net/ngbe/base/ngbe_mng.c| 41 +++
 drivers/net/ngbe/base/ngbe_mng.h| 13 +
 drivers/net/ngbe/base/ngbe_type.h   |  4 ++
 drivers/net/ngbe/ngbe_ethdev.c  | 52 +++
 9 files changed, 207 insertions(+)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 00150282cb..3c169ab774 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -35,6 +35,7 @@ Basic stats  = Y
 Extended stats   = Y
 Stats per queue  = Y
 FW version   = Y
+EEPROM dump  = Y
 Multiprocess aware   = Y
 Linux= Y
 ARMv8= Y
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 9930a3a1d6..61b0d82bfb 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -33,11 +33,21 @@ static inline s32 ngbe_rom_init_params_dummy(struct ngbe_hw 
*TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_rom_readw_buffer_dummy(struct ngbe_hw *TUP0, u32 TUP1,
+   u32 TUP2, void *TUP3)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_rom_read32_dummy(struct ngbe_hw *TUP0, u32 TUP1,
u32 *TUP2)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_rom_writew_buffer_dummy(struct ngbe_hw *TUP0, u32 TUP1,
+   u32 TUP2, void *TUP3)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline s32 ngbe_rom_validate_checksum_dummy(struct ngbe_hw *TUP0,
u16 *TUP1)
 {
@@ -270,7 +280,9 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 {
hw->bus.set_lan_id = ngbe_bus_set_lan_id_dummy;
hw->rom.init_params = ngbe_rom_init_params_dummy;
+   hw->rom.readw_buffer = ngbe_rom_readw_buffer_dummy;
hw->rom.read32 = ngbe_rom_read32_dummy;
+   hw->rom.writew_buffer = ngbe_rom_writew_buffer_dummy;
hw->rom.validate_checksum = ngbe_rom_validate_checksum_dummy;
hw->mac.init_hw = ngbe_mac_init_hw_dummy;
hw->mac.reset_hw = ngbe_mac_reset_hw_dummy;
diff --git a/drivers/net/ngbe/base/ngbe_eeprom.c 
b/drivers/net/ngbe/base/ngbe_eeprom.c
index 9ae2f0badb..f9a876e9bd 100644
--- a/drivers/net/ngbe/base/ngbe_eeprom.c
+++ b/drivers/net/ngbe/base/ngbe_eeprom.c
@@ -161,6 +161,45 @@ void ngbe_release_eeprom_semaphore(struct ngbe_hw *hw)
ngbe_flush(hw);
 }
 
+/**
+ *  ngbe_ee_read_buffer- Read EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads a 16 bit word(s) from the EEPROM using the hostif.
+ **/
+s32 ngbe_ee_readw_buffer(struct ngbe_hw *hw,
+u32 offset, u32 words, void *data)
+{
+   const u32 mask = NGBE_MNGSEM_SWMBX | NGBE_MNGSEM_SWFLASH;
+   u32 addr = (offset << 1);
+   u32 len = (words << 1);
+   u8 *buf = (u8 *)data;
+   int err;
+
+   err = hw->mac.acquire_swfw_sync(hw, mask);
+   if (err)
+   return err;
+
+   while (len) {
+   u32 seg = (len <= NGBE_PMMBX_DATA_SIZE
+   ? len : NGBE_PMMBX_DATA_SIZE);
+
+   err = ngbe_hic_sr_read(hw, addr, buf, seg);
+   if (err)
+   break;
+
+   len -= seg;
+   addr += seg;
+   buf += seg;
+   }
+
+   hw->mac.release_swfw_sync(hw, mask);
+   return err;
+}
+
 /**
  *  ngbe_ee_read32 - Read EEPROM word using a host interface cmd
  *  @hw: pointer to hardware structure
@@ -185,6 +224,44 @@ s32 ngbe_ee_read32(struct ngbe_hw *hw, u32 addr, u32 *data)
return err;
 }
 
+/**
+ *  ngbe_ee_write_buffer - Write EEPROM word(s) using hostif
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to write
+ *  @words: number of words
+ *  @data: word(s) write to the EEPROM
+ *
+ *  Write a 16 bit word(s) to the EEPROM using the hostif.
+ **/
+s32 ngbe_ee_writew_buffer(struct ngbe_hw *hw,
+ u32 offset, u32 words, void *data)
+{
+   const u32 mask = NGBE_MNGSEM_SWMBX | NGBE_MNGSEM_SWFLASH;
+   u32 addr = (offset << 1);
+   u32 len = (words << 1);
+   u8 *buf = (u8 *)data;
+   int err;
+
+   err = hw->mac.acquire_swfw_sync(hw, mask);
+   if (err)
+   return err;
+
+   while (len) {
+   u32 seg = (len <= NGBE_PMMBX_DATA_SIZE
+   ? l
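
A usage sketch (not part of the patch): the readw/writew buffer helpers back the generic EEPROM dump; the sketch assumes the PMD also reports its EEPROM length.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_dev_info.h>

/* Hypothetical sketch: read the whole EEPROM of a port. */
static void
dump_eeprom(uint16_t port_id)
{
        struct rte_dev_eeprom_info info;
        int len = rte_eth_dev_get_eeprom_length(port_id);

        if (len <= 0)
                return;

        memset(&info, 0, sizeof(info));
        info.data = malloc(len);
        info.length = len;

        if (info.data != NULL && rte_eth_dev_get_eeprom(port_id, &info) == 0)
                printf("port %u: read %u EEPROM bytes\n",
                       port_id, info.length);
        free(info.data);
}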

[dpdk-dev] [PATCH 19/32] net/ngbe: add mailbox process operations

2021-09-08 Thread Jiawen Wu
Add operations to check for VF function-level reset, mailbox
messages, and acks from a VF, and to wait until the messages are
processed.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/base/ngbe.h   |   4 +
 drivers/net/ngbe/base/ngbe_dummy.h |  39 ++
 drivers/net/ngbe/base/ngbe_hw.c| 215 +++
 drivers/net/ngbe/base/ngbe_hw.h|   8 +
 drivers/net/ngbe/base/ngbe_mbx.c   | 297 +++
 drivers/net/ngbe/base/ngbe_mbx.h   |  78 
 drivers/net/ngbe/base/ngbe_type.h  |  10 +
 drivers/net/ngbe/meson.build   |   2 +
 drivers/net/ngbe/ngbe_ethdev.c |   7 +
 drivers/net/ngbe/ngbe_ethdev.h |  13 +
 drivers/net/ngbe/ngbe_pf.c | 564 +
 drivers/net/ngbe/rte_pmd_ngbe.h|  39 ++
 12 files changed, 1276 insertions(+)
 create mode 100644 drivers/net/ngbe/rte_pmd_ngbe.h

diff --git a/drivers/net/ngbe/base/ngbe.h b/drivers/net/ngbe/base/ngbe.h
index fe85b07b57..1d17c2f115 100644
--- a/drivers/net/ngbe/base/ngbe.h
+++ b/drivers/net/ngbe/base/ngbe.h
@@ -6,6 +6,10 @@
 #define _NGBE_H_
 
 #include "ngbe_type.h"
+#include "ngbe_mng.h"
+#include "ngbe_mbx.h"
+#include "ngbe_eeprom.h"
+#include "ngbe_phy.h"
 #include "ngbe_hw.h"
 
 #endif /* _NGBE_H_ */
diff --git a/drivers/net/ngbe/base/ngbe_dummy.h 
b/drivers/net/ngbe/base/ngbe_dummy.h
index 5cb09bfcaa..940b448734 100644
--- a/drivers/net/ngbe/base/ngbe_dummy.h
+++ b/drivers/net/ngbe/base/ngbe_dummy.h
@@ -136,6 +136,16 @@ static inline s32 ngbe_mac_clear_vfta_dummy(struct ngbe_hw 
*TUP0)
 {
return NGBE_ERR_OPS_DUMMY;
 }
+static inline s32 ngbe_mac_set_vfta_dummy(struct ngbe_hw *TUP0, u32 TUP1,
+   u32 TUP2, bool TUP3, bool TUP4)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mac_set_vlvf_dummy(struct ngbe_hw *TUP0, u32 TUP1,
+   u32 TUP2, bool TUP3, u32 *TUP4, u32 TUP5, bool TUP6)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 static inline void ngbe_mac_set_mac_anti_spoofing_dummy(struct ngbe_hw *TUP0,
bool TUP1, int TUP2)
 {
@@ -200,6 +210,28 @@ static inline s32 ngbe_phy_check_link_dummy(struct ngbe_hw 
*TUP0, u32 *TUP1,
 static inline void ngbe_mbx_init_params_dummy(struct ngbe_hw *TUP0)
 {
 }
+static inline s32 ngbe_mbx_read_dummy(struct ngbe_hw *TUP0, u32 *TUP1,
+   u16 TUP2, u16 TUP3)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mbx_write_dummy(struct ngbe_hw *TUP0, u32 *TUP1,
+   u16 TUP2, u16 TUP3)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mbx_check_for_msg_dummy(struct ngbe_hw *TUP0, u16 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mbx_check_for_ack_dummy(struct ngbe_hw *TUP0, u16 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
+static inline s32 ngbe_mbx_check_for_rst_dummy(struct ngbe_hw *TUP0, u16 TUP1)
+{
+   return NGBE_ERR_OPS_DUMMY;
+}
 
 static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
 {
@@ -228,6 +260,8 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->mac.init_rx_addrs = ngbe_mac_init_rx_addrs_dummy;
hw->mac.update_mc_addr_list = ngbe_mac_update_mc_addr_list_dummy;
hw->mac.clear_vfta = ngbe_mac_clear_vfta_dummy;
+   hw->mac.set_vfta = ngbe_mac_set_vfta_dummy;
+   hw->mac.set_vlvf = ngbe_mac_set_vlvf_dummy;
hw->mac.set_mac_anti_spoofing = ngbe_mac_set_mac_anti_spoofing_dummy;
hw->mac.set_vlan_anti_spoofing = ngbe_mac_set_vlan_anti_spoofing_dummy;
hw->mac.init_thermal_sensor_thresh = ngbe_mac_init_thermal_ssth_dummy;
@@ -242,6 +276,11 @@ static inline void ngbe_init_ops_dummy(struct ngbe_hw *hw)
hw->phy.setup_link = ngbe_phy_setup_link_dummy;
hw->phy.check_link = ngbe_phy_check_link_dummy;
hw->mbx.init_params = ngbe_mbx_init_params_dummy;
+   hw->mbx.read = ngbe_mbx_read_dummy;
+   hw->mbx.write = ngbe_mbx_write_dummy;
+   hw->mbx.check_for_msg = ngbe_mbx_check_for_msg_dummy;
+   hw->mbx.check_for_ack = ngbe_mbx_check_for_ack_dummy;
+   hw->mbx.check_for_rst = ngbe_mbx_check_for_rst_dummy;
 }
 
 #endif /* _NGBE_TYPE_DUMMY_H_ */
diff --git a/drivers/net/ngbe/base/ngbe_hw.c b/drivers/net/ngbe/base/ngbe_hw.c
index 8b45a91f78..afde58a89e 100644
--- a/drivers/net/ngbe/base/ngbe_hw.c
+++ b/drivers/net/ngbe/base/ngbe_hw.c
@@ -914,6 +914,214 @@ s32 ngbe_init_uta_tables(struct ngbe_hw *hw)
return 0;
 }
 
+/**
+ *  ngbe_find_vlvf_slot - find the vlanid or the first empty slot
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vlvf_bypass: true to find vlanid only, false returns first empty slot if
+ *   vlanid not found
+ *
+ *
+ *  return the VLVF index where this VLAN id should be placed
+ *
+ **/
+s32 ngbe_find_vlvf_slot(struct ngbe_hw *hw, u32 vlan, bool vlvf_bypass)
+{
+   s32 regindex, first_empty_slot;
+   u32 bits;
+
+   /* short cut the special case */
+ 

[dpdk-dev] [PATCH 24/32] net/ngbe: support timesync

2021-09-08 Thread Jiawen Wu
Add support for IEEE 1588/802.1AS timestamping, and IEEE 1588
timestamp offload on Tx.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   1 +
 doc/guides/nics/ngbe.rst  |   1 +
 drivers/net/ngbe/ngbe_ethdev.c| 216 ++
 drivers/net/ngbe/ngbe_ethdev.h|  10 ++
 drivers/net/ngbe/ngbe_rxtx.c  |  33 -
 5 files changed, 260 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 1d6399a2e7..c780f1aa68 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -31,6 +31,7 @@ L4 checksum offload  = P
 Inner L3 checksum= P
 Inner L4 checksum= P
 Packet type parsing  = Y
+Timesync = Y
 Basic stats  = Y
 Extended stats   = Y
 Stats per queue  = Y
diff --git a/doc/guides/nics/ngbe.rst b/doc/guides/nics/ngbe.rst
index 09175e83cd..67fc7c89cc 100644
--- a/doc/guides/nics/ngbe.rst
+++ b/doc/guides/nics/ngbe.rst
@@ -26,6 +26,7 @@ Features
 - Link flow control
 - Interrupt mode for RX
 - Scattered and gather for TX and RX
+- IEEE 1588
 - FW version
 
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 4d94bc8b83..506b94168c 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -2830,6 +2830,215 @@ ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 ngbe_dev_addr_list_itr, TRUE);
 }
 
+static uint64_t
+ngbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint64_t systime_cycles;
+
+   systime_cycles = (uint64_t)rd32(hw, NGBE_TSTIMEL);
+   systime_cycles |= (uint64_t)rd32(hw, NGBE_TSTIMEH) << 32;
+
+   return systime_cycles;
+}
+
+static uint64_t
+ngbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint64_t rx_tstamp_cycles;
+
+   /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+   rx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSRXSTMPL);
+   rx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSRXSTMPH) << 32;
+
+   return rx_tstamp_cycles;
+}
+
+static uint64_t
+ngbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint64_t tx_tstamp_cycles;
+
+   /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+   tx_tstamp_cycles = (uint64_t)rd32(hw, NGBE_TSTXSTMPL);
+   tx_tstamp_cycles |= (uint64_t)rd32(hw, NGBE_TSTXSTMPH) << 32;
+
+   return tx_tstamp_cycles;
+}
+
+static void
+ngbe_start_timecounters(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+   uint32_t incval = 0;
+   uint32_t shift = 0;
+
+   incval = NGBE_INCVAL_1GB;
+   shift = NGBE_INCVAL_SHIFT_1GB;
+
+   wr32(hw, NGBE_TSTIMEINC, NGBE_TSTIMEINC_IV(incval));
+
+   memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+   memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+   memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+   adapter->systime_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
+   adapter->systime_tc.cc_shift = shift;
+   adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+   adapter->rx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
+   adapter->rx_tstamp_tc.cc_shift = shift;
+   adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+   adapter->tx_tstamp_tc.cc_mask = NGBE_CYCLECOUNTER_MASK;
+   adapter->tx_tstamp_tc.cc_shift = shift;
+   adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+ngbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+
+   adapter->systime_tc.nsec += delta;
+   adapter->rx_tstamp_tc.nsec += delta;
+   adapter->tx_tstamp_tc.nsec += delta;
+
+   return 0;
+}
+
+static int
+ngbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+   uint64_t ns;
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+
+   ns = rte_timespec_to_ns(ts);
+   /* Set the timecounters to a new value. */
+   adapter->systime_tc.nsec = ns;
+   adapter->rx_tstamp_tc.nsec = ns;
+   adapter->tx_tstamp_tc.nsec = ns;
+
+   return 0;
+}
+
+static int
+ngbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+   uint64_t ns, systime_cycles;
+   struct ngbe_adapter *adapter = ngbe_dev_adapter(dev);
+
+   systime_cycles = ngbe_read_systime_cyclecounter(dev);
+   ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+   *ts = rte_ns_to_timespec(ns);
+
+   return 0;
+}
+
+static int
+ngbe_timesync_enable(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t tsync_ctl;
+
+   /* Stop the times

[dpdk-dev] [PATCH 23/32] net/ngbe: support register dump

2021-09-08 Thread Jiawen Wu
Add support for dumping device registers.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini  |   1 +
 drivers/net/ngbe/base/ngbe_type.h  |   1 +
 drivers/net/ngbe/ngbe_ethdev.c | 108 +
 drivers/net/ngbe/ngbe_regs_group.h |  54 +++
 4 files changed, 164 insertions(+)
 create mode 100644 drivers/net/ngbe/ngbe_regs_group.h
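
For context, a minimal sketch of how an application could consume this new dump
through the generic ethdev API; rte_eth_dev_get_reg_info() is assumed to be called
on a valid, initialized ngbe port, first with data == NULL to learn the required
buffer size (the PMD only supports a full dump):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <rte_ethdev.h>

static int
dump_port_registers(uint16_t port_id)
{
	struct rte_dev_reg_info info = { .data = NULL };
	int ret;

	/* First call with data == NULL only fills length and width. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret != 0)
		return ret;

	info.data = calloc(info.length, info.width);
	if (info.data == NULL)
		return -ENOMEM;

	/* Second call performs the actual register dump. */
	ret = rte_eth_dev_get_reg_info(port_id, &info);
	if (ret == 0)
		printf("port %u: %u registers, version 0x%x\n",
		       port_id, info.length, info.version);

	free(info.data);
	return ret;
}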

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 3c169ab774..1d6399a2e7 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -36,6 +36,7 @@ Extended stats   = Y
 Stats per queue  = Y
 FW version   = Y
 EEPROM dump  = Y
+Registers dump   = Y
 Multiprocess aware   = Y
 Linux= Y
 ARMv8= Y
diff --git a/drivers/net/ngbe/base/ngbe_type.h 
b/drivers/net/ngbe/base/ngbe_type.h
index 32d3ab5d03..12847b7272 100644
--- a/drivers/net/ngbe/base/ngbe_type.h
+++ b/drivers/net/ngbe/base/ngbe_type.h
@@ -398,6 +398,7 @@ struct ngbe_hw {
u16 sub_device_id;
u16 sub_system_id;
u32 eeprom_id;
+   u8 revision_id;
bool adapter_stopped;
 
uint64_t isb_dma;
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 1cf4ca54af..4d94bc8b83 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -13,6 +13,67 @@
 #include "ngbe.h"
 #include "ngbe_ethdev.h"
 #include "ngbe_rxtx.h"
+#include "ngbe_regs_group.h"
+
+static const struct reg_info ngbe_regs_general[] = {
+   {NGBE_RST, 1, 1, "NGBE_RST"},
+   {NGBE_STAT, 1, 1, "NGBE_STAT"},
+   {NGBE_PORTCTL, 1, 1, "NGBE_PORTCTL"},
+   {NGBE_GPIODATA, 1, 1, "NGBE_GPIODATA"},
+   {NGBE_GPIOCTL, 1, 1, "NGBE_GPIOCTL"},
+   {NGBE_LEDCTL, 1, 1, "NGBE_LEDCTL"},
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_nvm[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_interrupt[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_fctl_others[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_rxdma[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_rx[] = {
+   {0, 0, 0, ""}
+};
+
+static struct reg_info ngbe_regs_tx[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_wakeup[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_mac[] = {
+   {0, 0, 0, ""}
+};
+
+static const struct reg_info ngbe_regs_diagnostic[] = {
+   {0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *ngbe_regs_others[] = {
+   ngbe_regs_general,
+   ngbe_regs_nvm,
+   ngbe_regs_interrupt,
+   ngbe_regs_fctl_others,
+   ngbe_regs_rxdma,
+   ngbe_regs_rx,
+   ngbe_regs_tx,
+   ngbe_regs_wakeup,
+   ngbe_regs_mac,
+   ngbe_regs_diagnostic,
+   NULL};
 
 static int ngbe_dev_close(struct rte_eth_dev *dev);
 static int ngbe_dev_link_update(struct rte_eth_dev *dev,
@@ -2769,6 +2830,52 @@ ngbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
 ngbe_dev_addr_list_itr, TRUE);
 }
 
+static int
+ngbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+   int count = 0;
+   int g_ind = 0;
+   const struct reg_info *reg_group;
+   const struct reg_info **reg_set = ngbe_regs_others;
+
+   while ((reg_group = reg_set[g_ind++]))
+   count += ngbe_regs_group_count(reg_group);
+
+   return count;
+}
+
+static int
+ngbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t *data = regs->data;
+   int g_ind = 0;
+   int count = 0;
+   const struct reg_info *reg_group;
+   const struct reg_info **reg_set = ngbe_regs_others;
+
+   if (data == NULL) {
+   regs->length = ngbe_get_reg_length(dev);
+   regs->width = sizeof(uint32_t);
+   return 0;
+   }
+
+   /* Support only full register dump */
+   if (regs->length == 0 ||
+   regs->length == (uint32_t)ngbe_get_reg_length(dev)) {
+   regs->version = hw->mac.type << 24 |
+   hw->revision_id << 16 |
+   hw->device_id;
+   while ((reg_group = reg_set[g_ind++]))
+   count += ngbe_read_regs_group(dev, &data[count],
+ reg_group);
+   return 0;
+   }
+
+   return -ENOTSUP;
+}
+
 static int
 ngbe_get_eeprom_length(struct rte_eth_dev *dev)
 {
@@ -2868,6 +2975,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops =

[dpdk-dev] [PATCH 26/32] net/ngbe: add Rx and Tx descriptor status

2021-09-08 Thread Jiawen Wu
Add support for getting the number of used Rx descriptors
and for checking the status of Rx and Tx descriptors.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |  2 +
 drivers/net/ngbe/ngbe_ethdev.c|  3 ++
 drivers/net/ngbe/ngbe_ethdev.h|  6 +++
 drivers/net/ngbe/ngbe_rxtx.c  | 73 +++
 4 files changed, 84 insertions(+)
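
As a reference for how these hooks surface to applications, a hedged sketch using
the generic ethdev wrappers (port_id and queue_id are placeholders for a started
ngbe port and queue):

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_queue_state(uint16_t port_id, uint16_t queue_id)
{
	int used = rte_eth_rx_queue_count(port_id, queue_id);
	int rx_st = rte_eth_rx_descriptor_status(port_id, queue_id, 0);
	int tx_st = rte_eth_tx_descriptor_status(port_id, queue_id, 0);

	if (used >= 0)
		printf("rxq %u: %d used descriptors\n", queue_id, used);

	/* Status of the descriptor right after the current SW tail. */
	if (rx_st == RTE_ETH_RX_DESC_DONE)
		printf("next Rx descriptor holds a received packet\n");
	if (tx_st == RTE_ETH_TX_DESC_DONE)
		printf("next Tx descriptor has completed transmission\n");
}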

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index c780f1aa68..56d5d71ea8 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -32,6 +32,8 @@ Inner L3 checksum= P
 Inner L4 checksum= P
 Packet type parsing  = Y
 Timesync = Y
+Rx descriptor status = Y
+Tx descriptor status = Y
 Basic stats  = Y
 Extended stats   = Y
 Stats per queue  = Y
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 2d0c9e3453..ec652aa359 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -370,6 +370,9 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
PMD_INIT_FUNC_TRACE();
 
eth_dev->dev_ops = &ngbe_eth_dev_ops;
+   eth_dev->rx_queue_count   = ngbe_dev_rx_queue_count;
+   eth_dev->rx_descriptor_status = ngbe_dev_rx_descriptor_status;
+   eth_dev->tx_descriptor_status = ngbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ngbe_recv_pkts;
eth_dev->tx_pkt_burst = &ngbe_xmit_pkts;
eth_dev->tx_pkt_prepare = &ngbe_prep_pkts;
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 98df1c3bf0..aacc0b68b2 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -181,6 +181,12 @@ int  ngbe_dev_tx_queue_setup(struct rte_eth_dev *dev, 
uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
 
+uint32_t ngbe_dev_rx_queue_count(struct rte_eth_dev *dev,
+   uint16_t rx_queue_id);
+
+int ngbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ngbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
 int ngbe_dev_rx_init(struct rte_eth_dev *dev);
 
 void ngbe_dev_tx_init(struct rte_eth_dev *dev);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index ac97eec1c0..0b31474193 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -2263,6 +2263,79 @@ ngbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
return 0;
 }
 
+uint32_t
+ngbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define NGBE_RXQ_SCAN_INTERVAL 4
+   volatile struct ngbe_rx_desc *rxdp;
+   struct ngbe_rx_queue *rxq;
+   uint32_t desc = 0;
+
+   rxq = dev->data->rx_queues[rx_queue_id];
+   rxdp = &rxq->rx_ring[rxq->rx_tail];
+
+   while ((desc < rxq->nb_rx_desc) &&
+   (rxdp->qw1.lo.status &
+   rte_cpu_to_le_32(NGBE_RXD_STAT_DD))) {
+   desc += NGBE_RXQ_SCAN_INTERVAL;
+   rxdp += NGBE_RXQ_SCAN_INTERVAL;
+   if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
+   rxdp = &(rxq->rx_ring[rxq->rx_tail +
+   desc - rxq->nb_rx_desc]);
+   }
+
+   return desc;
+}
+
+int
+ngbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+   struct ngbe_rx_queue *rxq = rx_queue;
+   volatile uint32_t *status;
+   uint32_t nb_hold, desc;
+
+   if (unlikely(offset >= rxq->nb_rx_desc))
+   return -EINVAL;
+
+   nb_hold = rxq->nb_rx_hold;
+   if (offset >= rxq->nb_rx_desc - nb_hold)
+   return RTE_ETH_RX_DESC_UNAVAIL;
+
+   desc = rxq->rx_tail + offset;
+   if (desc >= rxq->nb_rx_desc)
+   desc -= rxq->nb_rx_desc;
+
+   status = &rxq->rx_ring[desc].qw1.lo.status;
+   if (*status & rte_cpu_to_le_32(NGBE_RXD_STAT_DD))
+   return RTE_ETH_RX_DESC_DONE;
+
+   return RTE_ETH_RX_DESC_AVAIL;
+}
+
+int
+ngbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+   struct ngbe_tx_queue *txq = tx_queue;
+   volatile uint32_t *status;
+   uint32_t desc;
+
+   if (unlikely(offset >= txq->nb_tx_desc))
+   return -EINVAL;
+
+   desc = txq->tx_tail + offset;
+   if (desc >= txq->nb_tx_desc) {
+   desc -= txq->nb_tx_desc;
+   if (desc >= txq->nb_tx_desc)
+   desc -= txq->nb_tx_desc;
+   }
+
+   status = &txq->tx_ring[desc].dw3;
+   if (*status & rte_cpu_to_le_32(NGBE_TXD_DD))
+   return RTE_ETH_TX_DESC_DONE;
+
+   return RTE_ETH_TX_DESC_FULL;
+}
+
 void
 ngbe_dev_clear_queues(struct rte_eth_dev *dev)
 {
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 25/32] net/ngbe: add Rx and Tx queue info get

2021-09-08 Thread Jiawen Wu
Add Rx and Tx queue information get operations.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ethdev.c |  2 ++
 drivers/net/ngbe/ngbe_ethdev.h |  6 ++
 drivers/net/ngbe/ngbe_rxtx.c   | 37 ++
 3 files changed, 45 insertions(+)
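
For completeness, a short hedged example of reading this information back through
the ethdev layer (queue_id is assumed to refer to a configured queue):

#include <stdio.h>
#include <rte_ethdev.h>

static void
print_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rxq_info;
	struct rte_eth_txq_info txq_info;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rxq_info) == 0)
		printf("rxq %u: %u descriptors, mempool %s, drop_en %u\n",
		       queue_id, rxq_info.nb_desc, rxq_info.mp->name,
		       rxq_info.conf.rx_drop_en);

	if (rte_eth_tx_queue_info_get(port_id, queue_id, &txq_info) == 0)
		printf("txq %u: %u descriptors, free threshold %u\n",
		       queue_id, txq_info.nb_desc,
		       txq_info.conf.tx_free_thresh);
}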

diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 506b94168c..2d0c9e3453 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -3184,6 +3184,8 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.rss_hash_update= ngbe_dev_rss_hash_update,
.rss_hash_conf_get  = ngbe_dev_rss_hash_conf_get,
.set_mc_addr_list   = ngbe_dev_set_mc_addr_list,
+   .rxq_info_get   = ngbe_rxq_info_get,
+   .txq_info_get   = ngbe_txq_info_get,
.timesync_enable= ngbe_timesync_enable,
.timesync_disable   = ngbe_timesync_disable,
.timesync_read_rx_timestamp = ngbe_timesync_read_rx_timestamp,
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index b6e623ab0f..98df1c3bf0 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -200,6 +200,12 @@ int ngbe_dev_tx_queue_start(struct rte_eth_dev *dev, 
uint16_t tx_queue_id);
 
 int ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void ngbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo);
+
+void ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo);
+
 uint16_t ngbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
 
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index e0ca4af9d9..ac97eec1c0 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -3092,3 +3092,40 @@ ngbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t 
tx_queue_id)
 
return 0;
 }
+
+void
+ngbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_rxq_info *qinfo)
+{
+   struct ngbe_rx_queue *rxq;
+
+   rxq = dev->data->rx_queues[queue_id];
+
+   qinfo->mp = rxq->mb_pool;
+   qinfo->scattered_rx = dev->data->scattered_rx;
+   qinfo->nb_desc = rxq->nb_rx_desc;
+
+   qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+   qinfo->conf.rx_drop_en = rxq->drop_en;
+   qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+   qinfo->conf.offloads = rxq->offloads;
+}
+
+void
+ngbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+   struct rte_eth_txq_info *qinfo)
+{
+   struct ngbe_tx_queue *txq;
+
+   txq = dev->data->tx_queues[queue_id];
+
+   qinfo->nb_desc = txq->nb_tx_desc;
+
+   qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+   qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+   qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+   qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+   qinfo->conf.offloads = txq->offloads;
+   qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 27/32] net/ngbe: add Tx done cleanup

2021-09-08 Thread Jiawen Wu
Add support for API rte_eth_tx_done_cleanup().

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ethdev.c |  1 +
 drivers/net/ngbe/ngbe_rxtx.c   | 89 ++
 drivers/net/ngbe/ngbe_rxtx.h   |  1 +
 3 files changed, 91 insertions(+)
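
For reference, a hedged sketch of how an application would trigger the new cleanup
from its transmit path (free_cnt == 0 asks the PMD to free as many transmitted
mbufs as possible):

#include <stdio.h>
#include <rte_ethdev.h>

static void
reclaim_tx_mbufs(uint16_t port_id, uint16_t queue_id)
{
	/* Reclaim completed Tx mbufs before the mempool runs dry. */
	int nb_freed = rte_eth_tx_done_cleanup(port_id, queue_id, 0);

	if (nb_freed < 0)
		printf("tx_done_cleanup failed: %d\n", nb_freed);
	else
		printf("freed %d packets from txq %u\n", nb_freed, queue_id);
}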

diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index ec652aa359..4eaf9b0724 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -3200,6 +3200,7 @@ static const struct eth_dev_ops ngbe_eth_dev_ops = {
.timesync_adjust_time   = ngbe_timesync_adjust_time,
.timesync_read_time = ngbe_timesync_read_time,
.timesync_write_time= ngbe_timesync_write_time,
+   .tx_done_cleanup= ngbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_ngbe, rte_ngbe_pmd);
diff --git a/drivers/net/ngbe/ngbe_rxtx.c b/drivers/net/ngbe/ngbe_rxtx.c
index 0b31474193..bee4f04616 100644
--- a/drivers/net/ngbe/ngbe_rxtx.c
+++ b/drivers/net/ngbe/ngbe_rxtx.c
@@ -1717,6 +1717,95 @@ ngbe_tx_queue_release_mbufs(struct ngbe_tx_queue *txq)
}
 }
 
+static int
+ngbe_tx_done_cleanup_full(struct ngbe_tx_queue *txq, uint32_t free_cnt)
+{
+   struct ngbe_tx_entry *swr_ring = txq->sw_ring;
+   uint16_t i, tx_last, tx_id;
+   uint16_t nb_tx_free_last;
+   uint16_t nb_tx_to_clean;
+   uint32_t pkt_cnt;
+
+   /* Start free mbuf from the next of tx_tail */
+   tx_last = txq->tx_tail;
+   tx_id  = swr_ring[tx_last].next_id;
+
+   if (txq->nb_tx_free == 0 && ngbe_xmit_cleanup(txq))
+   return 0;
+
+   nb_tx_to_clean = txq->nb_tx_free;
+   nb_tx_free_last = txq->nb_tx_free;
+   if (!free_cnt)
+   free_cnt = txq->nb_tx_desc;
+
+   /* Loop through swr_ring to count the amount of
+* freeable mbufs and packets.
+*/
+   for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
+   for (i = 0; i < nb_tx_to_clean &&
+   pkt_cnt < free_cnt &&
+   tx_id != tx_last; i++) {
+   if (swr_ring[tx_id].mbuf != NULL) {
+   rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
+   swr_ring[tx_id].mbuf = NULL;
+
+   /*
+* last segment in the packet,
+* increment packet count
+*/
+   pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
+   }
+
+   tx_id = swr_ring[tx_id].next_id;
+   }
+
+   if (pkt_cnt < free_cnt) {
+   if (ngbe_xmit_cleanup(txq))
+   break;
+
+   nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
+   nb_tx_free_last = txq->nb_tx_free;
+   }
+   }
+
+   return (int)pkt_cnt;
+}
+
+static int
+ngbe_tx_done_cleanup_simple(struct ngbe_tx_queue *txq,
+   uint32_t free_cnt)
+{
+   int i, n, cnt;
+
+   if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
+   free_cnt = txq->nb_tx_desc;
+
+   cnt = free_cnt - free_cnt % txq->tx_free_thresh;
+
+   for (i = 0; i < cnt; i += n) {
+   if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
+   break;
+
+   n = ngbe_tx_free_bufs(txq);
+
+   if (n == 0)
+   break;
+   }
+
+   return i;
+}
+
+int
+ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
+{
+   struct ngbe_tx_queue *txq = (struct ngbe_tx_queue *)tx_queue;
+   if (txq->offloads == 0 &&
+   txq->tx_free_thresh >= RTE_PMD_NGBE_TX_MAX_BURST)
+   return ngbe_tx_done_cleanup_simple(txq, free_cnt);
+
+   return ngbe_tx_done_cleanup_full(txq, free_cnt);
+}
+
 static void
 ngbe_tx_free_swring(struct ngbe_tx_queue *txq)
 {
diff --git a/drivers/net/ngbe/ngbe_rxtx.h b/drivers/net/ngbe/ngbe_rxtx.h
index 812bc57c9e..d63b25c1aa 100644
--- a/drivers/net/ngbe/ngbe_rxtx.h
+++ b/drivers/net/ngbe/ngbe_rxtx.h
@@ -370,6 +370,7 @@ struct ngbe_txq_ops {
 void ngbe_set_tx_function(struct rte_eth_dev *dev, struct ngbe_tx_queue *txq);
 
 void ngbe_set_rx_function(struct rte_eth_dev *dev);
+int ngbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt);
 
 uint64_t ngbe_get_tx_port_offloads(struct rte_eth_dev *dev);
 uint64_t ngbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 28/32] net/ngbe: add IPsec context creation

2021-09-08 Thread Jiawen Wu
Initialize the security context, and add support for getting
security capabilities.

Signed-off-by: Jiawen Wu 
---
 doc/guides/nics/features/ngbe.ini |   1 +
 drivers/net/ngbe/meson.build  |   3 +-
 drivers/net/ngbe/ngbe_ethdev.c|  10 ++
 drivers/net/ngbe/ngbe_ethdev.h|   4 +
 drivers/net/ngbe/ngbe_ipsec.c | 178 ++
 5 files changed, 195 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ngbe/ngbe_ipsec.c
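
A minimal sketch of how an application would discover the new security context and
its capabilities, assuming a bound ngbe port and a DPDK build with RTE_LIB_SECURITY:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_security.h>

static void
show_inline_ipsec_caps(uint16_t port_id)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	const struct rte_security_capability *cap;

	if (ctx == NULL) {
		printf("port %u exposes no security context\n", port_id);
		return;
	}

	/* The capability array is terminated by an ACTION_TYPE_NONE entry. */
	for (cap = rte_security_capabilities_get(ctx);
	     cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
	     cap++) {
		if (cap->action == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
		    cap->protocol == RTE_SECURITY_PROTOCOL_IPSEC)
			printf("inline IPsec: proto %d mode %d dir %d\n",
			       cap->ipsec.proto, cap->ipsec.mode,
			       cap->ipsec.direction);
	}
}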

diff --git a/doc/guides/nics/features/ngbe.ini 
b/doc/guides/nics/features/ngbe.ini
index 56d5d71ea8..facdb5f006 100644
--- a/doc/guides/nics/features/ngbe.ini
+++ b/doc/guides/nics/features/ngbe.ini
@@ -23,6 +23,7 @@ RSS reta update  = Y
 SR-IOV   = Y
 VLAN filter  = Y
 Flow control = Y
+Inline crypto= Y
 CRC offload  = P
 VLAN offload = P
 QinQ offload = P
diff --git a/drivers/net/ngbe/meson.build b/drivers/net/ngbe/meson.build
index b276ec3341..f222595b19 100644
--- a/drivers/net/ngbe/meson.build
+++ b/drivers/net/ngbe/meson.build
@@ -12,12 +12,13 @@ objs = [base_objs]
 
 sources = files(
 'ngbe_ethdev.c',
+'ngbe_ipsec.c',
 'ngbe_ptypes.c',
 'ngbe_pf.c',
 'ngbe_rxtx.c',
 )
 
-deps += ['hash']
+deps += ['hash', 'security']
 
 includes += include_directories('base')
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.c b/drivers/net/ngbe/ngbe_ethdev.c
index 4eaf9b0724..b0e0f7411e 100644
--- a/drivers/net/ngbe/ngbe_ethdev.c
+++ b/drivers/net/ngbe/ngbe_ethdev.c
@@ -430,6 +430,12 @@ eth_ngbe_dev_init(struct rte_eth_dev *eth_dev, void 
*init_params __rte_unused)
/* Unlock any pending hardware semaphore */
ngbe_swfw_lock_reset(hw);
 
+#ifdef RTE_LIB_SECURITY
+   /* Initialize security_ctx only for primary process*/
+   if (ngbe_ipsec_ctx_create(eth_dev))
+   return -ENOMEM;
+#endif
+
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ngbe_fc_full;
hw->fc.current_mode = ngbe_fc_full;
@@ -1282,6 +1288,10 @@ ngbe_dev_close(struct rte_eth_dev *dev)
rte_free(dev->data->hash_mac_addrs);
dev->data->hash_mac_addrs = NULL;
 
+#ifdef RTE_LIB_SECURITY
+   rte_free(dev->security_ctx);
+#endif
+
return ret;
 }
 
diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index aacc0b68b2..9eda024d65 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -264,6 +264,10 @@ void ngbe_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int ngbe_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+#ifdef RTE_LIB_SECURITY
+int ngbe_ipsec_ctx_create(struct rte_eth_dev *dev);
+#endif
+
 /* High threshold controlling when to start sending XOFF frames. */
 #define NGBE_FC_XOFF_HITH  128 /*KB*/
 /* Low threshold controlling when to start sending XON frames. */
diff --git a/drivers/net/ngbe/ngbe_ipsec.c b/drivers/net/ngbe/ngbe_ipsec.c
new file mode 100644
index 00..5f8b0bab29
--- /dev/null
+++ b/drivers/net/ngbe/ngbe_ipsec.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
+ * Copyright(c) 2010-2017 Intel Corporation
+ */
+
+#include 
+#include 
+#include 
+
+#include "base/ngbe.h"
+#include "ngbe_ethdev.h"
+
+static const struct rte_security_capability *
+ngbe_crypto_capabilities_get(void *device __rte_unused)
+{
+   static const struct rte_cryptodev_capabilities
+   aes_gcm_gmac_crypto_capabilities[] = {
+   {   /* AES GMAC (128-bit) */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+   .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+   {.auth = {
+   .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+   .block_size = 16,
+   .key_size = {
+   .min = 16,
+   .max = 16,
+   .increment = 0
+   },
+   .digest_size = {
+   .min = 16,
+   .max = 16,
+   .increment = 0
+   },
+   .iv_size = {
+   .min = 12,
+   .max = 12,
+   .increment = 0
+   }
+   }, }
+   }, }
+   },
+   {   /* AES GCM (128-bit) */
+   .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+   {.sym = {
+

[dpdk-dev] [PATCH 29/32] net/ngbe: create and destroy security session

2021-09-08 Thread Jiawen Wu
Add support for configuring a security session, including create
and destroy operations for a security session.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ethdev.h |   8 +
 drivers/net/ngbe/ngbe_ipsec.c  | 377 +
 drivers/net/ngbe/ngbe_ipsec.h  |  78 +++
 3 files changed, 463 insertions(+)
 create mode 100644 drivers/net/ngbe/ngbe_ipsec.h
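
A rough sketch of creating an inline-crypto IPsec session against this PMD, with
the crypto transform and session mempools assumed to be prepared elsewhere; the
four-argument rte_security_session_create() prototype shown here is the one from
this era of DPDK and may differ in other releases:

#include <rte_ethdev.h>
#include <rte_security.h>
#include <rte_crypto.h>

static struct rte_security_session *
create_inline_ipsec_session(uint16_t port_id, uint32_t spi,
			    struct rte_crypto_sym_xform *crypto_xform,
			    struct rte_mempool *sess_mp,
			    struct rte_mempool *sess_priv_mp)
{
	struct rte_security_ctx *ctx = rte_eth_dev_get_sec_ctx(port_id);
	struct rte_security_session_conf conf = {
		.action_type = RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO,
		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
		.ipsec = {
			.spi = spi,
			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
			.mode = RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
		},
		.crypto_xform = crypto_xform,
	};

	if (ctx == NULL)
		return NULL;

	return rte_security_session_create(ctx, &conf, sess_mp, sess_priv_mp);
}

The matching teardown is rte_security_session_destroy(ctx, sess), which maps to
the remove operation added below.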

diff --git a/drivers/net/ngbe/ngbe_ethdev.h b/drivers/net/ngbe/ngbe_ethdev.h
index 9eda024d65..e8ce01e1f4 100644
--- a/drivers/net/ngbe/ngbe_ethdev.h
+++ b/drivers/net/ngbe/ngbe_ethdev.h
@@ -7,6 +7,9 @@
 #define _NGBE_ETHDEV_H_
 
 #include "ngbe_ptypes.h"
+#ifdef RTE_LIB_SECURITY
+#include "ngbe_ipsec.h"
+#endif
 #include 
 #include 
 #include 
@@ -107,6 +110,9 @@ struct ngbe_adapter {
struct ngbe_hwstriphwstrip;
struct ngbe_vf_info*vfdata;
struct ngbe_uta_info   uta_info;
+#ifdef RTE_LIB_SECURITY
+   struct ngbe_ipsec  ipsec;
+#endif
bool   rx_bulk_alloc_allowed;
struct rte_timecounter systime_tc;
struct rte_timecounter rx_tstamp_tc;
@@ -160,6 +166,8 @@ ngbe_dev_intr(struct rte_eth_dev *dev)
 #define NGBE_DEV_UTA_INFO(dev) \
(&((struct ngbe_adapter *)(dev)->data->dev_private)->uta_info)
 
+#define NGBE_DEV_IPSEC(dev) \
+   (&((struct ngbe_adapter *)(dev)->data->dev_private)->ipsec)
 
 /*
  * Rx/Tx function prototypes
diff --git a/drivers/net/ngbe/ngbe_ipsec.c b/drivers/net/ngbe/ngbe_ipsec.c
index 5f8b0bab29..80151d45dc 100644
--- a/drivers/net/ngbe/ngbe_ipsec.c
+++ b/drivers/net/ngbe/ngbe_ipsec.c
@@ -9,6 +9,381 @@
 
 #include "base/ngbe.h"
 #include "ngbe_ethdev.h"
+#include "ngbe_ipsec.h"
+
+#define CMP_IP(a, b) (\
+   (a).ipv6[0] == (b).ipv6[0] && \
+   (a).ipv6[1] == (b).ipv6[1] && \
+   (a).ipv6[2] == (b).ipv6[2] && \
+   (a).ipv6[3] == (b).ipv6[3])
+
+static int
+ngbe_crypto_add_sa(struct ngbe_crypto_session *ic_session)
+{
+   struct rte_eth_dev *dev = ic_session->dev;
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   struct ngbe_ipsec *priv = NGBE_DEV_IPSEC(dev);
+   uint32_t reg_val;
+   int sa_index = -1;
+
+   if (ic_session->op == NGBE_OP_AUTHENTICATED_DECRYPTION) {
+   int i, ip_index = -1;
+   uint8_t *key;
+
+   /* Find a match in the IP table*/
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   if (CMP_IP(priv->rx_ip_tbl[i].ip,
+  ic_session->dst_ip)) {
+   ip_index = i;
+   break;
+   }
+   }
+   /* If no match, find a free entry in the IP table*/
+   if (ip_index < 0) {
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   if (priv->rx_ip_tbl[i].ref_count == 0) {
+   ip_index = i;
+   break;
+   }
+   }
+   }
+
+   /* Fail if no match and no free entries*/
+   if (ip_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "No free entry left in the Rx IP table\n");
+   return -1;
+   }
+
+   /* Find a free entry in the SA table*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   if (priv->rx_sa_tbl[i].used == 0) {
+   sa_index = i;
+   break;
+   }
+   }
+   /* Fail if no free entries*/
+   if (sa_index < 0) {
+   PMD_DRV_LOG(ERR,
+   "No free entry left in the Rx SA table\n");
+   return -1;
+   }
+
+   priv->rx_ip_tbl[ip_index].ip.ipv6[0] =
+   ic_session->dst_ip.ipv6[0];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[1] =
+   ic_session->dst_ip.ipv6[1];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[2] =
+   ic_session->dst_ip.ipv6[2];
+   priv->rx_ip_tbl[ip_index].ip.ipv6[3] =
+   ic_session->dst_ip.ipv6[3];
+   priv->rx_ip_tbl[ip_index].ref_count++;
+
+   priv->rx_sa_tbl[sa_index].spi = ic_session->spi;
+   priv->rx_sa_tbl[sa_index].ip_index = ip_index;
+   priv->rx_sa_tbl[sa_index].mode = IPSRXMOD_VALID;
+   if (ic_session->op == NGBE_OP_AUTHENTICATED_DECRYPTION)
+   priv->rx_sa_tbl[sa_index].mode |=
+   (IPSRXMOD_PROTO | IPSRXMOD_DECRYPT);
+   if (ic_session->dst_ip.type == IPv6) {
+   priv->rx_sa_tbl[sa_index].mode |= IPSRXMOD_IPV6;
+   priv->

[dpdk-dev] [PATCH 31/32] net/ngbe: add security offload in Rx and Tx

2021-09-08 Thread Jiawen Wu
Add security offload to the Rx and Tx datapath.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ipsec.c | 106 ++
 drivers/net/ngbe/ngbe_ipsec.h |   2 +
 drivers/net/ngbe/ngbe_rxtx.c  |  91 -
 drivers/net/ngbe/ngbe_rxtx.h  |  14 -
 4 files changed, 210 insertions(+), 3 deletions(-)
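
To put the datapath changes in context, a hedged sketch of the port configuration
an application would use to turn the inline path on (the capability bits should
first be confirmed via rte_eth_dev_info_get()):

#include <rte_ethdev.h>

static int
configure_port_with_inline_ipsec(uint16_t port_id, uint16_t nb_rxq,
				 uint16_t nb_txq)
{
	struct rte_eth_conf port_conf = { 0 };

	/* Request security processing in both directions. */
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
	port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}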

diff --git a/drivers/net/ngbe/ngbe_ipsec.c b/drivers/net/ngbe/ngbe_ipsec.c
index cc79d7d88f..54e05a834f 100644
--- a/drivers/net/ngbe/ngbe_ipsec.c
+++ b/drivers/net/ngbe/ngbe_ipsec.c
@@ -17,6 +17,55 @@
(a).ipv6[2] == (b).ipv6[2] && \
(a).ipv6[3] == (b).ipv6[3])
 
+static void
+ngbe_crypto_clear_ipsec_tables(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   struct ngbe_ipsec *priv = NGBE_DEV_IPSEC(dev);
+   int i = 0;
+
+   /* clear Rx IP table*/
+   for (i = 0; i < IPSEC_MAX_RX_IP_COUNT; i++) {
+   uint16_t index = i << 3;
+   uint32_t reg_val = NGBE_IPSRXIDX_WRITE |
+   NGBE_IPSRXIDX_TB_IP | index;
+   wr32(hw, NGBE_IPSRXADDR(0), 0);
+   wr32(hw, NGBE_IPSRXADDR(1), 0);
+   wr32(hw, NGBE_IPSRXADDR(2), 0);
+   wr32(hw, NGBE_IPSRXADDR(3), 0);
+   wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+   }
+
+   /* clear Rx SPI and Rx/Tx SA tables*/
+   for (i = 0; i < IPSEC_MAX_SA_COUNT; i++) {
+   uint32_t index = i << 3;
+   uint32_t reg_val = NGBE_IPSRXIDX_WRITE |
+   NGBE_IPSRXIDX_TB_SPI | index;
+   wr32(hw, NGBE_IPSRXSPI, 0);
+   wr32(hw, NGBE_IPSRXADDRIDX, 0);
+   wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+   reg_val = NGBE_IPSRXIDX_WRITE | NGBE_IPSRXIDX_TB_KEY | index;
+   wr32(hw, NGBE_IPSRXKEY(0), 0);
+   wr32(hw, NGBE_IPSRXKEY(1), 0);
+   wr32(hw, NGBE_IPSRXKEY(2), 0);
+   wr32(hw, NGBE_IPSRXKEY(3), 0);
+   wr32(hw, NGBE_IPSRXSALT, 0);
+   wr32(hw, NGBE_IPSRXMODE, 0);
+   wr32w(hw, NGBE_IPSRXIDX, reg_val, NGBE_IPSRXIDX_WRITE, 1000);
+   reg_val = NGBE_IPSTXIDX_WRITE | index;
+   wr32(hw, NGBE_IPSTXKEY(0), 0);
+   wr32(hw, NGBE_IPSTXKEY(1), 0);
+   wr32(hw, NGBE_IPSTXKEY(2), 0);
+   wr32(hw, NGBE_IPSTXKEY(3), 0);
+   wr32(hw, NGBE_IPSTXSALT, 0);
+   wr32w(hw, NGBE_IPSTXIDX, reg_val, NGBE_IPSTXIDX_WRITE, 1000);
+   }
+
+   memset(priv->rx_ip_tbl, 0, sizeof(priv->rx_ip_tbl));
+   memset(priv->rx_sa_tbl, 0, sizeof(priv->rx_sa_tbl));
+   memset(priv->tx_sa_tbl, 0, sizeof(priv->tx_sa_tbl));
+}
+
 static int
 ngbe_crypto_add_sa(struct ngbe_crypto_session *ic_session)
 {
@@ -550,6 +599,63 @@ ngbe_crypto_capabilities_get(void *device __rte_unused)
return ngbe_security_capabilities;
 }
 
+int
+ngbe_crypto_enable_ipsec(struct rte_eth_dev *dev)
+{
+   struct ngbe_hw *hw = ngbe_dev_hw(dev);
+   uint32_t reg;
+   uint64_t rx_offloads;
+   uint64_t tx_offloads;
+
+   rx_offloads = dev->data->dev_conf.rxmode.offloads;
+   tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+   /* sanity checks */
+   if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+   PMD_DRV_LOG(ERR, "RSC and IPsec not supported");
+   return -1;
+   }
+   if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
+   PMD_DRV_LOG(ERR, "HW CRC strip needs to be enabled for IPsec");
+   return -1;
+   }
+
+   /* Set NGBE_SECTXBUFFAF to 0x14 as required in the datasheet*/
+   wr32(hw, NGBE_SECTXBUFAF, 0x14);
+
+   /* IFG needs to be set to 3 when we are using security. Otherwise a Tx
+* hang will occur with heavy traffic.
+*/
+   reg = rd32(hw, NGBE_SECTXIFG);
+   reg = (reg & ~NGBE_SECTXIFG_MIN_MASK) | NGBE_SECTXIFG_MIN(0x3);
+   wr32(hw, NGBE_SECTXIFG, reg);
+
+   reg = rd32(hw, NGBE_SECRXCTL);
+   reg |= NGBE_SECRXCTL_CRCSTRIP;
+   wr32(hw, NGBE_SECRXCTL, reg);
+
+   if (rx_offloads & DEV_RX_OFFLOAD_SECURITY) {
+   wr32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA, 0);
+   reg = rd32m(hw, NGBE_SECRXCTL, NGBE_SECRXCTL_ODSA);
+   if (reg != 0) {
+   PMD_DRV_LOG(ERR, "Error enabling Rx Crypto");
+   return -1;
+   }
+   }
+   if (tx_offloads & DEV_TX_OFFLOAD_SECURITY) {
+   wr32(hw, NGBE_SECTXCTL, NGBE_SECTXCTL_STFWD);
+   reg = rd32(hw, NGBE_SECTXCTL);
+   if (reg != NGBE_SECTXCTL_STFWD) {
+   PMD_DRV_LOG(ERR, "Error enabling Tx Crypto");
+   return -1;
+   }
+   }
+
+   ngbe_crypto_clear_ipsec_tables(dev);
+
+   return 0;
+}
+
 static struct rte_security_ops ngbe_security_ops = {
.sessio

[dpdk-dev] [PATCH 30/32] net/ngbe: support security operations

2021-09-08 Thread Jiawen Wu
Add support for updating a security session and for clearing
security session statistics.

Signed-off-by: Jiawen Wu 
---
 drivers/net/ngbe/ngbe_ipsec.c | 41 +++
 drivers/net/ngbe/ngbe_ipsec.h | 15 +
 2 files changed, 56 insertions(+)
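
For illustration, a minimal sketch of the transmit-side call that ends up in the
new set_pkt_metadata handler; ctx and sess are assumed to come from the
session-creation step of the previous patch:

#include <rte_mbuf.h>
#include <rte_security.h>

static int
attach_ipsec_session(struct rte_security_ctx *ctx,
		     struct rte_security_session *sess,
		     struct rte_mbuf *m)
{
	/* Let the PMD stash the SA index and pad length in the mbuf. */
	int ret = rte_security_set_pkt_metadata(ctx, sess, m, NULL);

	if (ret == 0)
		m->ol_flags |= PKT_TX_SEC_OFFLOAD;

	return ret;
}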

diff --git a/drivers/net/ngbe/ngbe_ipsec.c b/drivers/net/ngbe/ngbe_ipsec.c
index 80151d45dc..cc79d7d88f 100644
--- a/drivers/net/ngbe/ngbe_ipsec.c
+++ b/drivers/net/ngbe/ngbe_ipsec.c
@@ -360,6 +360,12 @@ ngbe_crypto_create_session(void *device,
return 0;
 }
 
+static unsigned int
+ngbe_crypto_session_get_size(__rte_unused void *device)
+{
+   return sizeof(struct ngbe_crypto_session);
+}
+
 static int
 ngbe_crypto_remove_session(void *device,
struct rte_security_session *session)
@@ -385,6 +391,39 @@ ngbe_crypto_remove_session(void *device,
return 0;
 }
 
+static inline uint8_t
+ngbe_crypto_compute_pad_len(struct rte_mbuf *m)
+{
+   if (m->nb_segs == 1) {
+   /* 16 bytes ICV + 2 bytes ESP trailer + payload padding size
+* payload padding size is stored at 
+*/
+   uint8_t *esp_pad_len = rte_pktmbuf_mtod_offset(m, uint8_t *,
+   rte_pktmbuf_pkt_len(m) -
+   (ESP_TRAILER_SIZE + ESP_ICV_SIZE));
+   return *esp_pad_len + ESP_TRAILER_SIZE + ESP_ICV_SIZE;
+   }
+   return 0;
+}
+
+static int
+ngbe_crypto_update_mb(void *device __rte_unused,
+   struct rte_security_session *session,
+  struct rte_mbuf *m, void *params __rte_unused)
+{
+   struct ngbe_crypto_session *ic_session =
+   get_sec_session_private_data(session);
+   if (ic_session->op == NGBE_OP_AUTHENTICATED_ENCRYPTION) {
+   union ngbe_crypto_tx_desc_md *mdata =
+   (union ngbe_crypto_tx_desc_md *)
+   rte_security_dynfield(m);
+   mdata->enc = 1;
+   mdata->sa_idx = ic_session->sa_index;
+   mdata->pad_len = ngbe_crypto_compute_pad_len(m);
+   }
+   return 0;
+}
+
 static const struct rte_security_capability *
 ngbe_crypto_capabilities_get(void *device __rte_unused)
 {
@@ -513,7 +552,9 @@ ngbe_crypto_capabilities_get(void *device __rte_unused)
 
 static struct rte_security_ops ngbe_security_ops = {
.session_create = ngbe_crypto_create_session,
+   .session_get_size = ngbe_crypto_session_get_size,
.session_destroy = ngbe_crypto_remove_session,
+   .set_pkt_metadata = ngbe_crypto_update_mb,
.capabilities_get = ngbe_crypto_capabilities_get
 };
 
diff --git a/drivers/net/ngbe/ngbe_ipsec.h b/drivers/net/ngbe/ngbe_ipsec.h
index 8442bb2157..fa5f21027b 100644
--- a/drivers/net/ngbe/ngbe_ipsec.h
+++ b/drivers/net/ngbe/ngbe_ipsec.h
@@ -18,6 +18,9 @@
 #define IPSEC_MAX_RX_IP_COUNT   16
 #define IPSEC_MAX_SA_COUNT  16
 
+#define ESP_ICV_SIZE 16
+#define ESP_TRAILER_SIZE 2
+
 enum ngbe_operation {
NGBE_OP_AUTHENTICATED_ENCRYPTION,
NGBE_OP_AUTHENTICATED_DECRYPTION
@@ -69,6 +72,18 @@ struct ngbe_crypto_tx_sa_table {
uint8_t  used;
 };
 
+union ngbe_crypto_tx_desc_md {
+   uint64_t data;
+   struct {
+   /**< SA table index */
+   uint32_t sa_idx;
+   /**< ICV and ESP trailer length */
+   uint8_t pad_len;
+   /**< enable encryption */
+   uint8_t enc;
+   };
+};
+
 struct ngbe_ipsec {
struct ngbe_crypto_rx_ip_table rx_ip_tbl[IPSEC_MAX_RX_IP_COUNT];
struct ngbe_crypto_rx_sa_table rx_sa_tbl[IPSEC_MAX_SA_COUNT];
-- 
2.21.0.windows.1





[dpdk-dev] [PATCH 32/32] doc: update for ngbe

2021-09-08 Thread Jiawen Wu
Add the new ngbe PMD features to the 21.11 release notes.

Signed-off-by: Jiawen Wu 
---
 doc/guides/rel_notes/release_21_11.rst | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 675b573834..81093cf6c0 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -62,6 +62,16 @@ New Features
   * Added bus-level parsing of the devargs syntax.
   * Kept compatibility with the legacy syntax as parsing fallback.
 
+* **Updated Wangxun ngbe driver.**
+  Updated the Wangxun ngbe driver. Add more features to complete the driver,
+  some of them including:
+
+  * Added offloads and packet type on RxTx.
+  * Added device basic statistics and extended stats.
+  * Added VLAN and MAC filters.
+  * Added multi-queue and RSS.
+  * Added SRIOV.
+  * Added IPsec.
 
 Removed Items
 -
-- 
2.21.0.windows.1





Re: [dpdk-dev] [RFC PATCH v5 1/5] sched: add PIE based congestion management

2021-09-08 Thread Liguzinski, WojciechX
Thanks Stephen,

I will do my best to apply your comments.

Best Regards,
Wojciech Liguzinski

-Original Message-
From: Stephen Hemminger  
Sent: Tuesday, September 7, 2021 9:15 PM
To: Liguzinski, WojciechX 
Cc: dev@dpdk.org; Singh, Jasvinder ; Dumitrescu, 
Cristian ; Ajmera, Megha 
Subject: Re: [dpdk-dev] [RFC PATCH v5 1/5] sched: add PIE based congestion 
management

On Tue,  7 Sep 2021 07:33:24 +
"Liguzinski, WojciechX"  wrote:

> +/**
> + * @brief make a decision to drop or enqueue a packet based on probability
> + *criteria
> + *
> + * @param pie_cfg [in] config pointer to a PIE configuration 
> +parameter structure
> + * @param pie [in, out] data pointer to PIE runtime data
> + * @param time [in] current time (measured in cpu cycles)  */ static 
> +inline void __rte_experimental _calc_drop_probability(const struct 
> +rte_pie_config *pie_cfg,
> + struct rte_pie *pie, uint64_t time)

This code adds a lot of inline functions in the name of performance.
But every inline like this means the internal ABI for the implementation has to
be exposed.

You would probably get a bigger performance bump from not using floating point
in the internal math than from the minor performance optimization of having so
many inlines.
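
One way to keep that internal math in integers is to carry the drop probability as
a fixed-point fraction and compare it against a random word, roughly along these
lines (a sketch only, not the proposed PIE code):

#include <stdint.h>
#include <rte_random.h>

/* Drop probability in Q32 fixed point: UINT32_MAX maps to ~1.0. */
static inline int
pie_drop_decision(uint32_t drop_prob_q32)
{
	uint32_t r = (uint32_t)rte_rand();	/* low 32 random bits */

	return r < drop_prob_q32;	/* 1 = drop, 0 = enqueue */
}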


Re: [dpdk-dev] [PATCH] ethdev: promote sibling iterators to stable

2021-09-08 Thread Kinsella, Ray



On 06/09/2021 15:19, Andrew Rybchenko wrote:
> On 9/6/21 4:02 PM, David Marchand wrote:
>> This API saw no update since its introduction and will help applications
>> like OVS ([1] and [2]) that currently look at rte_eth_devices[] to
>> achieve the same.
>>
>> 1: https://github.com/openvswitch/ovs/blob/master/lib/netdev-dpdk.c#L1285
>> 2: https://github.com/openvswitch/ovs/blob/master/lib/netdev-dpdk.c#L1476
>>
>> Signed-off-by: David Marchand 
> 
> Acked-by: Andrew Rybchenko 
> 
Acked-by: Ray Kinsella 


Re: [dpdk-dev] [PATCH] vhost: promote some APIs to stable

2021-09-08 Thread Kinsella, Ray



On 07/09/2021 03:58, Chenbo Xia wrote:
> As reported by symbol bot, APIs listed in this patch have been
> experimental for more than two years. This patch promotes these
> 18 APIs to stable.
> 
> Signed-off-by: Chenbo Xia 
> ---
>  lib/vhost/rte_vhost.h| 13 -
>  lib/vhost/rte_vhost_crypto.h |  5 -
>  lib/vhost/version.map| 36 ++--
>  3 files changed, 18 insertions(+), 36 deletions(-)
> 

Acked-by: Ray Kinsella 


[dpdk-dev] [PATCH v2] net/af_packet: reinsert the stripped vlan tag

2021-09-08 Thread Tudor Cornea
The af_packet PMD binds to a raw socket and allows
sending and receiving packets through the kernel.

Since commit [1], the kernel strips the VLAN tags early in
__netif_receive_skb_core(), so we receive untagged packets while
running with the af_packet PMD.

Luckily for us, the skb VLAN-related fields are still populated from the
stripped VLAN tags, so we end up with all the information
that we need in the mbuf.

Having the PMD support DEV_RX_OFFLOAD_VLAN_STRIP allows the
application to control the desired VLAN stripping behavior.
[1] 
https://github.com/torvalds/linux/commit/bcc6d47903612c3861201cc3a866fb604f26b8b2

Signed-off-by: Tudor Cornea 

---
v2:
* Add DEV_RX_OFFLOAD_VLAN_STRIP to rxmode->offloads
---
 drivers/net/af_packet/rte_eth_af_packet.c | 12 
 1 file changed, 12 insertions(+)
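
A small hedged sketch of the application side this enables: requesting the
stripping behavior at configure time (the offload bit should be checked against
dev_info.rx_offload_capa first):

#include <rte_ethdev.h>

static int
configure_with_vlan_strip(uint16_t port_id)
{
	struct rte_eth_conf conf = { 0 };

	/* Keep the tag out of the frame; it stays in mbuf->vlan_tci. */
	conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

When the offload is not requested, the PMD now re-inserts the tag into the packet
data via rte_vlan_insert(), as in the hunk below.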

diff --git a/drivers/net/af_packet/rte_eth_af_packet.c 
b/drivers/net/af_packet/rte_eth_af_packet.c
index b73b211..5ed9dd6 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -48,6 +48,7 @@ struct pkt_rx_queue {
 
struct rte_mempool *mb_pool;
uint16_t in_port;
+   uint8_t vlan_strip;
 
volatile unsigned long rx_pkts;
volatile unsigned long rx_bytes;
@@ -78,6 +79,7 @@ struct pmd_internals {
 
struct pkt_rx_queue *rx_queue;
struct pkt_tx_queue *tx_queue;
+   uint8_t vlan_strip;
 };
 
 static const char *valid_arguments[] = {
@@ -148,6 +150,9 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, 
uint16_t nb_pkts)
if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
mbuf->vlan_tci = ppd->tp_vlan_tci;
mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+
+   if (!pkt_q->vlan_strip && rte_vlan_insert(&mbuf))
+   PMD_LOG(ERR, "Failed to reinsert VLAN tag");
}
 
/* release incoming frame and advance ring buffer */
@@ -302,6 +307,11 @@ eth_dev_stop(struct rte_eth_dev *dev)
 static int
 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
+   struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+   const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+   struct pmd_internals *internals = dev->data->dev_private;
+
+   internals->vlan_strip = !!(rxmode->offloads & 
DEV_RX_OFFLOAD_VLAN_STRIP);
return 0;
 }
 
@@ -318,6 +328,7 @@ eth_dev_info(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
dev_info->min_rx_bufsize = 0;
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT;
+   dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
 
return 0;
 }
@@ -448,6 +459,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 
dev->data->rx_queues[rx_queue_id] = pkt_q;
pkt_q->in_port = dev->data->port_id;
+   pkt_q->vlan_strip = internals->vlan_strip;
 
return 0;
 }
-- 
2.7.4



Re: [dpdk-dev] [PATCH] doc: announce change in vfio dma mapping

2021-09-08 Thread Kinsella, Ray
> 
> So there won't be symbol versioning but only new API, which means no 
> deprecation
> notice is required, please update this patch's status accordingly.
> 
> Thanks for keep working on the issue to find a better solution.
> 

+1, good work


[dpdk-dev] [PATCH v2 02/13] net/bnxt: enable dpool allocator

2021-09-08 Thread Venkat Duvvuru
From: Peter Spreadborough 

Enable dynamic entry allocator for EM SRAM entries.
Deprecate static entry allocator code.

Signed-off-by: Peter Spreadborough 
Reviewed-by: Randy Schacher 
---
 drivers/net/bnxt/tf_core/tf_device_p58.c  |   4 -
 drivers/net/bnxt/tf_core/tf_em.h  |  10 -
 .../net/bnxt/tf_core/tf_em_hash_internal.c|  34 
 drivers/net/bnxt/tf_core/tf_em_internal.c | 180 +-
 4 files changed, 1 insertion(+), 227 deletions(-)

diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c 
b/drivers/net/bnxt/tf_core/tf_device_p58.c
index ce4d8c661f..808dcb1f77 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -348,11 +348,7 @@ const struct tf_dev_ops tf_dev_ops_p58 = {
.tf_dev_get_tcam_resc_info = tf_tcam_get_resc_info,
.tf_dev_insert_int_em_entry = tf_em_hash_insert_int_entry,
.tf_dev_delete_int_em_entry = tf_em_hash_delete_int_entry,
-#if (TF_EM_ALLOC == 1)
.tf_dev_move_int_em_entry = tf_em_move_int_entry,
-#else
-   .tf_dev_move_int_em_entry = NULL,
-#endif
.tf_dev_insert_ext_em_entry = NULL,
.tf_dev_delete_ext_em_entry = NULL,
.tf_dev_get_em_resc_info = tf_em_get_resc_info,
diff --git a/drivers/net/bnxt/tf_core/tf_em.h b/drivers/net/bnxt/tf_core/tf_em.h
index 568071ad8c..074c128651 100644
--- a/drivers/net/bnxt/tf_core/tf_em.h
+++ b/drivers/net/bnxt/tf_core/tf_em.h
@@ -13,16 +13,6 @@
 
 #include "hcapi_cfa_defs.h"
 
-/**
- * TF_EM_ALLOC
- *
- * 0: Use stack allocator with fixed sized entries
- *(default).
- * 1: Use dpool allocator with variable size
- *entries.
- */
-#define TF_EM_ALLOC 0
-
 #define TF_EM_MIN_ENTRIES (1 << 15) /* 32K */
 #define TF_EM_MAX_ENTRIES (1 << 27) /* 128M */
 
diff --git a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c 
b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
index 098e8af07e..60273a798c 100644
--- a/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_hash_internal.c
@@ -22,9 +22,7 @@
 /**
  * EM Pool
  */
-#if (TF_EM_ALLOC == 1)
 #include "dpool.h"
-#endif
 
 /**
  * Insert EM internal entry API
@@ -41,11 +39,7 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
uint16_t rptr_index = 0;
uint8_t rptr_entry = 0;
uint8_t num_of_entries = 0;
-#if (TF_EM_ALLOC == 1)
struct dpool *pool;
-#else
-   struct stack *pool;
-#endif
uint32_t index;
uint32_t key0_hash;
uint32_t key1_hash;
@@ -62,7 +56,6 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
rc = tf_session_get_device(tfs, &dev);
if (rc)
return rc;
-#if (TF_EM_ALLOC == 1)
pool = (struct dpool *)tfs->em_pool[parms->dir];
index = dpool_alloc(pool,
parms->em_record_sz_in_bits / 128,
@@ -74,16 +67,6 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
tf_dir_2_str(parms->dir));
return -1;
}
-#else
-   pool = (struct stack *)tfs->em_pool[parms->dir];
-   rc = stack_pop(pool, &index);
-   if (rc) {
-   PMD_DRV_LOG(ERR,
-   "%s, EM entry index allocation failed\n",
-   tf_dir_2_str(parms->dir));
-   return rc;
-   }
-#endif
 
if (dev->ops->tf_dev_cfa_key_hash == NULL)
return -EINVAL;
@@ -103,11 +86,7 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
  &num_of_entries);
if (rc) {
/* Free the allocated index before returning */
-#if (TF_EM_ALLOC == 1)
dpool_free(pool, index);
-#else
-   stack_push(pool, index);
-#endif
return -1;
}
 
@@ -128,9 +107,7 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
 rptr_index,
 rptr_entry,
 0);
-#if (TF_EM_ALLOC == 1)
dpool_set_entry_data(pool, index, parms->flow_handle);
-#endif
return 0;
 }
 
@@ -146,11 +123,7 @@ tf_em_hash_delete_int_entry(struct tf *tfp,
 {
int rc = 0;
struct tf_session *tfs;
-#if (TF_EM_ALLOC == 1)
struct dpool *pool;
-#else
-   struct stack *pool;
-#endif
/* Retrieve the session information */
rc = tf_session_get_session(tfp, &tfs);
if (rc) {
@@ -165,19 +138,13 @@ tf_em_hash_delete_int_entry(struct tf *tfp,
 
/* Return resource to pool */
if (rc == 0) {
-#if (TF_EM_ALLOC == 1)
pool = (struct dpool *)tfs->em_pool[parms->dir];
dpool_free(pool, parms->index);
-#else
-   pool = (struct stack *)tfs->em_pool[parms->dir];
-   stack_push(pool, parms->index);
-#endif
}
 
return rc;
 }
 
-#if (TF_EM_ALLOC == 1)
 /** Move EM internal entry API
  *
  * returns:
@@ -212,4 +179,3 @@ tf_em_move_int_entry(struct tf *tfp,
 
r

[dpdk-dev] [PATCH v2 01/13] net/bnxt: tf core index table updates

2021-09-08 Thread Venkat Duvvuru
From: Farah Smith 

Remove unused shadow table functionality.

Signed-off-by: Farah Smith 
Reviewed-by: Peter Spreadborough 
---
 drivers/net/bnxt/tf_core/tf_core.c   |  65 --
 drivers/net/bnxt/tf_core/tf_core.h   | 103 +--
 drivers/net/bnxt/tf_core/tf_device.h |  22 -
 drivers/net/bnxt/tf_core/tf_device_p4.c  |   2 -
 drivers/net/bnxt/tf_core/tf_device_p58.c |   2 -
 drivers/net/bnxt/tf_core/tf_em_common.c  |   4 +
 drivers/net/bnxt/tf_core/tf_tbl.c|  21 -
 drivers/net/bnxt/tf_core/tf_tbl.h|  72 
 drivers/net/bnxt/tf_ulp/ulp_mapper.c |   3 +-
 9 files changed, 7 insertions(+), 287 deletions(-)

diff --git a/drivers/net/bnxt/tf_core/tf_core.c 
b/drivers/net/bnxt/tf_core/tf_core.c
index 97e6165e92..5458f76e2d 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_core/tf_core.c
@@ -1105,71 +1105,6 @@ tf_alloc_tbl_entry(struct tf *tfp,
return 0;
 }
 
-int
-tf_search_tbl_entry(struct tf *tfp,
-   struct tf_search_tbl_entry_parms *parms)
-{
-   int rc;
-   struct tf_session *tfs;
-   struct tf_dev_info *dev;
-   struct tf_tbl_alloc_search_parms sparms;
-
-   TF_CHECK_PARMS2(tfp, parms);
-
-   /* Retrieve the session information */
-   rc = tf_session_get_session(tfp, &tfs);
-   if (rc) {
-   TFP_DRV_LOG(ERR,
-   "%s: Failed to lookup session, rc:%s\n",
-   tf_dir_2_str(parms->dir),
-   strerror(-rc));
-   return rc;
-   }
-
-   /* Retrieve the device information */
-   rc = tf_session_get_device(tfs, &dev);
-   if (rc) {
-   TFP_DRV_LOG(ERR,
-   "%s: Failed to lookup device, rc:%s\n",
-   tf_dir_2_str(parms->dir),
-   strerror(-rc));
-   return rc;
-   }
-
-   if (dev->ops->tf_dev_alloc_search_tbl == NULL) {
-   rc = -EOPNOTSUPP;
-   TFP_DRV_LOG(ERR,
-   "%s: Operation not supported, rc:%s\n",
-   tf_dir_2_str(parms->dir),
-   strerror(-rc));
-   return rc;
-   }
-
-   memset(&sparms, 0, sizeof(struct tf_tbl_alloc_search_parms));
-   sparms.dir = parms->dir;
-   sparms.type = parms->type;
-   sparms.result = parms->result;
-   sparms.result_sz_in_bytes = parms->result_sz_in_bytes;
-   sparms.alloc = parms->alloc;
-   sparms.tbl_scope_id = parms->tbl_scope_id;
-   rc = dev->ops->tf_dev_alloc_search_tbl(tfp, &sparms);
-   if (rc) {
-   TFP_DRV_LOG(ERR,
-   "%s: TBL allocation failed, rc:%s\n",
-   tf_dir_2_str(parms->dir),
-   strerror(-rc));
-   return rc;
-   }
-
-   /* Return the outputs from the search */
-   parms->hit = sparms.hit;
-   parms->search_status = sparms.search_status;
-   parms->ref_cnt = sparms.ref_cnt;
-   parms->idx = sparms.idx;
-
-   return 0;
-}
-
 int
 tf_free_tbl_entry(struct tf *tfp,
  struct tf_free_tbl_entry_parms *parms)
diff --git a/drivers/net/bnxt/tf_core/tf_core.h 
b/drivers/net/bnxt/tf_core/tf_core.h
index 84b234f0e3..7e0cdf7e0d 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -1622,79 +1622,6 @@ int tf_clear_tcam_shared_entries(struct tf *tfp,
  * @ref tf_get_shared_tbl_increment
  */
 
-/**
- * tf_alloc_tbl_entry parameter definition
- */
-struct tf_search_tbl_entry_parms {
-   /**
-* [in] Receive or transmit direction
-*/
-   enum tf_dir dir;
-   /**
-* [in] Type of the allocation
-*/
-   enum tf_tbl_type type;
-   /**
-* [in] Table scope identifier (ignored unless TF_TBL_TYPE_EXT)
-*/
-   uint32_t tbl_scope_id;
-   /**
-* [in] Result data to search for
-*/
-   uint8_t *result;
-   /**
-* [in] Result data size in bytes
-*/
-   uint16_t result_sz_in_bytes;
-   /**
-* [in] Allocate on miss.
-*/
-   uint8_t alloc;
-   /**
-* [out] Set if matching entry found
-*/
-   uint8_t hit;
-   /**
-* [out] Search result status (hit, miss, reject)
-*/
-   enum tf_search_status search_status;
-   /**
-* [out] Current ref count after allocation
-*/
-   uint16_t ref_cnt;
-   /**
-* [out] Idx of allocated entry or found entry
-*/
-   uint32_t idx;
-};
-
-/**
- * search Table Entry (experimental)
- *
- * This function searches the shadow copy of an index table for a matching
- * entry.  The result data must match for hit to be set.  Only TruFlow core
- * data is accessed.  If shadow_copy is not enabled, an error is returned.
- *
- * Implementation:
- *
- * A hash

[dpdk-dev] [PATCH v2 00/13] enhancements to host based flow table management

2021-09-08 Thread Venkat Duvvuru
This patch set adds support for new offload features/enhancements for
Thor adapters, such as VF representor support, new flow matches/actions
and dynamic SRAM manager support.

v1 ==> v2
=
1. Fixed compilation issues
2. Abandoned patch 14

Farah Smith (4):
  net/bnxt: tf core index table updates
  net/bnxt: add Thor SRAM mgr model
  net/bnxt: tf core SRAM Manager
  net/bnxt: sram manager shared session

Jay Ding (1):
  net/bnxt: add flow meter drop counter support

Kishore Padmanabha (6):
  net/bnxt: add flow templates support for Thor
  net/bnxt: add support for tunnel offloads
  net/bnxt: add support for dynamic encap action
  net/bnxt: add wild card TCAM byte order for Thor
  net/bnxt: add flow templates for Thor
  net/bnxt: add enhancements to TF ULP

Peter Spreadborough (1):
  net/bnxt: enable dpool allocator

Randy Schacher (1):
  net/bnxt: dynamically allocate space for EM defrag function

 drivers/net/bnxt/tf_core/cfa_resource_types.h | 5 +-
 drivers/net/bnxt/tf_core/dpool.c  |38 +-
 drivers/net/bnxt/tf_core/ll.c | 3 +
 drivers/net/bnxt/tf_core/ll.h |50 +-
 drivers/net/bnxt/tf_core/meson.build  | 2 +
 drivers/net/bnxt/tf_core/tf_core.c|   169 +-
 drivers/net/bnxt/tf_core/tf_core.h|   159 +-
 drivers/net/bnxt/tf_core/tf_device.c  |40 +-
 drivers/net/bnxt/tf_core/tf_device.h  |   137 +-
 drivers/net/bnxt/tf_core/tf_device_p4.c   |77 +-
 drivers/net/bnxt/tf_core/tf_device_p4.h   |50 +-
 drivers/net/bnxt/tf_core/tf_device_p58.c  |   112 +-
 drivers/net/bnxt/tf_core/tf_device_p58.h  |70 +-
 drivers/net/bnxt/tf_core/tf_em.h  |10 -
 drivers/net/bnxt/tf_core/tf_em_common.c   | 4 +
 .../net/bnxt/tf_core/tf_em_hash_internal.c|34 -
 drivers/net/bnxt/tf_core/tf_em_internal.c |   185 +-
 drivers/net/bnxt/tf_core/tf_msg.c | 2 +-
 drivers/net/bnxt/tf_core/tf_rm.c  |   180 +-
 drivers/net/bnxt/tf_core/tf_rm.h  |62 +-
 drivers/net/bnxt/tf_core/tf_session.c |56 +
 drivers/net/bnxt/tf_core/tf_session.h |58 +-
 drivers/net/bnxt/tf_core/tf_sram_mgr.c|   971 +
 drivers/net/bnxt/tf_core/tf_sram_mgr.h|   317 +
 drivers/net/bnxt/tf_core/tf_tbl.c |   259 +-
 drivers/net/bnxt/tf_core/tf_tbl.h |87 +-
 drivers/net/bnxt/tf_core/tf_tbl_sram.c|   747 +
 drivers/net/bnxt/tf_core/tf_tbl_sram.h|   154 +
 drivers/net/bnxt/tf_core/tf_tcam.c|16 +-
 drivers/net/bnxt/tf_core/tf_tcam.h| 7 +
 drivers/net/bnxt/tf_core/tf_tcam_shared.c |28 +-
 drivers/net/bnxt/tf_core/tf_util.c|12 +
 drivers/net/bnxt/tf_ulp/bnxt_tf_common.h  |10 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c|52 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp.h|20 +-
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c   |   226 +-
 .../bnxt/tf_ulp/generic_templates/meson.build |17 +-
 .../generic_templates/ulp_template_db_act.c   | 2 +-
 .../generic_templates/ulp_template_db_class.c | 12109 +++-
 .../generic_templates/ulp_template_db_enum.h  |   618 +-
 .../generic_templates/ulp_template_db_field.h |   767 +-
 .../generic_templates/ulp_template_db_tbl.c   |  2757 +-
 .../ulp_template_db_thor_act.c|  5079 +-
 .../ulp_template_db_thor_class.c  | 45573 ++--
 .../ulp_template_db_wh_plus_act.c |  1700 +-
 .../ulp_template_db_wh_plus_class.c   |  8329 ++-
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c  |48 +-
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.h  | 8 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c |   678 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h |68 +-
 drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c | 9 +-
 drivers/net/bnxt/tf_ulp/ulp_mapper.c  |   448 +-
 drivers/net/bnxt/tf_ulp/ulp_mapper.h  |10 +-
 drivers/net/bnxt/tf_ulp/ulp_matcher.c |13 +
 drivers/net/bnxt/tf_ulp/ulp_port_db.c |15 +-
 drivers/net/bnxt/tf_ulp/ulp_rte_handler_tbl.c |31 +
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.c  |   663 +-
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.h  |12 +-
 drivers/net/bnxt/tf_ulp/ulp_template_struct.h |32 +-
 drivers/net/bnxt/tf_ulp/ulp_tun.c |   521 +-
 drivers/net/bnxt/tf_ulp/ulp_tun.h |89 +-
 drivers/net/bnxt/tf_ulp/ulp_utils.c   |71 +-
 drivers/net/bnxt/tf_ulp/ulp_utils.h   |27 +-
 meson_options.txt | 2 +
 64 files changed, 71149 insertions(+), 12956 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.h
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.h

-- 
2.17.1



[dpdk-dev] [PATCH v2 04/13] net/bnxt: add Thor SRAM mgr model

2021-09-08 Thread Venkat Duvvuru
From: Farah Smith 

Add dynamic SRAM manager allocation support.

Signed-off-by: Farah Smith 
Reviewed-by: Shahaji Bhosle 
Reviewed-by: Peter Spreadborough 
---
 drivers/net/bnxt/tf_core/ll.c |   3 +
 drivers/net/bnxt/tf_core/ll.h |  50 +-
 drivers/net/bnxt/tf_core/meson.build  |   2 +
 drivers/net/bnxt/tf_core/tf_core.c| 104 ++-
 drivers/net/bnxt/tf_core/tf_core.h|  48 +-
 drivers/net/bnxt/tf_core/tf_device.c  |  40 +-
 drivers/net/bnxt/tf_core/tf_device.h  | 133 ++-
 drivers/net/bnxt/tf_core/tf_device_p4.c   |  75 +-
 drivers/net/bnxt/tf_core/tf_device_p4.h   |  50 +-
 drivers/net/bnxt/tf_core/tf_device_p58.c  | 105 ++-
 drivers/net/bnxt/tf_core/tf_device_p58.h  |  60 +-
 drivers/net/bnxt/tf_core/tf_msg.c |   2 +-
 drivers/net/bnxt/tf_core/tf_rm.c  |  46 +-
 drivers/net/bnxt/tf_core/tf_rm.h  |  62 +-
 drivers/net/bnxt/tf_core/tf_session.c |  56 ++
 drivers/net/bnxt/tf_core/tf_session.h |  58 +-
 drivers/net/bnxt/tf_core/tf_sram_mgr.c| 971 ++
 drivers/net/bnxt/tf_core/tf_sram_mgr.h| 317 +++
 drivers/net/bnxt/tf_core/tf_tbl.c | 186 +
 drivers/net/bnxt/tf_core/tf_tbl.h |  15 +-
 drivers/net/bnxt/tf_core/tf_tbl_sram.c| 713 
 drivers/net/bnxt/tf_core/tf_tbl_sram.h| 154 
 drivers/net/bnxt/tf_core/tf_tcam.c|  10 +-
 drivers/net/bnxt/tf_core/tf_tcam.h|   7 +
 drivers/net/bnxt/tf_core/tf_tcam_shared.c |  28 +-
 drivers/net/bnxt/tf_core/tf_util.c|  10 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c|  23 +
 meson_options.txt |   2 +
 28 files changed, 2978 insertions(+), 352 deletions(-)
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_sram_mgr.h
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.c
 create mode 100644 drivers/net/bnxt/tf_core/tf_tbl_sram.h
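
As background for the new ll_head()/ll_next()/ll_cnt() helpers added to ll.h
below, a minimal usage sketch, assuming list entries embed struct ll_entry as
their first member so the pointer can be cast back:

#include <stdio.h>
#include <stdint.h>
#include "ll.h"

struct my_block {
	struct ll_entry entry;	/* must be first for the cast below */
	uint32_t block_id;
};

static void
walk_blocks(struct ll *list)
{
	struct ll_entry *cur;

	printf("%u entries\n", ll_cnt(list));

	/* Non-destructive walk using the new iteration helpers. */
	for (cur = ll_head(list); cur != NULL; cur = ll_next(cur)) {
		struct my_block *blk = (struct my_block *)cur;

		printf("block %u\n", blk->block_id);
	}
}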

diff --git a/drivers/net/bnxt/tf_core/ll.c b/drivers/net/bnxt/tf_core/ll.c
index cd168a7970..f2bdff6b9e 100644
--- a/drivers/net/bnxt/tf_core/ll.c
+++ b/drivers/net/bnxt/tf_core/ll.c
@@ -13,6 +13,7 @@ void ll_init(struct ll *ll)
 {
ll->head = NULL;
ll->tail = NULL;
+   ll->cnt = 0;
 }
 
 /* insert entry in linked list */
@@ -30,6 +31,7 @@ void ll_insert(struct ll *ll,
entry->next->prev = entry;
ll->head = entry->next->prev;
}
+   ll->cnt++;
 }
 
 /* delete entry from linked list */
@@ -49,4 +51,5 @@ void ll_delete(struct ll *ll,
entry->prev->next = entry->next;
entry->next->prev = entry->prev;
}
+   ll->cnt--;
 }
diff --git a/drivers/net/bnxt/tf_core/ll.h b/drivers/net/bnxt/tf_core/ll.h
index 239478b4f8..9cf8f64ec2 100644
--- a/drivers/net/bnxt/tf_core/ll.h
+++ b/drivers/net/bnxt/tf_core/ll.h
@@ -8,6 +8,8 @@
 #ifndef _LL_H_
 #define _LL_H_
 
+#include 
+
 /* linked list entry */
 struct ll_entry {
struct ll_entry *prev;
@@ -18,6 +20,7 @@ struct ll_entry {
 struct ll {
struct ll_entry *head;
struct ll_entry *tail;
+   uint32_t cnt;
 };
 
 /**
@@ -28,7 +31,7 @@ struct ll {
 void ll_init(struct ll *ll);
 
 /**
- * Linked list insert
+ * Linked list insert head
  *
  * [in] ll, linked list where element is inserted
  * [in] entry, entry to be added
@@ -43,4 +46,49 @@ void ll_insert(struct ll *ll, struct ll_entry *entry);
  */
 void ll_delete(struct ll *ll, struct ll_entry *entry);
 
+/**
+ * Linked list return next entry without deleting it
+ *
+ * Useful in performing search
+ *
+ * [in] Entry in the list
+ */
+static inline struct ll_entry *ll_next(struct ll_entry *entry)
+{
+   return entry->next;
+}
+
+/**
+ * Linked list return the head of the list without removing it
+ *
+ * Useful in performing search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_head(struct ll *ll)
+{
+   return ll->head;
+}
+
+/**
+ * Linked list return the tail of the list without removing it
+ *
+ * Useful in performing search
+ *
+ * [in] ll, linked list
+ */
+static inline struct ll_entry *ll_tail(struct ll *ll)
+{
+   return ll->tail;
+}
+
+/**
+ * Linked list return the number of entries in the list
+ *
+ * [in] ll, linked list
+ */
+static inline uint32_t ll_cnt(struct ll *ll)
+{
+   return ll->cnt;
+}
 #endif /* _LL_H_ */
diff --git a/drivers/net/bnxt/tf_core/meson.build 
b/drivers/net/bnxt/tf_core/meson.build
index f28e77ec2e..b7333a431b 100644
--- a/drivers/net/bnxt/tf_core/meson.build
+++ b/drivers/net/bnxt/tf_core/meson.build
@@ -16,6 +16,8 @@ sources += files(
 'stack.c',
 'tf_rm.c',
 'tf_tbl.c',
+   'tf_tbl_sram.c',
+   'tf_sram_mgr.c',
 'tf_em_common.c',
 'tf_em_host.c',
 'tf_em_internal.c',
diff --git a/drivers/net/bnxt/tf_core/tf_core.c 
b/drivers/net/bnxt/tf_core/tf_core.c
index 5458f76e2d..936102c804 100644
--- a/drivers/net/bnxt/tf_core/tf_core.c
+++ b/drivers/net/bnxt/tf_cor

[dpdk-dev] [PATCH v2 05/13] net/bnxt: add flow templates support for Thor

2021-09-08 Thread Venkat Duvvuru
From: Kishore Padmanabha 

The templates add non-VFR based support for testpmd with:
matches including:
- DMAC, SIP, DIP, Proto, Sport, Dport
- SIP, DIP, Proto, Sport, Dport
actions:
- count, drop

Signed-off-by: Kishore Padmanabha 
Signed-off-by: Venkat Duvvuru 
Reviewed-by: Mike Baucom 
---
 drivers/net/bnxt/tf_ulp/bnxt_tf_common.h  |   6 +
 drivers/net/bnxt/tf_ulp/bnxt_ulp.c|  36 +++---
 drivers/net/bnxt/tf_ulp/bnxt_ulp_flow.c   |  12 ++
 .../bnxt/tf_ulp/generic_templates/meson.build |  17 ++-
 .../ulp_template_db_thor_class.c  |   1 -
 drivers/net/bnxt/tf_ulp/ulp_fc_mgr.c  |   2 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.c | 122 +-
 drivers/net/bnxt/tf_ulp/ulp_flow_db.h |  26 +++-
 drivers/net/bnxt/tf_ulp/ulp_gen_tbl.c |   5 +
 drivers/net/bnxt/tf_ulp/ulp_ha_mgr.c  |   2 +-
 drivers/net/bnxt/tf_ulp/ulp_mapper.c  | 111 +++-
 drivers/net/bnxt/tf_ulp/ulp_matcher.c |  13 ++
 drivers/net/bnxt/tf_ulp/ulp_port_db.c |  15 ++-
 drivers/net/bnxt/tf_ulp/ulp_rte_parser.c  |   9 +-
 drivers/net/bnxt/tf_ulp/ulp_tun.c |  20 +++
 drivers/net/bnxt/tf_ulp/ulp_utils.c   |   8 +-
 16 files changed, 356 insertions(+), 49 deletions(-)
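
To make the match/action lists above concrete, a hedged rte_flow sketch of one rule
these templates are meant to serve (addresses and ports are placeholders; the
default item masks cover the 5-tuple fields):

#include <rte_flow.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

static struct rte_flow *
create_count_drop_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 1)),
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 2)),
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(4000),
		.hdr.dst_port = RTE_BE16(4001),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_count count = { .id = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}

rte_flow_validate() with the same arguments can be used first to check that the
template set accepts the rule.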

diff --git a/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h 
b/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
index f59da41e54..e0ebed3fed 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
+++ b/drivers/net/bnxt/tf_ulp/bnxt_tf_common.h
@@ -13,6 +13,12 @@
 
 #define BNXT_TF_DBG(lvl, fmt, args...) PMD_DRV_LOG(lvl, fmt, ## args)
 
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
+#define BNXT_TF_INF(fmt, args...)  PMD_DRV_LOG(INFO, fmt, ## args)
+#else
+#define BNXT_TF_INF(fmt, args...)
+#endif
+
 #define BNXT_ULP_EM_FLOWS  8192
 #define BNXT_ULP_1M_FLOWS  100
 #define BNXT_EEM_RX_GLOBAL_ID_MASK (BNXT_ULP_1M_FLOWS - 1)
diff --git a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c 
b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
index 183bae66c5..475c7a6cdf 100644
--- a/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
+++ b/drivers/net/bnxt/tf_ulp/bnxt_ulp.c
@@ -698,6 +698,11 @@ ulp_eem_tbl_scope_init(struct bnxt *bp)
rc);
return rc;
}
+#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
+   BNXT_TF_DBG(DEBUG, "TableScope=0x%0x %d\n",
+   params.tbl_scope_id,
+   params.tbl_scope_id);
+#endif
rc = bnxt_ulp_cntxt_tbl_scope_id_set(bp->ulp_ctx, params.tbl_scope_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to set table scope id\n");
@@ -825,6 +830,8 @@ ulp_ctx_init(struct bnxt *bp,
goto error_deinit;
}
 
+   /* TODO: For now we are overriding to APP:1 on this branch*/
+   bp->app_id = 1;
rc = bnxt_ulp_cntxt_app_id_set(bp->ulp_ctx, bp->app_id);
if (rc) {
BNXT_TF_DBG(ERR, "Unable to set app_id for ULP init.\n");
@@ -838,11 +845,6 @@ ulp_ctx_init(struct bnxt *bp,
goto error_deinit;
}
 
-   if (devid == BNXT_ULP_DEVICE_ID_THOR) {
-   ulp_data->ulp_flags &= ~BNXT_ULP_VF_REP_ENABLED;
-   BNXT_TF_DBG(ERR, "Enabled non-VFR mode\n");
-   }
-
/*
 * Shared session must be created before first regular session but after
 * the ulp_ctx is valid.
@@ -902,7 +904,7 @@ ulp_dparms_init(struct bnxt *bp, struct bnxt_ulp_context 
*ulp_ctx)
dparms->ext_flow_db_num_entries = bp->max_num_kflows * 1024;
/* GFID =  2 * num_flows */
dparms->mark_db_gfid_entries = dparms->ext_flow_db_num_entries * 2;
-   BNXT_TF_DBG(DEBUG, "Set the number of flows = %"PRIu64"\n",
+   BNXT_TF_DBG(DEBUG, "Set the number of flows = %" PRIu64 "\n",
dparms->ext_flow_db_num_entries);
 
return 0;
@@ -1393,17 +1395,13 @@ bnxt_ulp_port_init(struct bnxt *bp)
uint32_t ulp_flags;
int32_t rc = 0;
 
+   if (!bp || !BNXT_TRUFLOW_EN(bp))
+   return rc;
+
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
BNXT_TF_DBG(ERR,
"Skip ulp init for port: %d, not a TVF or PF\n",
-   bp->eth_dev->data->port_id);
-   return rc;
-   }
-
-   if (!BNXT_TRUFLOW_EN(bp)) {
-   BNXT_TF_DBG(DEBUG,
-   "Skip ulp init for port: %d, truflow is not 
enabled\n",
-   bp->eth_dev->data->port_id);
+   bp->eth_dev->data->port_id);
return rc;
}
 
@@ -1524,6 +1522,9 @@ bnxt_ulp_port_deinit(struct bnxt *bp)
struct rte_pci_device *pci_dev;
struct rte_pci_addr *pci_addr;
 
+   if (!BNXT_TRUFLOW_EN(bp))
+   return;
+
if (!BNXT_PF(bp) && !BNXT_VF_IS_TRUSTED(bp)) {
BNXT_TF_DBG(ERR,
"Skip ULP deinit port:%d, not a TVF or PF\n",
@@ -1531,13 +1532,6 @@ bnx

[dpdk-dev] [PATCH v2 03/13] net/bnxt: add flow meter drop counter support

2021-09-08 Thread Venkat Duvvuru
From: Jay Ding 

- Add flow meter drop counter support for Thor.

Signed-off-by: Jay Ding 
Reviewed-by: Farah Smith 
Reviewed-by: Ajit Khaparde 
---
 drivers/net/bnxt/tf_core/cfa_resource_types.h |  5 +-
 drivers/net/bnxt/tf_core/tf_core.h|  8 +-
 drivers/net/bnxt/tf_core/tf_device_p58.c  |  1 +
 drivers/net/bnxt/tf_core/tf_device_p58.h  | 14 
 drivers/net/bnxt/tf_core/tf_tbl.c | 74 +++
 drivers/net/bnxt/tf_core/tf_util.c|  2 +
 6 files changed, 68 insertions(+), 36 deletions(-)

diff --git a/drivers/net/bnxt/tf_core/cfa_resource_types.h 
b/drivers/net/bnxt/tf_core/cfa_resource_types.h
index cbab0d0078..36a55d4e17 100644
--- a/drivers/net/bnxt/tf_core/cfa_resource_types.h
+++ b/drivers/net/bnxt/tf_core/cfa_resource_types.h
@@ -104,10 +104,11 @@
 #define CFA_RESOURCE_TYPE_P58_WC_FKB 0x12UL
 /* VEB TCAM */
 #define CFA_RESOURCE_TYPE_P58_VEB_TCAM   0x13UL
+/* Metadata */
+#define CFA_RESOURCE_TYPE_P58_METADATA   0x14UL
 /* Meter drop counter */
 #define CFA_RESOURCE_TYPE_P58_METER_DROP_CNT 0x15UL
-#define CFA_RESOURCE_TYPE_P58_LAST   
CFA_RESOURCE_TYPE_P58_METER_DROP_CNT
-
+#define CFA_RESOURCE_TYPE_P58_LAST  
CFA_RESOURCE_TYPE_P58_METER_DROP_CNT
 
 /* Multicast Group */
 #define CFA_RESOURCE_TYPE_P45_MCG 0x0UL
diff --git a/drivers/net/bnxt/tf_core/tf_core.h 
b/drivers/net/bnxt/tf_core/tf_core.h
index 7e0cdf7e0d..af8d13bd7e 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -283,9 +283,9 @@ enum tf_tbl_type {
TF_TBL_TYPE_ACT_MODIFY_32B,
/** TH 64B Modify Record */
TF_TBL_TYPE_ACT_MODIFY_64B,
-   /** (Future) Meter Profiles */
+   /** Meter Profiles */
TF_TBL_TYPE_METER_PROF,
-   /** (Future) Meter Instance */
+   /** Meter Instance */
TF_TBL_TYPE_METER_INST,
/** Wh+/SR/Th Mirror Config */
TF_TBL_TYPE_MIRROR_CONFIG,
@@ -301,6 +301,8 @@ enum tf_tbl_type {
TF_TBL_TYPE_EM_FKB,
/** TH WC Flexible Key builder */
TF_TBL_TYPE_WC_FKB,
+   /** Meter Drop Counter */
+   TF_TBL_TYPE_METER_DROP_CNT,
 
/* External */
 
@@ -2194,6 +2196,8 @@ enum tf_global_config_type {
TF_TUNNEL_ENCAP,  /**< Tunnel Encap Config(TECT) */
TF_ACTION_BLOCK,  /**< Action Block Config(ABCR) */
TF_COUNTER_CFG,   /**< Counter Configuration (CNTRS_CTRL) */
+   TF_METER_CFG, /**< Meter Config(ACTP4_FMTCR) */
+   TF_METER_INTERVAL_CFG, /**< Meter Interval Config(FMTCR_INTERVAL)  */
TF_GLOBAL_CFG_TYPE_MAX
 };
 
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c 
b/drivers/net/bnxt/tf_core/tf_device_p58.c
index 808dcb1f77..a492c62bff 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -43,6 +43,7 @@ const char *tf_resource_str_p58[CFA_RESOURCE_TYPE_P58_LAST + 
1] = {
[CFA_RESOURCE_TYPE_P58_EM_FKB] = "em_fkb  ",
[CFA_RESOURCE_TYPE_P58_WC_FKB] = "wc_fkb  ",
[CFA_RESOURCE_TYPE_P58_VEB_TCAM]   = "veb ",
+   [CFA_RESOURCE_TYPE_P58_METADATA]   = "metadata",
 };
 
 /**
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.h 
b/drivers/net/bnxt/tf_core/tf_device_p58.h
index 66b0f4e983..8c2e07aa34 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.h
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.h
@@ -75,10 +75,18 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER,
0, 0, 0
},
+   [TF_TBL_TYPE_METER_DROP_CNT] = {
+   TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METER_DROP_CNT,
+   0, 0, 0
+   },
[TF_TBL_TYPE_MIRROR_CONFIG] = {
TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR,
0, 0, 0
},
+   [TF_TBL_TYPE_METADATA] = {
+   TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_METADATA,
+   0, 0, 0
+   },
/* Policy - ARs in bank 1 */
[TF_TBL_TYPE_FULL_ACT_RECORD] = {
.cfg_type= TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
@@ -194,5 +202,11 @@ struct tf_global_cfg_cfg 
tf_global_cfg_p58[TF_GLOBAL_CFG_TYPE_MAX] = {
[TF_COUNTER_CFG] = {
TF_GLOBAL_CFG_CFG_HCAPI, TF_COUNTER_CFG
},
+   [TF_METER_CFG] = {
+   TF_GLOBAL_CFG_CFG_HCAPI, TF_METER_CFG
+   },
+   [TF_METER_INTERVAL_CFG] = {
+   TF_GLOBAL_CFG_CFG_HCAPI, TF_METER_INTERVAL_CFG
+   },
 };
 #endif /* _TF_DEVICE_P58_H_ */
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c 
b/drivers/net/bnxt/tf_core/tf_tbl.c
index e77399c6bd..7011edcd78 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -374,23 +374,28 @@ tf_tbl_set(struct tf *tfp,
}
}
 
-   /* Verify that the entry has been previously allocated */
-

[dpdk-dev] [PATCH v2 11/13] net/bnxt: dynamically allocate space for EM defrag function

2021-09-08 Thread Venkat Duvvuru
From: Randy Schacher 

Alter the defrag function to dynamically allocate and free the
free_list and adj_list buffers.

Signed-off-by: Randy Schacher 
Reviewed-by: Peter Spreadborough 
---
 drivers/net/bnxt/tf_core/dpool.c | 38 +---
 1 file changed, 25 insertions(+), 13 deletions(-)

diff --git a/drivers/net/bnxt/tf_core/dpool.c b/drivers/net/bnxt/tf_core/dpool.c
index 145efa486f..5c03f775a5 100644
--- a/drivers/net/bnxt/tf_core/dpool.c
+++ b/drivers/net/bnxt/tf_core/dpool.c
@@ -7,9 +7,6 @@
 #include 
 #include 
 #include 
-
-#include 
-
 #include "tfp.h"
 #include "dpool.h"
 
@@ -84,13 +81,13 @@ static int dpool_move(struct dpool *dpool,
return 0;
 }
 
-
 int dpool_defrag(struct dpool *dpool,
 uint32_t entry_size,
 uint8_t defrag)
 {
struct dpool_free_list *free_list;
struct dpool_adj_list *adj_list;
+   struct tfp_calloc_parms parms;
uint32_t count;
uint32_t index;
uint32_t used;
@@ -103,15 +100,31 @@ int dpool_defrag(struct dpool *dpool,
uint32_t max_size = 0;
int rc;
 
-   free_list = rte_zmalloc("dpool_free_list",
-   sizeof(struct dpool_free_list), 0);
+   parms.nitems = 1;
+   parms.size = sizeof(struct dpool_free_list);
+   parms.alignment = 0;
+
+   rc = tfp_calloc(&parms);
+
+   if (rc)
+   return rc;
+
+   free_list = (struct dpool_free_list *)parms.mem_va;
if (free_list == NULL) {
TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
return -ENOMEM;
}
 
-   adj_list = rte_zmalloc("dpool_adjacent_list",
-   sizeof(struct dpool_adj_list), 0);
+   parms.nitems = 1;
+   parms.size = sizeof(struct dpool_adj_list);
+   parms.alignment = 0;
+
+   rc = tfp_calloc(&parms);
+
+   if (rc)
+   return rc;
+
+   adj_list = (struct dpool_adj_list *)parms.mem_va;
if (adj_list == NULL) {
TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
return -ENOMEM;
@@ -239,8 +252,8 @@ int dpool_defrag(struct dpool *dpool,

free_list->entry[largest_free_index].index,
max_index);
if (rc) {
-   rte_free(free_list);
-   rte_free(adj_list);
+   tfp_free(free_list);
+   tfp_free(adj_list);
return rc;
}
} else {
@@ -249,12 +262,11 @@ int dpool_defrag(struct dpool *dpool,
}
 
 done:
-   rte_free(free_list);
-   rte_free(adj_list);
+   tfp_free(free_list);
+   tfp_free(adj_list);
return largest_free_size;
 }
 
-
 uint32_t dpool_alloc(struct dpool *dpool,
 uint32_t size,
 uint8_t defrag)
-- 
2.17.1
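
For reference, the allocation pattern this patch switches to: a minimal sketch
only, using the tfp_calloc()/tfp_free() calls and the tfp_calloc_parms fields
that appear in the diff above; the wrapper function itself is illustrative. The
tfp layer wraps the underlying allocator so that tf_core code avoids direct
rte_* dependencies:

#include <stddef.h>
#include "tfp.h"

/* illustrative helper: allocate a single buffer of 'len' bytes */
static void *example_tfp_zalloc(size_t len)
{
	struct tfp_calloc_parms parms;

	parms.nitems = 1;
	parms.size = len;
	parms.alignment = 0;	/* default alignment */

	if (tfp_calloc(&parms) != 0)
		return NULL;	/* allocation failed */

	return parms.mem_va;	/* virtual address of the new buffer */
}

/* ...and later released with tfp_free(buf); */
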



[dpdk-dev] [PATCH v2 08/13] net/bnxt: add wild card TCAM byte order for Thor

2021-09-08 Thread Venkat Duvvuru
From: Kishore Padmanabha 

The wild card TCAM byte order for the Thor platform is different from the
profile TCAM byte order.

Signed-off-by: Kishore Padmanabha 
Signed-off-by: Venkat Duvvuru 
Reviewed-by: Shuanglin Wang 
Reviewed-by: Michael Baucom 
Reviewed-by: Ajit Khaparde 
---
 .../generic_templates/ulp_template_db_tbl.c   |  2 ++
 drivers/net/bnxt/tf_ulp/ulp_mapper.c  | 25 +--
 drivers/net/bnxt/tf_ulp/ulp_template_struct.h |  1 +
 3 files changed, 21 insertions(+), 7 deletions(-)

diff --git a/drivers/net/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c 
b/drivers/net/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c
index b5bce6f4c7..68f1b5fd00 100644
--- a/drivers/net/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c
+++ b/drivers/net/bnxt/tf_ulp/generic_templates/ulp_template_db_tbl.c
@@ -201,6 +201,7 @@ struct bnxt_ulp_device_params 
ulp_device_params[BNXT_ULP_DEVICE_ID_LAST] = {
.key_byte_order  = BNXT_ULP_BYTE_ORDER_LE,
.result_byte_order   = BNXT_ULP_BYTE_ORDER_LE,
.encap_byte_order= BNXT_ULP_BYTE_ORDER_BE,
+   .wc_key_byte_order   = BNXT_ULP_BYTE_ORDER_BE,
.encap_byte_swap = 1,
.int_flow_db_num_entries = 16384,
.ext_flow_db_num_entries = 32768,
@@ -223,6 +224,7 @@ struct bnxt_ulp_device_params 
ulp_device_params[BNXT_ULP_DEVICE_ID_LAST] = {
.key_byte_order  = BNXT_ULP_BYTE_ORDER_LE,
.result_byte_order   = BNXT_ULP_BYTE_ORDER_LE,
.encap_byte_order= BNXT_ULP_BYTE_ORDER_BE,
+   .wc_key_byte_order   = BNXT_ULP_BYTE_ORDER_BE,
.encap_byte_swap = 1,
.int_flow_db_num_entries = 16384,
.ext_flow_db_num_entries = 32768,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_mapper.c 
b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
index 2687a545f3..bcc089b3e1 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_mapper.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_mapper.c
@@ -1953,6 +1953,15 @@ static void ulp_mapper_wc_tcam_tbl_post_process(struct 
ulp_blob *blob)
 #endif
 }
 
+static int32_t ulp_mapper_tcam_is_wc_tcam(struct bnxt_ulp_mapper_tbl_info *tbl)
+{
+   if (tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM ||
+   tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_HIGH ||
+   tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_LOW)
+   return 1;
+   return 0;
+}
+
 static int32_t
 ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms *parms,
struct bnxt_ulp_mapper_tbl_info *tbl)
@@ -1972,6 +1981,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms 
*parms,
uint32_t hit = 0;
uint16_t tmplen = 0;
uint16_t idx;
+   enum bnxt_ulp_byte_order key_byte_order;
 
/* Set the key and mask to the original key and mask. */
key = &okey;
@@ -2003,10 +2013,13 @@ ulp_mapper_tcam_tbl_process(struct 
bnxt_ulp_mapper_parms *parms,
return -EINVAL;
}
 
-   if (!ulp_blob_init(key, tbl->blob_key_bit_size,
-  dparms->key_byte_order) ||
-   !ulp_blob_init(mask, tbl->blob_key_bit_size,
-  dparms->key_byte_order) ||
+   if (ulp_mapper_tcam_is_wc_tcam(tbl))
+   key_byte_order = dparms->wc_key_byte_order;
+   else
+   key_byte_order = dparms->key_byte_order;
+
+   if (!ulp_blob_init(key, tbl->blob_key_bit_size, key_byte_order) ||
+   !ulp_blob_init(mask, tbl->blob_key_bit_size, key_byte_order) ||
!ulp_blob_init(&data, tbl->result_bit_size,
   dparms->result_byte_order) ||
!ulp_blob_init(&update_data, tbl->result_bit_size,
@@ -2043,9 +2056,7 @@ ulp_mapper_tcam_tbl_process(struct bnxt_ulp_mapper_parms 
*parms,
}
 
/* For wild card tcam perform the post process to swap the blob */
-   if (tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM ||
-   tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_HIGH ||
-   tbl->resource_type == TF_TCAM_TBL_TYPE_WC_TCAM_LOW) {
+   if (ulp_mapper_tcam_is_wc_tcam(tbl)) {
if (dparms->dynamic_pad_en) {
/* Sets up the slices for writing to the WC TCAM */
rc = ulp_mapper_wc_tcam_tbl_dyn_post_process(dparms,
diff --git a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h 
b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
index 904763f27d..e2a4b81cec 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
+++ b/drivers/net/bnxt/tf_ulp/ulp_template_struct.h
@@ -212,6 +212,7 @@ struct bnxt_ulp_device_params {
enum bnxt_ulp_byte_orderkey_byte_order;
enum bnxt_ulp_byte_orderresult_byte_order;
enum bnxt_ulp_byte_orderencap_byte_order;
+   enum bnxt_ulp_byte_orderwc_key_byte_order;
uint8_t encap_byte_swap;
uint8_t num_phy_ports;
uint32_tmark_db_lfi

[dpdk-dev] [PATCH v2 10/13] net/bnxt: tf core SRAM Manager

2021-09-08 Thread Venkat Duvvuru
From: Farah Smith 

Adjust info message to debug level to prevent excessive
logging.

Signed-off-by: Farah Smith 
Reviewed-by: Michael Baucom 
---
 drivers/net/bnxt/tf_core/tf_tbl_sram.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/net/bnxt/tf_core/tf_tbl_sram.c 
b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
index ea10afecb6..d7727f7a11 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl_sram.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl_sram.c
@@ -130,7 +130,7 @@ static int tf_tbl_sram_get_info(struct 
tf_tbl_sram_get_info_parms *parms)
if (slices)
parms->slice_size = tf_tbl_sram_slices_2_size[slices];
 
-   TFP_DRV_LOG(INFO,
+   TFP_DRV_LOG(DEBUG,
"(%s) bank(%s) slice_size(%s)\n",
tf_tbl_type_2_str(parms->tbl_type),
tf_sram_bank_2_str(parms->bank_id),
-- 
2.17.1



[dpdk-dev] [PATCH v2 12/13] net/bnxt: sram manager shared session

2021-09-08 Thread Venkat Duvvuru
From: Farah Smith 

Fix shared session support issues introduced by the SRAM manager
additions. The shared session does not support slices within
RM blocks, so calculate the required resources without slices
and determine base addresses using the previous method for the
shared session.

Signed-off-by: Farah Smith 
Reviewed-by: Kishore Padmanabha 
Reviewed-by: Shahaji Bhosle 
---
 drivers/net/bnxt/tf_core/tf_em_internal.c |   5 +-
 drivers/net/bnxt/tf_core/tf_rm.c  | 134 +++---
 drivers/net/bnxt/tf_core/tf_tbl_sram.c|  73 +---
 3 files changed, 176 insertions(+), 36 deletions(-)

diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c 
b/drivers/net/bnxt/tf_core/tf_em_internal.c
index 2d57595f17..67ba011eae 100644
--- a/drivers/net/bnxt/tf_core/tf_em_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -326,8 +326,11 @@ tf_em_int_unbind(struct tf *tfp)
return rc;
 
if (!tf_session_is_shared_session(tfs)) {
-   for (i = 0; i < TF_DIR_MAX; i++)
+   for (i = 0; i < TF_DIR_MAX; i++) {
+   if (tfs->em_pool[i] == NULL)
+   continue;
dpool_free_all(tfs->em_pool[i]);
+   }
}
 
rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr);
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 03c958a7d6..dd537aaece 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -18,6 +18,9 @@
 #include "tfp.h"
 #include "tf_msg.h"
 
+/* Logging defines */
+#define TF_RM_DEBUG  0
+
 /**
  * Generic RM Element data type that an RM DB is build upon.
  */
@@ -207,6 +210,45 @@ tf_rm_adjust_index(struct tf_rm_element *db,
return rc;
 }
 
+/**
+ * Logs an array of found residual entries to the console.
+ *
+ * [in] dir
+ *   Receive or transmit direction
+ *
+ * [in] module
+ *   Type of Device Module
+ *
+ * [in] count
+ *   Number of entries in the residual array
+ *
+ * [in] residuals
+ *   Pointer to an array of residual entries. Array is index same as
+ *   the DB in which this function is used. Each entry holds residual
+ *   value for that entry.
+ */
+#if (TF_RM_DEBUG == 1)
+static void
+tf_rm_log_residuals(enum tf_dir dir,
+   enum tf_module_type module,
+   uint16_t count,
+   uint16_t *residuals)
+{
+   int i;
+
+   /* Walk the residual array and log the types that wasn't
+* cleaned up to the console.
+*/
+   for (i = 0; i < count; i++) {
+   if (residuals[i] != 0)
+   TFP_DRV_LOG(INFO,
+   "%s, %s was not cleaned up, %d outstanding\n",
+   tf_dir_2_str(dir),
+   tf_module_subtype_2_str(module, i),
+   residuals[i]);
+   }
+}
+#endif /* TF_RM_DEBUG == 1 */
 /**
  * Performs a check of the passed in DB for any lingering elements. If
  * a resource type was found to not have been cleaned up by the caller
@@ -322,6 +364,12 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
*resv_size = found;
}
 
+#if (TF_RM_DEBUG == 1)
+   tf_rm_log_residuals(rm_db->dir,
+   rm_db->module,
+   rm_db->num_entries,
+   residuals);
+#endif
tfp_free((void *)residuals);
*resv = local_resv;
 
@@ -367,7 +415,8 @@ tf_rm_update_parent_reservations(struct tf *tfp,
 struct tf_rm_element_cfg *cfg,
 uint16_t *alloc_cnt,
 uint16_t num_elements,
-uint16_t *req_cnt)
+uint16_t *req_cnt,
+bool shared_session)
 {
int parent, child;
const char *type_str;
@@ -378,18 +427,28 @@ tf_rm_update_parent_reservations(struct tf *tfp,
 
/* If I am a parent */
if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
-   /* start with my own count */
-   RTE_ASSERT(cfg[parent].slices);
-   combined_cnt =
-   alloc_cnt[parent] / cfg[parent].slices;
+   uint8_t p_slices = 1;
+
+   /* Shared session doesn't support slices */
+   if (!shared_session)
+   p_slices = cfg[parent].slices;
+
+   RTE_ASSERT(p_slices);
 
-   if (alloc_cnt[parent] % cfg[parent].slices)
+   combined_cnt = alloc_cnt[parent] / p_slices;
+
+   if (alloc_cnt[parent] % p_slices)
combined_cnt++;
 
if (alloc_cnt[parent]) {
dev->ops->tf_dev_get_resource_str(tfp,
   

Re: [dpdk-dev] [PATCH v21 4/7] dmadev: introduce DMA device library implementation

2021-09-08 Thread Walsh, Conor


Hi Chengwen,

While testing the IOAT driver I realised that we hadn't implemented the new
RTE_DMADEV_ALL_VCHAN flag for stats. Rather than having every driver that only
supports one vchan add support for this flag, it would probably be better to
handle it in the library, as shown below.

Thanks,
Conor.

> +int
> +rte_dmadev_stats_get(uint16_t dev_id, uint16_t vchan,
> +  struct rte_dmadev_stats *stats)
> +{
> + const struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> + RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> + if (stats == NULL)
> + return -EINVAL;
> + if (vchan >= dev->data->dev_conf.nb_vchans &&
> + vchan != RTE_DMADEV_ALL_VCHAN) {
> + RTE_DMADEV_LOG(ERR,
> + "Device %u vchan %u out of range", dev_id, vchan);
> + return -EINVAL;
> + }
if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
vchan = 0;
> +
> + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -
> ENOTSUP);
> + memset(stats, 0, sizeof(struct rte_dmadev_stats));
> + return (*dev->dev_ops->stats_get)(dev, vchan, stats,
> +   sizeof(struct rte_dmadev_stats));
> +}
> +
> +int
> +rte_dmadev_stats_reset(uint16_t dev_id, uint16_t vchan)
> +{
> + struct rte_dmadev *dev = &rte_dmadevices[dev_id];
> +
> + RTE_DMADEV_VALID_DEV_ID_OR_ERR_RET(dev_id, -EINVAL);
> + if (vchan >= dev->data->dev_conf.nb_vchans &&
> + vchan != RTE_DMADEV_ALL_VCHAN) {
> + RTE_DMADEV_LOG(ERR,
> + "Device %u vchan %u out of range", dev_id, vchan);
> + return -EINVAL;
> + }
if (vchan == RTE_DMADEV_ALL_VCHAN && dev->data->dev_conf.nb_vchans == 1)
vchan = 0;
> +
> + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -
> ENOTSUP);
> + return (*dev->dev_ops->stats_reset)(dev, vchan);
> +}
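
For completeness, a short application-side sketch of how the aggregate stats
would then be used. It assumes the v21 dmadev API quoted above (including the
proposed RTE_DMADEV_ALL_VCHAN value); with the library-level fallback suggested
here, a single-vchan driver such as IOAT needs no extra handling of its own:

#include <rte_dmadev.h>

/* query device-wide stats across all vchans, then reset them */
static int dump_and_reset_stats(uint16_t dev_id)
{
	struct rte_dmadev_stats stats;
	int ret;

	ret = rte_dmadev_stats_get(dev_id, RTE_DMADEV_ALL_VCHAN, &stats);
	if (ret < 0)
		return ret;	/* e.g. -EINVAL for an out-of-range vchan */

	/* 'stats' now holds the combined counters for the device */

	return rte_dmadev_stats_reset(dev_id, RTE_DMADEV_ALL_VCHAN);
}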




Re: [dpdk-dev] [EXT] [PATCH v4 01/10] crypto/dpaa_sec: support DES-CBC

2021-09-08 Thread Akhil Goyal
> From: Gagandeep Singh 
> 
> add DES-CBC support and enable available cipher-only
> test cases.
> 
> Signed-off-by: Gagandeep Singh 
> ---
Series Acked-by: Akhil Goyal 

Applied to dpdk-next-crypto
Thanks.


Re: [dpdk-dev] [EXT] Re: [PATCH] RFC: ethdev: add reassembly offload

2021-09-08 Thread Anoob Joseph
Hi Ferruh, Rosen, Andrew,

Please see inline.

Thanks,
Anoob

> Subject: [EXT] Re: [PATCH] RFC: ethdev: add reassembly offload
> 
> External Email
> 
> --
> On 8/23/2021 11:02 AM, Akhil Goyal wrote:
> > Reassembly is a costly operation if it is done in software, however,
> > if it is offloaded to HW, it can considerably save application cycles.
> > The operation becomes even more costly if IP fragments are
> > encrypted.
> >
> > To resolve above two issues, a new offload
> DEV_RX_OFFLOAD_REASSEMBLY
> > is introduced in ethdev for devices which can attempt reassembly of
> > packets in hardware.
> > rte_eth_dev_info is added with the reassembly capabilities which a
> > device can support.
> > Now, if IP fragments are encrypted, reassembly can also be attempted
> > while doing inline IPsec processing.
> > This is controlled by a flag in rte_security_ipsec_sa_options to
> > enable reassembly of encrypted IP fragments in the inline path.
> >
> > The resulting reassembled packet would be a typical segmented mbuf in
> > case of success.
> >
> > And if reassembly of fragments is failed or is incomplete (if
> > fragments do not come before the reass_timeout), the mbuf is updated
> > with an ol_flag PKT_RX_REASSEMBLY_INCOMPLETE and the mbuf is returned as
> > is. The application may then decide the fate of the packet: wait longer for
> > more fragments to arrive, or drop it.
> >
> > Signed-off-by: Akhil Goyal 
> > ---
> >  lib/ethdev/rte_ethdev.c |  1 +
> >  lib/ethdev/rte_ethdev.h | 18 +-
> >  lib/mbuf/rte_mbuf_core.h|  3 ++-
> >  lib/security/rte_security.h | 10 ++
> >  4 files changed, 30 insertions(+), 2 deletions(-)
> >
> > diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c index
> > 9d95cd11e1..1ab3a093cf 100644
> > --- a/lib/ethdev/rte_ethdev.c
> > +++ b/lib/ethdev/rte_ethdev.c
> > @@ -119,6 +119,7 @@ static const struct {
> > RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
> > RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
> > RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
> > +   RTE_RX_OFFLOAD_BIT2STR(REASSEMBLY),
> > RTE_RX_OFFLOAD_BIT2STR(SCATTER),
> > RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
> > RTE_RX_OFFLOAD_BIT2STR(SECURITY),
> > diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h index
> > d2b27c351f..e89a4dc1eb 100644
> > --- a/lib/ethdev/rte_ethdev.h
> > +++ b/lib/ethdev/rte_ethdev.h
> > @@ -1360,6 +1360,7 @@ struct rte_eth_conf {
> >  #define DEV_RX_OFFLOAD_VLAN_FILTER 0x0200
> >  #define DEV_RX_OFFLOAD_VLAN_EXTEND 0x0400
> >  #define DEV_RX_OFFLOAD_JUMBO_FRAME 0x0800
> > +#define DEV_RX_OFFLOAD_REASSEMBLY  0x1000
> 
> previous '0x1000' was 'DEV_RX_OFFLOAD_CRC_STRIP'; it has been a long time
> since that offload was removed, but I am not sure whether reusing its value
> causes any problem.
> 
> >  #define DEV_RX_OFFLOAD_SCATTER 0x2000
> >  /**
> >   * Timestamp is set by the driver in
> RTE_MBUF_DYNFIELD_TIMESTAMP_NAME
> > @@ -1477,6 +1478,20 @@ struct rte_eth_dev_portconf {
> >   */
> >  #define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID
>   (UINT16_MAX)
> >
> > +/**
> > + * Reassembly capabilities that a device can support.
> > + * The device which can support reassembly offload should set
> > + * DEV_RX_OFFLOAD_REASSEMBLY
> > + */
> > +struct rte_eth_reass_capa {
> > +   /** Maximum time in ns that a fragment can wait for further
> fragments */
> > +   uint64_t reass_timeout;
> > +   /** Maximum number of fragments that device can reassemble */
> > +   uint16_t max_frags;
> > +   /** Reserved for future capabilities */
> > +   uint16_t reserved[3];
> > +};
> > +
> 
> I wonder if there is any other hardware around that supports reassembly offload;
> it would be good to get more feedback on the capabilities list.
> 
> >  /**
> >   * Ethernet device associated switch information
> >   */
> > @@ -1582,8 +1597,9 @@ struct rte_eth_dev_info {
> >  * embedded managed interconnect/switch.
> >  */
> > struct rte_eth_switch_info switch_info;
> > +   /* Reassembly capabilities of a device for reassembly offload */
> > +   struct rte_eth_reass_capa reass_capa;
> >
> > -   uint64_t reserved_64s[2]; /**< Reserved for future fields */
> 
> Reserved fields were added to be able to update the struct without breaking
> the ABI, so that a critical change doesn't have to wait until the next ABI-break
> release.
> Since this is an ABI-break release, we can keep the reserved field and add the
> new struct. Or this can be an opportunity to get rid of the reserved field.
> 
> Personally I have no objection to get rid of the reserved field, but better to
> agree on this explicitly.
> 
> > void *reserved_ptrs[2];   /**< Reserved for future fields */
> >  };
> >
> > diff --git a/lib/mbuf/rte_mbuf_core.h b/lib/mbuf/rte_mbuf_core.h index
> > bb38d7f581..cea25c87f7 100644
> > --- a/lib/mbuf/rte_mbuf_core.h
> > +++ b/lib/mbuf/rte_mbuf_core.h
> > @@ -200,10 +200,11 @@ extern "C" {
> >  #define PKT_RX_OUTER_L4_CK
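
To make the intended application behaviour concrete, a minimal RX-path sketch,
assuming only the names proposed in this RFC (the PKT_RX_REASSEMBLY_INCOMPLETE
mbuf flag); process_packet() is an application-defined handler and purely
illustrative:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

extern void process_packet(struct rte_mbuf *m);	/* app-defined, illustrative */

static void rx_handle_reassembly(uint16_t port, uint16_t queue)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port, queue, pkts, 32);
	for (i = 0; i < nb; i++) {
		if (pkts[i]->ol_flags & PKT_RX_REASSEMBLY_INCOMPLETE) {
			/* HW could not finish reassembly (e.g. reass_timeout
			 * expired): either hand the fragment to a software
			 * reassembly path or, as here, simply drop it.
			 */
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		/* success: a normal, possibly multi-segment, reassembled mbuf */
		process_packet(pkts[i]);
	}
}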

[dpdk-dev] [PATCH v3 00/17] add dmadev driver for idxd devices

2021-09-08 Thread Kevin Laatz
This patchset adds a dmadev driver and associated documentation to support
Intel Data Streaming Accelerator devices. This driver is intended to
ultimately replace the current IDXD part of the IOAT rawdev driver.
 
NOTE: This patchset has several dependencies:
 - v21 of the dmadev lib set [1]
 - v3 of the dmadev test suite [2]
 
[1] http://patches.dpdk.org/project/dpdk/list/?series=18738
[2] http://patches.dpdk.org/project/dpdk/list/?series=18744

v3:
 * rebased on above patchsets
 * added burst capacity API

v2:
 * rebased on above patchsets
 * added API to check for device being idle
 * added devbind updates for DMA devices
 * fixed issue identified by internal coverity scan
 * other minor miscellaneous changes and fixes

Bruce Richardson (1):
  raw/ioat: only build if dmadev not present

Conor Walsh (1):
  dma/idxd: move dpdk_idxd_cfg.py from raw to dma

Kevin Laatz (15):
  doc: initial commit for dmadevs section
  dma/idxd: add skeleton for VFIO based DSA device
  dma/idxd: add bus device probing
  dma/idxd: create dmadev instances on bus probe
  dma/idxd: create dmadev instances on pci probe
  dma/idxd: add datapath structures
  dma/idxd: add configure and info_get functions
  dma/idxd: add start and stop functions for pci devices
  dma/idxd: add data-path job submission functions
  dma/idxd: add data-path job completion functions
  dma/idxd: add operation statistic tracking
  dma/idxd: add vchan status function
  dma/idxd: add burst capacity API
  devbind: add dma device class
  devbind: move idxd device ID to dmadev class

 MAINTAINERS|  10 +
 app/test/test_dmadev.c |   2 +
 doc/guides/dmadevs/idxd.rst| 255 ++
 doc/guides/dmadevs/index.rst   |  14 +
 doc/guides/index.rst   |   1 +
 doc/guides/rel_notes/release_21_11.rst |   5 +
 drivers/dma/idxd/dpdk_idxd_cfg.py  | 117 +
 drivers/dma/idxd/idxd_bus.c| 378 +++
 drivers/dma/idxd/idxd_common.c | 616 +
 drivers/dma/idxd/idxd_hw_defs.h| 131 ++
 drivers/dma/idxd/idxd_internal.h   | 108 +
 drivers/dma/idxd/idxd_pci.c| 381 +++
 drivers/dma/idxd/meson.build   |  10 +
 drivers/dma/idxd/version.map   |   3 +
 drivers/dma/meson.build|   1 +
 drivers/raw/ioat/dpdk_idxd_cfg.py  | 118 +
 drivers/raw/ioat/meson.build   |  23 +-
 usertools/dpdk-devbind.py  |  12 +-
 18 files changed, 2062 insertions(+), 123 deletions(-)
 create mode 100644 doc/guides/dmadevs/idxd.rst
 create mode 100644 doc/guides/dmadevs/index.rst
 create mode 100755 drivers/dma/idxd/dpdk_idxd_cfg.py
 create mode 100644 drivers/dma/idxd/idxd_bus.c
 create mode 100644 drivers/dma/idxd/idxd_common.c
 create mode 100644 drivers/dma/idxd/idxd_hw_defs.h
 create mode 100644 drivers/dma/idxd/idxd_internal.h
 create mode 100644 drivers/dma/idxd/idxd_pci.c
 create mode 100644 drivers/dma/idxd/meson.build
 create mode 100644 drivers/dma/idxd/version.map
 mode change 100755 => 12 drivers/raw/ioat/dpdk_idxd_cfg.py

-- 
2.30.2



[dpdk-dev] [PATCH v3 01/17] raw/ioat: only build if dmadev not present

2021-09-08 Thread Kevin Laatz
From: Bruce Richardson 

Only build the rawdev IDXD/IOAT drivers if the dmadev drivers are not
present.

Signed-off-by: Bruce Richardson 
---
 drivers/raw/ioat/meson.build | 23 ---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/drivers/raw/ioat/meson.build b/drivers/raw/ioat/meson.build
index 0e81cb5951..7bd9ac912b 100644
--- a/drivers/raw/ioat/meson.build
+++ b/drivers/raw/ioat/meson.build
@@ -2,14 +2,31 @@
 # Copyright 2019 Intel Corporation
 
 build = dpdk_conf.has('RTE_ARCH_X86')
+# only use ioat rawdev driver if we don't have the equivalent dmadev ones
+if not dpdk_conf.has('RTE_DMA_IDXD') and not dpdk_conf.has('RTE_DMA_IOAT')
+build = false
+subdir_done()
+endif
+
 reason = 'only supported on x86'
 sources = files(
-'idxd_bus.c',
-'idxd_pci.c',
 'ioat_common.c',
-'ioat_rawdev.c',
 'ioat_rawdev_test.c',
 )
+
+if not dpdk_conf.has('RTE_DMA_IDXD')
+sources += files(
+'idxd_bus.c',
+'idxd_pci.c',
+)
+endif
+
+if not dpdk_conf.has('RTE_DMA_IOAT')
+sources += files (
+'ioat_rawdev.c',
+)
+endif
+
 deps += ['bus_pci', 'mbuf', 'rawdev']
 headers = files(
 'rte_ioat_rawdev.h',
-- 
2.30.2



[dpdk-dev] [PATCH v3 02/17] doc: initial commit for dmadevs section

2021-09-08 Thread Kevin Laatz
Add new section to the programmer's guide for dmadev devices.

Signed-off-by: Kevin Laatz 
Acked-by: Bruce Richardson 
---
 doc/guides/dmadevs/index.rst | 14 ++
 doc/guides/index.rst |  1 +
 2 files changed, 15 insertions(+)
 create mode 100644 doc/guides/dmadevs/index.rst

diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
new file mode 100644
index 00..b30004fd65
--- /dev/null
+++ b/doc/guides/dmadevs/index.rst
@@ -0,0 +1,14 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2021 Intel Corporation.
+
+DMA Device Drivers
+==================
+
+The following is a list of DMA device PMDs, which can be used from an
+application through the DMAdev API.
+
+.. toctree::
+:maxdepth: 2
+:numbered:
+
+idxd
diff --git a/doc/guides/index.rst b/doc/guides/index.rst
index 857f0363d3..ccb71640dd 100644
--- a/doc/guides/index.rst
+++ b/doc/guides/index.rst
@@ -19,6 +19,7 @@ DPDK documentation
bbdevs/index
cryptodevs/index
compressdevs/index
+   dmadevs/index
vdpadevs/index
regexdevs/index
eventdevs/index
-- 
2.30.2



[dpdk-dev] [PATCH v3 03/17] dma/idxd: add skeleton for VFIO based DSA device

2021-09-08 Thread Kevin Laatz
Add the basic device probe/remove skeleton code for DSA devices bound to
the vfio-pci driver. The relevant documentation and MAINTAINERS updates are
also included.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 MAINTAINERS| 10 +
 doc/guides/dmadevs/idxd.rst| 58 ++
 doc/guides/rel_notes/release_21_11.rst |  5 +++
 drivers/dma/idxd/idxd_internal.h   | 27 
 drivers/dma/idxd/idxd_pci.c| 55 
 drivers/dma/idxd/meson.build   |  7 
 drivers/dma/idxd/version.map   |  3 ++
 drivers/dma/meson.build|  1 +
 8 files changed, 166 insertions(+)
 create mode 100644 doc/guides/dmadevs/idxd.rst
 create mode 100644 drivers/dma/idxd/idxd_internal.h
 create mode 100644 drivers/dma/idxd/idxd_pci.c
 create mode 100644 drivers/dma/idxd/meson.build
 create mode 100644 drivers/dma/idxd/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index c057a090d6..b4c614a229 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1199,6 +1199,16 @@ F: doc/guides/compressdevs/zlib.rst
 F: doc/guides/compressdevs/features/zlib.ini
 
 
+DMAdev Drivers
+--------------
+
+Intel IDXD - EXPERIMENTAL
+M: Bruce Richardson 
+M: Kevin Laatz 
+F: drivers/dma/idxd/
+F: doc/guides/dmadevs/idxd.rst
+
+
 RegEx Drivers
 -------------
 
diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
new file mode 100644
index 00..924700d17e
--- /dev/null
+++ b/doc/guides/dmadevs/idxd.rst
@@ -0,0 +1,58 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2021 Intel Corporation.
+
+.. include:: 
+
+IDXD DMA Device Driver
+======================
+
+The ``idxd`` dmadev driver provides a poll-mode driver (PMD) for Intel\ |reg|
+Data Streaming Accelerator `(Intel DSA)
+`_.
+This PMD can be used in conjunction with Intel\ |reg| DSA devices to offload
+data operations, such as data copies, to hardware, freeing up CPU cycles for
+other tasks.
+
+Hardware Requirements
+---------------------
+
+The ``dpdk-devbind.py`` script, included with DPDK, can be used to show the
+presence of supported hardware. Running ``dpdk-devbind.py --status-dev dma``
+will show all the DMA devices on the system, including IDXD supported devices.
+Intel\ |reg| DSA devices are currently (at the time of writing) appearing
+as devices with type "0b25", due to the absence of pci-id database entries for
+them at this point.
+
+Compilation
+-----------
+
+For builds using ``meson`` and ``ninja``, the driver will be built when the
+target platform is x86-based. No additional compilation steps are necessary.
+
+Device Setup
+------------
+
+Devices using VFIO/UIO drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The HW devices to be used will need to be bound to a user-space IO driver for 
use.
+The ``dpdk-devbind.py`` script can be used to view the state of the devices
+and to bind them to a suitable DPDK-supported driver, such as ``vfio-pci``.
+For example::
+
+   $ dpdk-devbind.py -b vfio-pci 6a:01.0
+
+Device Probing and Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For devices bound to a suitable DPDK-supported VFIO/UIO driver, the HW devices 
will
+be found as part of the device scan done at application initialization time 
without
+the need to pass parameters to the application.
+
+For Intel\ |reg| DSA devices, DPDK will automatically configure the device 
with the
+maximum number of workqueues available on it, partitioning all resources 
equally
+among the queues.
+If fewer workqueues are required, then the ``max_queues`` parameter may be 
passed to
+the device driver on the EAL commandline, via the ``allowlist`` or ``-a`` flag 
e.g.::
+
+   $ dpdk-test -a ,max_queues=4
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 3562822b3d..8526646b13 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -67,6 +67,11 @@ New Features
   The dmadev library provides a DMA device framework for management and
   provision of hardware and software DMA devices.
 
+* **Added IDXD dmadev driver implementation.**
+
+  The IDXD dmadev driver provides device drivers for the Intel DSA devices.
+  This device driver can be used through the generic dmadev API.
+
 
 Removed Items
 -------------
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
new file mode 100644
index 00..c6a7dcd72f
--- /dev/null
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#ifndef _IDXD_INTERNAL_H_
+#define _IDXD_INTERNAL_H_
+
+/**
+ * @file idxd_internal.h
+ *
+ * Internal data structures for the idxd/DSA driver for dmadev
+ *
+ * @warning
+ * @b EXPERIMENTAL: these structures and APIs may change without p

[dpdk-dev] [PATCH v3 04/17] dma/idxd: add bus device probing

2021-09-08 Thread Kevin Laatz
Add the basic device probing for DSA devices bound to the IDXD kernel
driver. These devices can be configured via sysfs and made available to
DPDK if they are found during bus scan. Relevant documentation is included.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 doc/guides/dmadevs/idxd.rst  |  64 +++
 drivers/dma/idxd/idxd_bus.c  | 352 +++
 drivers/dma/idxd/meson.build |   1 +
 3 files changed, 417 insertions(+)
 create mode 100644 drivers/dma/idxd/idxd_bus.c

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index 924700d17e..ce33e2857a 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -32,6 +32,56 @@ target platform is x86-based. No additional compilation 
steps are necessary.
 Device Setup
 ------------
 
+Intel\ |reg| DSA devices can use the IDXD kernel driver or DPDK-supported 
drivers,
+such as ``vfio-pci``. Both are supported by the IDXD PMD.
+
+Intel\ |reg| DSA devices using IDXD kernel driver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To use an Intel\ |reg| DSA device bound to the IDXD kernel driver, the device 
must first be configured.
+The `accel-config `_ utility library can 
be used for configuration.
+
+.. note::
+The device configuration can also be done by directly interacting with 
the sysfs nodes.
+An example of how this may be done can be seen in the script 
``dpdk_idxd_cfg.py``
+included in the driver source directory.
+
+There are some mandatory configuration steps before being able to use a device 
with an application.
+The internal engines, which do the copies or other operations,
+and the work-queues, which are used by applications to assign work to the 
device,
+need to be assigned to groups, and the various other configuration options,
+such as priority or queue depth, need to be set for each queue.
+
+To assign an engine to a group::
+
+$ accel-config config-engine dsa0/engine0.0 --group-id=0
+$ accel-config config-engine dsa0/engine0.1 --group-id=1
+
+To assign work queues to groups for passing descriptors to the engines a 
similar accel-config command can be used.
+However, the work queues also need to be configured depending on the use case.
+Some configuration options include:
+
+* mode (Dedicated/Shared): Indicates whether a WQ may accept jobs from 
multiple queues simultaneously.
+* priority: WQ priority between 1 and 15. Larger value means higher priority.
+* wq-size: the size of the WQ. Sum of all WQ sizes must be less that the 
total-size defined by the device.
+* type: WQ type (kernel/mdev/user). Determines how the device is presented.
+* name: identifier given to the WQ.
+
+Example configuration for a work queue::
+
+$ accel-config config-wq dsa0/wq0.0 --group-id=0 \
+   --mode=dedicated --priority=10 --wq-size=8 \
+   --type=user --name=dpdk_app1
+
+Once the devices have been configured, they need to be enabled::
+
+$ accel-config enable-device dsa0
+$ accel-config enable-wq dsa0/wq0.0
+
+Check the device configuration::
+
+$ accel-config list
+
 Devices using VFIO/UIO drivers
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -56,3 +106,17 @@ If fewer workqueues are required, then the ``max_queues`` 
parameter may be passe
 the device driver on the EAL commandline, via the ``allowlist`` or ``-a`` flag 
e.g.::
 
$ dpdk-test -a ,max_queues=4
+
+For devices bound to the IDXD kernel driver,
+the DPDK IDXD driver will automatically perform a scan for available workqueues
+to use. Any workqueues found listed in ``/dev/dsa`` on the system will be 
checked
+in ``/sys``, and any which have ``dpdk_`` prefix in their name will be 
automatically
+probed by the driver to make them available to the application.
+Alternatively, to support use by multiple DPDK processes simultaneously,
+the value used as the DPDK ``--file-prefix`` parameter may be used as a 
workqueue
+name prefix, instead of ``dpdk_``, allowing each DPDK application instance to 
only
+use a subset of configured queues.
+
+Once probed successfully, irrespective of kernel driver, the device will 
appear as a ``dmadev``,
+that is a "DMA device type" inside DPDK, and can be accessed using APIs from 
the
+``rte_dmadev`` library.
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
new file mode 100644
index 00..4097ecd940
--- /dev/null
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2021 Intel Corporation
+ */
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "idxd_internal.h"
+
+/* default value for DSA paths, but allow override in environment for testing 
*/
+#define DSA_DEV_PATH "/dev/dsa"
+#define DSA_SYSFS_PATH "/sys/bus/dsa/devices"
+
+static unsigned int devcount;
+
+/** unique identifier for a DSA device/WQ instance */
+str

[dpdk-dev] [PATCH v3 05/17] dma/idxd: create dmadev instances on bus probe

2021-09-08 Thread Kevin Laatz
When a suitable device is found during the bus scan/probe, create a dmadev
instance for each HW queue. Internal structures required for device
creation are also added.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 drivers/dma/idxd/idxd_bus.c  | 20 -
 drivers/dma/idxd/idxd_common.c   | 75 
 drivers/dma/idxd/idxd_internal.h | 40 +
 drivers/dma/idxd/meson.build |  1 +
 4 files changed, 135 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma/idxd/idxd_common.c

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 4097ecd940..9b55451ad2 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -85,6 +85,18 @@ dsa_get_sysfs_path(void)
return path ? path : DSA_SYSFS_PATH;
 }
 
+static int
+idxd_dev_close(struct rte_dmadev *dev)
+{
+   struct idxd_dmadev *idxd = dev->data->dev_private;
+   munmap(idxd->portal, 0x1000);
+   return 0;
+}
+
+static const struct rte_dmadev_ops idxd_vdev_ops = {
+   .dev_close = idxd_dev_close,
+};
+
 static void *
 idxd_vdev_mmap_wq(struct rte_dsa_device *dev)
 {
@@ -206,7 +218,7 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -1;
idxd.max_batch_size = ret;
idxd.qid = dev->addr.wq_id;
-   idxd.u.vdev.dsa_id = dev->addr.device_id;
+   idxd.u.bus.dsa_id = dev->addr.device_id;
idxd.sva_support = 1;
 
idxd.portal = idxd_vdev_mmap_wq(dev);
@@ -215,6 +227,12 @@ idxd_probe_dsa(struct rte_dsa_device *dev)
return -ENOENT;
}
 
+   ret = idxd_dmadev_create(dev->wq_name, &dev->device, &idxd, 
&idxd_vdev_ops);
+   if (ret) {
+   IDXD_PMD_ERR("Failed to create rawdev %s", dev->wq_name);
+   return ret;
+   }
+
return 0;
 }
 
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
new file mode 100644
index 00..7770b2e264
--- /dev/null
+++ b/drivers/dma/idxd/idxd_common.c
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#include 
+#include 
+#include 
+
+#include "idxd_internal.h"
+
+#define IDXD_PMD_NAME_STR "dmadev_idxd"
+
+int
+idxd_dmadev_create(const char *name, struct rte_device *dev,
+  const struct idxd_dmadev *base_idxd,
+  const struct rte_dmadev_ops *ops)
+{
+   struct idxd_dmadev *idxd;
+   struct rte_dmadev *dmadev = NULL;
+   int ret = 0;
+
+   if (!name) {
+   IDXD_PMD_ERR("Invalid name of the device!");
+   ret = -EINVAL;
+   goto cleanup;
+   }
+
+   /* Allocate device structure */
+   dmadev = rte_dmadev_pmd_allocate(name);
+   if (dmadev == NULL) {
+   IDXD_PMD_ERR("Unable to allocate raw device");
+   ret = -ENOMEM;
+   goto cleanup;
+   }
+   dmadev->dev_ops = ops;
+   dmadev->device = dev;
+
+   idxd = rte_malloc_socket(NULL, sizeof(struct idxd_dmadev), 0, 
dev->numa_node);
+   if (idxd == NULL) {
+   IDXD_PMD_ERR("Unable to allocate memory for device");
+   ret = -ENOMEM;
+   goto cleanup;
+   }
+   dmadev->data->dev_private = idxd;
+   dmadev->dev_private = idxd;
+   *idxd = *base_idxd; /* copy over the main fields already passed in */
+   idxd->dmadev = dmadev;
+
+   /* allocate batch index ring and completion ring.
+* The +1 is because we can never fully use
+* the ring, otherwise read == write means both full and empty.
+*/
+   idxd->batch_comp_ring = rte_zmalloc(NULL, 
(sizeof(idxd->batch_idx_ring[0]) +
+   sizeof(idxd->batch_comp_ring[0]))   * 
(idxd->max_batches + 1),
+   sizeof(idxd->batch_comp_ring[0]));
+   if (idxd->batch_comp_ring == NULL) {
+   IDXD_PMD_ERR("Unable to reserve memory for batch data\n");
+   ret = -ENOMEM;
+   goto cleanup;
+   }
+   idxd->batch_idx_ring = (void 
*)&idxd->batch_comp_ring[idxd->max_batches+1];
+   idxd->batch_iova = rte_mem_virt2iova(idxd->batch_comp_ring);
+
+   return 0;
+
+cleanup:
+   if (dmadev)
+   rte_dmadev_pmd_release(dmadev);
+
+   return ret;
+}
+
+int idxd_pmd_logtype;
+
+RTE_LOG_REGISTER_DEFAULT(idxd_pmd_logtype, WARNING);
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index c6a7dcd72f..99ab2df925 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -24,4 +24,44 @@ extern int idxd_pmd_logtype;
 #define IDXD_PMD_ERR(fmt, args...)IDXD_PMD_LOG(ERR, fmt, ## args)
 #define IDXD_PMD_WARN(fmt, args...)   IDXD_PMD_LOG(WARNING, fmt, ## args)
 
+struct idxd_dmadev {
+   /* counters to track the batches */
+   unsigned short max_batches;
+   unsigned short batch_idx_read;
+   unsigned short batch_idx_write;
+
+ 

[dpdk-dev] [PATCH v3 06/17] dma/idxd: create dmadev instances on pci probe

2021-09-08 Thread Kevin Laatz
When a suitable device is found during the PCI probe, create a dmadev
instance for each HW queue. HW definitions required are also included.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 drivers/dma/idxd/idxd_hw_defs.h  |  71 
 drivers/dma/idxd/idxd_internal.h |  16 ++
 drivers/dma/idxd/idxd_pci.c  | 272 ++-
 3 files changed, 356 insertions(+), 3 deletions(-)
 create mode 100644 drivers/dma/idxd/idxd_hw_defs.h

diff --git a/drivers/dma/idxd/idxd_hw_defs.h b/drivers/dma/idxd/idxd_hw_defs.h
new file mode 100644
index 00..ea627cba6d
--- /dev/null
+++ b/drivers/dma/idxd/idxd_hw_defs.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2021 Intel Corporation
+ */
+
+#ifndef _IDXD_HW_DEFS_H_
+#define _IDXD_HW_DEFS_H_
+
+/*** Definitions for Intel(R) Data Streaming Accelerator  ***/
+
+#define IDXD_CMD_SHIFT 20
+enum rte_idxd_cmds {
+   idxd_enable_dev = 1,
+   idxd_disable_dev,
+   idxd_drain_all,
+   idxd_abort_all,
+   idxd_reset_device,
+   idxd_enable_wq,
+   idxd_disable_wq,
+   idxd_drain_wq,
+   idxd_abort_wq,
+   idxd_reset_wq,
+};
+
+/* General bar0 registers */
+struct rte_idxd_bar0 {
+   uint32_t __rte_cache_aligned version;/* offset 0x00 */
+   uint64_t __rte_aligned(0x10) gencap; /* offset 0x10 */
+   uint64_t __rte_aligned(0x10) wqcap;  /* offset 0x20 */
+   uint64_t __rte_aligned(0x10) grpcap; /* offset 0x30 */
+   uint64_t __rte_aligned(0x08) engcap; /* offset 0x38 */
+   uint64_t __rte_aligned(0x10) opcap;  /* offset 0x40 */
+   uint64_t __rte_aligned(0x20) offsets[2]; /* offset 0x60 */
+   uint32_t __rte_aligned(0x20) gencfg; /* offset 0x80 */
+   uint32_t __rte_aligned(0x08) genctrl;/* offset 0x88 */
+   uint32_t __rte_aligned(0x10) gensts; /* offset 0x90 */
+   uint32_t __rte_aligned(0x08) intcause;   /* offset 0x98 */
+   uint32_t __rte_aligned(0x10) cmd;/* offset 0xA0 */
+   uint32_t __rte_aligned(0x08) cmdstatus;  /* offset 0xA8 */
+   uint64_t __rte_aligned(0x20) swerror[4]; /* offset 0xC0 */
+};
+
+/* workqueue config is provided by array of uint32_t. */
+enum rte_idxd_wqcfg {
+   wq_size_idx,   /* size is in first 32-bit value */
+   wq_threshold_idx,  /* WQ threshold second 32-bits */
+   wq_mode_idx,   /* WQ mode and other flags */
+   wq_sizes_idx,  /* WQ transfer and batch sizes */
+   wq_occ_int_idx,/* WQ occupancy interrupt handle */
+   wq_occ_limit_idx,  /* WQ occupancy limit */
+   wq_state_idx,  /* WQ state and occupancy state */
+};
+
+#define WQ_MODE_SHARED0
+#define WQ_MODE_DEDICATED 1
+#define WQ_PRIORITY_SHIFT 4
+#define WQ_BATCH_SZ_SHIFT 5
+#define WQ_STATE_SHIFT 30
+#define WQ_STATE_MASK 0x3
+
+struct rte_idxd_grpcfg {
+   uint64_t grpwqcfg[4]  __rte_cache_aligned; /* 64-byte register set */
+   uint64_t grpengcfg;  /* offset 32 */
+   uint32_t grpflags;   /* offset 40 */
+};
+
+#define GENSTS_DEV_STATE_MASK 0x03
+#define CMDSTATUS_ACTIVE_SHIFT 31
+#define CMDSTATUS_ACTIVE_MASK (1 << 31)
+#define CMDSTATUS_ERR_MASK 0xFF
+
+#endif
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 99ab2df925..d92d7b3e6f 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -5,6 +5,10 @@
 #ifndef _IDXD_INTERNAL_H_
 #define _IDXD_INTERNAL_H_
 
+#include 
+
+#include "idxd_hw_defs.h"
+
 /**
  * @file idxd_internal.h
  *
@@ -24,6 +28,16 @@ extern int idxd_pmd_logtype;
 #define IDXD_PMD_ERR(fmt, args...)IDXD_PMD_LOG(ERR, fmt, ## args)
 #define IDXD_PMD_WARN(fmt, args...)   IDXD_PMD_LOG(WARNING, fmt, ## args)
 
+struct idxd_pci_common {
+   rte_spinlock_t lk;
+
+   uint8_t wq_cfg_sz;
+   volatile struct rte_idxd_bar0 *regs;
+   volatile uint32_t *wq_regs_base;
+   volatile struct rte_idxd_grpcfg *grp_regs;
+   volatile void *portals;
+};
+
 struct idxd_dmadev {
/* counters to track the batches */
unsigned short max_batches;
@@ -58,6 +72,8 @@ struct idxd_dmadev {
struct {
unsigned int dsa_id;
} bus;
+
+   struct idxd_pci_common *pci;
} u;
 };
 
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 79e4aadcab..318931713c 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -3,6 +3,9 @@
  */
 
 #include 
+#include 
+#include 
+#include 
 
 #include "idxd_internal.h"
 
@@ -16,17 +19,280 @@ const struct rte_pci_id pci_id_idxd_map[] = {
{ .vendor_id = 0, /* sentinel */ },
 };
 
+static inline int
+idxd_pci_dev_command(struct idxd_dmadev *idxd, enum rte_idxd_cmds command)
+{
+   uint8_t err_code;
+   uint16_t qid = idxd->qid;
+   int i = 0;
+
+   if (command >= idxd_disable_wq && command <= idxd_reset_wq)
+   qid = (1 << qid);
+   rte_spi

[dpdk-dev] [PATCH v3 07/17] dma/idxd: add datapath structures

2021-09-08 Thread Kevin Laatz
Add data structures required for the data path for IDXD devices.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 

---
v2: add completion status for invalid opcode
---
 drivers/dma/idxd/idxd_bus.c  |  1 +
 drivers/dma/idxd/idxd_common.c   | 33 ++
 drivers/dma/idxd/idxd_hw_defs.h  | 60 
 drivers/dma/idxd/idxd_internal.h |  3 ++
 drivers/dma/idxd/idxd_pci.c  |  2 +-
 5 files changed, 98 insertions(+), 1 deletion(-)

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 9b55451ad2..20d17c20ca 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -95,6 +95,7 @@ idxd_dev_close(struct rte_dmadev *dev)
 
 static const struct rte_dmadev_ops idxd_vdev_ops = {
.dev_close = idxd_dev_close,
+   .dev_dump = idxd_dump,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 7770b2e264..9490439fdc 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -10,6 +10,35 @@
 
 #define IDXD_PMD_NAME_STR "dmadev_idxd"
 
+int
+idxd_dump(const struct rte_dmadev *dev, FILE *f)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   unsigned int i;
+
+   fprintf(f, "== Private Data ==\n");
+   fprintf(f, "  Portal: %p\n", idxd->portal);
+   fprintf(f, "  Config: { ring_size: %u }\n",
+   idxd->qcfg.nb_desc);
+   fprintf(f, "  Batch ring (sz = %u, max_batches = %u):\n\t",
+   idxd->max_batches + 1, idxd->max_batches);
+   for (i = 0; i <= idxd->max_batches; i++) {
+   fprintf(f, " %u ", idxd->batch_idx_ring[i]);
+   if (i == idxd->batch_idx_read && i == idxd->batch_idx_write)
+   fprintf(f, "[rd ptr, wr ptr] ");
+   else if (i == idxd->batch_idx_read)
+   fprintf(f, "[rd ptr] ");
+   else if (i == idxd->batch_idx_write)
+   fprintf(f, "[wr ptr] ");
+   if (i == idxd->max_batches)
+   fprintf(f, "\n");
+   }
+
+   fprintf(f, "  Curr batch: start = %u, size = %u\n", idxd->batch_start, 
idxd->batch_size);
+   fprintf(f, "  IDS: avail = %u, returned: %u\n", idxd->ids_avail, 
idxd->ids_returned);
+   return 0;
+}
+
 int
 idxd_dmadev_create(const char *name, struct rte_device *dev,
   const struct idxd_dmadev *base_idxd,
@@ -19,6 +48,10 @@ idxd_dmadev_create(const char *name, struct rte_device *dev,
struct rte_dmadev *dmadev = NULL;
int ret = 0;
 
+   RTE_BUILD_BUG_ON(sizeof(struct idxd_hw_desc) != 64);
+   RTE_BUILD_BUG_ON(offsetof(struct idxd_hw_desc, size) != 32);
+   RTE_BUILD_BUG_ON(sizeof(struct idxd_completion) != 32);
+
if (!name) {
IDXD_PMD_ERR("Invalid name of the device!");
ret = -EINVAL;
diff --git a/drivers/dma/idxd/idxd_hw_defs.h b/drivers/dma/idxd/idxd_hw_defs.h
index ea627cba6d..55ca9f7f52 100644
--- a/drivers/dma/idxd/idxd_hw_defs.h
+++ b/drivers/dma/idxd/idxd_hw_defs.h
@@ -5,6 +5,66 @@
 #ifndef _IDXD_HW_DEFS_H_
 #define _IDXD_HW_DEFS_H_
 
+/*
+ * Defines used in the data path for interacting with IDXD hardware.
+ */
+#define IDXD_CMD_OP_SHIFT 24
+enum rte_idxd_ops {
+   idxd_op_nop = 0,
+   idxd_op_batch,
+   idxd_op_drain,
+   idxd_op_memmove,
+   idxd_op_fill
+};
+
+#define IDXD_FLAG_FENCE (1 << 0)
+#define IDXD_FLAG_COMPLETION_ADDR_VALID (1 << 2)
+#define IDXD_FLAG_REQUEST_COMPLETION(1 << 3)
+#define IDXD_FLAG_CACHE_CONTROL (1 << 8)
+
+/**
+ * Hardware descriptor used by DSA hardware, for both bursts and
+ * for individual operations.
+ */
+struct idxd_hw_desc {
+   uint32_t pasid;
+   uint32_t op_flags;
+   rte_iova_t completion;
+
+   RTE_STD_C11
+   union {
+   rte_iova_t src;  /* source address for copy ops etc. */
+   rte_iova_t desc_addr; /* descriptor pointer for batch */
+   };
+   rte_iova_t dst;
+
+   uint32_t size;/* length of data for op, or batch size */
+
+   uint16_t intr_handle; /* completion interrupt handle */
+
+   /* remaining 26 bytes are reserved */
+   uint16_t __reserved[13];
+} __rte_aligned(64);
+
+#define IDXD_COMP_STATUS_INCOMPLETE0
+#define IDXD_COMP_STATUS_SUCCESS   1
+#define IDXD_COMP_STATUS_INVALID_OPCODE 0x10
+#define IDXD_COMP_STATUS_INVALID_SIZE   0x13
+#define IDXD_COMP_STATUS_SKIPPED0xFF /* not official IDXD error, 
needed as placeholder */
+
+/**
+ * Completion record structure written back by DSA
+ */
+struct idxd_completion {
+   uint8_t status;
+   uint8_t result;
+   /* 16-bits pad here */
+   uint32_t completed_size; /* data length, or descriptors for batch */
+
+   rte_iova_t fault_address;
+   uint32_t invalid_flags;
+} __rte_aligned(32);
+
 /*** Definitions for Intel(R) Data Streaming A

[dpdk-dev] [PATCH v3 08/17] dma/idxd: add configure and info_get functions

2021-09-08 Thread Kevin Laatz
Add functions for device configuration. The info_get function is included
here since it can be useful for checking successful configuration.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 

---
v2:
   - fix reconfigure bug in idxd_vchan_setup()
   - add literal include comment for the docs to pick up
v3:
   - fixes needed after changes from rebasing
---
 app/test/test_dmadev.c   |  2 +
 doc/guides/dmadevs/idxd.rst  | 33 +++
 drivers/dma/idxd/idxd_bus.c  |  3 ++
 drivers/dma/idxd/idxd_common.c   | 73 
 drivers/dma/idxd/idxd_internal.h |  6 +++
 drivers/dma/idxd/idxd_pci.c  |  3 ++
 6 files changed, 120 insertions(+)

diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
index 98dddae6d6..a5276d4cce 100644
--- a/app/test/test_dmadev.c
+++ b/app/test/test_dmadev.c
@@ -739,6 +739,7 @@ test_dmadev_instance(uint16_t dev_id)
 {
 #define TEST_RINGSIZE 512
 #define CHECK_ERRStrue
+   /* Setup of the dmadev device. 8< */
struct rte_dmadev_stats stats;
struct rte_dmadev_info info;
const struct rte_dmadev_conf conf = { .nb_vchans = 1};
@@ -759,6 +760,7 @@ test_dmadev_instance(uint16_t dev_id)
 
if (rte_dmadev_vchan_setup(dev_id, vchan, &qconf) < 0)
ERR_RETURN("Error with queue configuration\n");
+   /* >8 End of setup of the dmadev device. */
 
rte_dmadev_info_get(dev_id, &info);
if (info.nb_vchans != 1)
diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index ce33e2857a..66bc9fe744 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -120,3 +120,36 @@ use a subset of configured queues.
 Once probed successfully, irrespective of kernel driver, the device will 
appear as a ``dmadev``,
 that is a "DMA device type" inside DPDK, and can be accessed using APIs from 
the
 ``rte_dmadev`` library.
+
+Using IDXD DMAdev Devices
+-------------------------
+
+To use the devices from an application, the dmadev API can be used.
+
+Getting Device Information
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Basic information about each dmadev device can be queried using the
+``rte_dmadev_info_get()`` API. This will return basic device information such as
+the ``rte_device`` structure, device capabilities and other device specific values.
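For reference, a minimal fragment calling this API could look as follows (an editor's sketch, not part of the patch; it only uses fields that the driver fills in):

    struct rte_dmadev_info info;

    if (rte_dmadev_info_get(dev_id, &info) == 0)
            printf("dmadev %u: max_vchans=%u, max_desc=%u\n",
                            dev_id, info.max_vchans, info.max_desc);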
+
+Device Configuration
+~~~~~~~~~~~~~~~~~~~~
+
+Configuring an IDXD dmadev device is done using the ``rte_dmadev_configure()`` and
+``rte_dmadev_vchan_setup()`` APIs. The configurations are passed to these APIs using
+the ``rte_dmadev_conf`` and ``rte_dmadev_vchan_conf`` structures, respectively. For
+example, these can be used to configure the number of ``vchans`` per device, the
+ring size, etc. The ring size must be a power of two, between 64 and 4096.
+
+The following code shows how the device is configured in
+``test_dmadev.c``:
+
+.. literalinclude:: ../../../app/test/test_dmadev.c
+   :language: c
+   :start-after: Setup of the dmadev device. 8<
+   :end-before: >8 End of setup of the dmadev device.
+   :dedent: 1
+
+Once configured, the device can then be made ready for use by calling the
+``rte_dmadev_start()`` API.
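To make that sequence concrete outside of the literalinclude above, here is a minimal sketch (editor's illustration, not from the patch; the rte_dmadev_vchan_conf field names and the RTE_DMA_DIR_MEM_TO_MEM value are assumed from the dmadev series this set depends on):

    struct rte_dmadev_conf conf = { .nb_vchans = 1 };
    struct rte_dmadev_vchan_conf qconf = {
            .direction = RTE_DMA_DIR_MEM_TO_MEM,
            .nb_desc = 512,                /* power of two, 64-4096 */
    };

    if (rte_dmadev_configure(dev_id, &conf) < 0 ||
                    rte_dmadev_vchan_setup(dev_id, 0, &qconf) < 0)
            rte_exit(EXIT_FAILURE, "Failed to configure dmadev %u\n", dev_id);
    if (rte_dmadev_start(dev_id) < 0)
            rte_exit(EXIT_FAILURE, "Failed to start dmadev %u\n", dev_id);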
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 20d17c20ca..7a6afabd27 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -96,6 +96,9 @@ idxd_dev_close(struct rte_dmadev *dev)
 static const struct rte_dmadev_ops idxd_vdev_ops = {
.dev_close = idxd_dev_close,
.dev_dump = idxd_dump,
+   .dev_configure = idxd_configure,
+   .vchan_setup = idxd_vchan_setup,
+   .dev_info_get = idxd_info_get,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 9490439fdc..9949608293 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -39,6 +39,79 @@ idxd_dump(const struct rte_dmadev *dev, FILE *f)
return 0;
 }
 
+int
+idxd_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *info, 
uint32_t size)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+
+   if (size < sizeof(*info))
+   return -EINVAL;
+
+   *info = (struct rte_dmadev_info) {
+   .device = dev->device,
+   .dev_capa = RTE_DMADEV_CAPA_MEM_TO_MEM |
+   RTE_DMADEV_CAPA_OPS_COPY | 
RTE_DMADEV_CAPA_OPS_FILL,
+   .max_vchans = 1,
+   .max_desc = 4096,
+   .min_desc = 64,
+   .nb_vchans = (idxd->desc_ring != NULL), /* returns 1 or 
0 */
+   };
+   if (idxd->sva_support)
+   info->dev_capa |= RTE_DMADEV_CAPA_SVA;
+   return 0;
+}
+
+int
+idxd_configure(struct rte_dmadev *dev __rte_unused, const struct 
rte_dmadev_conf *dev_conf,
+   uint32_t conf_sz)
+{
+   if (sizeof(struct rte_dmadev_conf) != conf_sz)
+   retur

[dpdk-dev] [PATCH v3 09/17] dma/idxd: add start and stop functions for pci devices

2021-09-08 Thread Kevin Laatz
Add device start/stop functions for DSA devices bound to vfio. For devices
bound to the IDXD kernel driver, these are not required since the IDXD
kernel driver takes care of this.
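From an application's point of view these hooks are reached through the generic dmadev calls; a minimal sketch (editor's illustration, not part of the patch):

    if (rte_dmadev_start(dev_id) < 0)
            rte_exit(EXIT_FAILURE, "Failed to start dmadev %u\n", dev_id);

    /* ... datapath work: copy/fill, submit, completions ... */

    rte_dmadev_stop(dev_id);   /* for vfio-bound DSA devices this disables the WQ */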

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 drivers/dma/idxd/idxd_pci.c | 52 +
 1 file changed, 52 insertions(+)

diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index 569df8d04c..3c0e3086f7 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -59,11 +59,63 @@ idxd_is_wq_enabled(struct idxd_dmadev *idxd)
return ((state >> WQ_STATE_SHIFT) & WQ_STATE_MASK) == 0x1;
 }
 
+static int
+idxd_pci_dev_stop(struct rte_dmadev *dev)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   uint8_t err_code;
+
+   if (!idxd_is_wq_enabled(idxd)) {
+   IDXD_PMD_ERR("Work queue %d already disabled", idxd->qid);
+   return -EALREADY;
+   }
+
+   err_code = idxd_pci_dev_command(idxd, idxd_disable_wq);
+   if (err_code || idxd_is_wq_enabled(idxd)) {
+   IDXD_PMD_ERR("Failed disabling work queue %d, error code: %#x",
+   idxd->qid, err_code);
+   return -err_code;
+   }
+   IDXD_PMD_DEBUG("Work queue %d disabled OK", idxd->qid);
+
+   return 0;
+}
+
+static int
+idxd_pci_dev_start(struct rte_dmadev *dev)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   uint8_t err_code;
+
+   if (idxd_is_wq_enabled(idxd)) {
+   IDXD_PMD_WARN("WQ %d already enabled", idxd->qid);
+   return 0;
+   }
+
+   if (idxd->desc_ring == NULL) {
+   IDXD_PMD_ERR("WQ %d has not been fully configured", idxd->qid);
+   return -EINVAL;
+   }
+
+   err_code = idxd_pci_dev_command(idxd, idxd_enable_wq);
+   if (err_code || !idxd_is_wq_enabled(idxd)) {
+   IDXD_PMD_ERR("Failed enabling work queue %d, error code: %#x",
+   idxd->qid, err_code);
+   return err_code == 0 ? -1 : err_code;
+   }
+
+   IDXD_PMD_DEBUG("Work queue %d enabled OK", idxd->qid);
+
+   return 0;
+}
+
 static const struct rte_dmadev_ops idxd_pci_ops = {
.dev_dump = idxd_dump,
.dev_configure = idxd_configure,
.vchan_setup = idxd_vchan_setup,
.dev_info_get = idxd_info_get,
+   .dev_start = idxd_pci_dev_start,
+   .dev_stop = idxd_pci_dev_stop,
 };
 
 /* each portal uses 4 x 4k pages */
-- 
2.30.2



[dpdk-dev] [PATCH v3 10/17] dma/idxd: add data-path job submission functions

2021-09-08 Thread Kevin Laatz
Add data path functions for enqueuing and submitting operations to DSA
devices.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 doc/guides/dmadevs/idxd.rst  |  64 +++
 drivers/dma/idxd/idxd_common.c   | 137 +++
 drivers/dma/idxd/idxd_internal.h |   5 ++
 drivers/dma/idxd/meson.build |   1 +
 4 files changed, 207 insertions(+)

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index 66bc9fe744..0c4c105e0f 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -153,3 +153,67 @@ The following code shows how the device is configured in
 
 Once configured, the device can then be made ready for use by calling the
 ``rte_dmadev_start()`` API.
+
+Performing Data Copies
+~~~~~~~~~~~~~~~~~~~~~~
+
+To perform data copies using IDXD dmadev devices, descriptors should be enqueued
+using the ``rte_dmadev_copy()`` API. The HW can be triggered to perform the copy
+in two ways, either via a ``RTE_DMA_OP_FLAG_SUBMIT`` flag or by calling
+``rte_dmadev_submit()``. Once copies have been completed, the completion will
+be reported back when the application calls ``rte_dmadev_completed()`` or
+``rte_dmadev_completed_status()``. The latter will also report the status of each
+completed operation.
+
+The ``rte_dmadev_copy()`` function enqueues a single copy operation on the device
+ring, to be performed later. The parameters to that function include the IOVA
+addresses of both the source and destination buffers, as well as the length of
+the copy.
+
+If the ``RTE_DMA_OP_FLAG_SUBMIT`` flag is set when calling ``rte_dmadev_copy()``,
+the device hardware will be informed of the enqueued elements immediately.
+Alternatively, if the flag is not set, the application needs to call the
+``rte_dmadev_submit()`` function to notify the device hardware. Once the device
+hardware is informed of the elements enqueued on the ring, it will begin to
+process them. It is expected that, for efficiency reasons, a burst of operations
+will be enqueued to the device via multiple enqueue calls between calls to the
+``rte_dmadev_submit()`` function.
+
+The following code demonstrates how to enqueue a burst of copies to the
+device and start the hardware processing of them:
+
+.. code-block:: C
+
+   struct rte_mbuf *srcs[COMP_BURST_SZ], *dsts[COMP_BURST_SZ];
+   unsigned int i, j;
+
+   for (i = 0; i < RTE_DIM(srcs); i++) {
+  uint64_t *src_data;
+
+  srcs[i] = rte_pktmbuf_alloc(pool);
+  dsts[i] = rte_pktmbuf_alloc(pool);
+  if (srcs[i] == NULL || dsts[i] == NULL) {
+ PRINT_ERR("Error allocating buffers\n");
+ return -1;
+  }
+  src_data = rte_pktmbuf_mtod(srcs[i], uint64_t *);
+
+  for (j = 0; j < COPY_LEN/sizeof(uint64_t); j++)
+ src_data[j] = rte_rand();
+
+  if (rte_dmadev_copy(dev_id, vchan, srcs[i]->buf_iova + srcs[i]->data_off,
+dsts[i]->buf_iova + dsts[i]->data_off, COPY_LEN, 0) < 0) {
+ PRINT_ERR("Error with rte_dmadev_copy for buffer %u\n", i);
+ return -1;
+  }
+   }
+   rte_dmadev_submit(dev_id, vchan);
+
+Filling an Area of Memory
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The IDXD driver also has support for the ``fill`` operation, where an area
+of memory is overwritten, or filled, with a short pattern of data.
+Fill operations can be performed in much the same way as copy operations
+described above, just using the ``rte_dmadev_fill()`` function rather than the
+``rte_dmadev_copy()`` function.
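A short fill sketch to complement the copy example above (editor's illustration, not from the patch; the parameter order of rte_dmadev_fill() is assumed from the dmadev API proposal, and dst, COPY_LEN and PRINT_ERR reuse names from the copy example):

    /* overwrite the destination mbuf data area with a repeating 8-byte pattern */
    uint64_t pattern = 0xA5A5A5A5A5A5A5A5ULL;

    if (rte_dmadev_fill(dev_id, vchan, pattern,
                    dst->buf_iova + dst->data_off, COPY_LEN,
                    RTE_DMA_OP_FLAG_SUBMIT) < 0) {
            PRINT_ERR("Error with rte_dmadev_fill\n");
            return -1;
    }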
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 9949608293..69851defba 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -2,14 +2,147 @@
  * Copyright 2021 Intel Corporation
  */
 
+#include 
+
 #include 
 #include 
 #include 
+#include 
 
 #include "idxd_internal.h"
 
 #define IDXD_PMD_NAME_STR "dmadev_idxd"
 
+static __rte_always_inline rte_iova_t
+__desc_idx_to_iova(struct idxd_dmadev *idxd, uint16_t n)
+{
+   return idxd->desc_iova + (n * sizeof(struct idxd_hw_desc));
+}
+
+static __rte_always_inline void
+__idxd_movdir64b(volatile void *dst, const struct idxd_hw_desc *src)
+{
+   asm volatile (".byte 0x66, 0x0f, 0x38, 0xf8, 0x02"
+   :
+   : "a" (dst), "d" (src)
+   : "memory");
+}
+
+static __rte_always_inline void
+__submit(struct idxd_dmadev *idxd)
+{
+   rte_prefetch1(&idxd->batch_comp_ring[idxd->batch_idx_read]);
+
+   if (idxd->batch_size == 0)
+   return;
+
+   /* write completion to batch comp ring */
+   rte_iova_t comp_addr = idxd->batch_iova +
+   (idxd->batch_idx_write * sizeof(struct 
idxd_completion));
+
+   if (idxd->batch_size == 1) {
+   /* submit batch directly */
+   struct idxd_hw_desc desc =
+   idxd->desc_ring[idxd->batch_s

[dpdk-dev] [PATCH v3 11/17] dma/idxd: add data-path job completion functions

2021-09-08 Thread Kevin Laatz
Add the data path functions for gathering completed operations.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 

---
v2:
   - fixed typo in docs
   - add completion status for invalid opcode
---
 doc/guides/dmadevs/idxd.rst  |  25 
 drivers/dma/idxd/idxd_common.c   | 237 +++
 drivers/dma/idxd/idxd_internal.h |   5 +
 3 files changed, 267 insertions(+)

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index 0c4c105e0f..b0b5632b48 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -209,6 +209,31 @@ device and start the hardware processing of them:
}
rte_dmadev_submit(dev_id, vchan);
 
+To retrieve information about completed copies, the ``rte_dmadev_completed()``
+and ``rte_dmadev_completed_status()`` APIs should be used.
+``rte_dmadev_completed()`` will return the number of completed operations, along
+with the index of the last successfully completed operation and whether or not an
+error was encountered. If an error was encountered, ``rte_dmadev_completed_status()``
+must be used to restart the device so that it continues processing operations, and
+also to gather the status of each individual operation, which is filled in to the
+``status`` array provided as a parameter by the application.
+
+The following code shows how to retrieve the number of successfully completed
+copies within a burst and then use ``rte_dmadev_completed_status()`` to check
+which operations failed and restart the device so that it continues processing:
+
+.. code-block:: C
+
+   enum rte_dma_status_code status[COMP_BURST_SZ];
+   uint16_t count, idx, status_count;
+   bool error = false;
+
+   count = rte_dmadev_completed(dev_id, vchan, COMP_BURST_SZ, &idx, &error);
+
+   if (error) {
+  status_count = rte_dmadev_completed_status(dev_id, vchan, COMP_BURST_SZ, &idx, status);
+   }
+
 Filling an Area of Memory
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 69851defba..8eb73fdcc6 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -143,6 +143,241 @@ idxd_submit(struct rte_dmadev *dev, uint16_t qid 
__rte_unused)
return 0;
 }
 
+static enum rte_dma_status_code
+get_comp_status(struct idxd_completion *c)
+{
+   uint8_t st = c->status;
+   switch (st) {
+   /* successful descriptors are not written back normally */
+   case IDXD_COMP_STATUS_INCOMPLETE:
+   case IDXD_COMP_STATUS_SUCCESS:
+   return RTE_DMA_STATUS_SUCCESSFUL;
+   case IDXD_COMP_STATUS_INVALID_OPCODE:
+   return RTE_DMA_STATUS_INVALID_OPCODE;
+   case IDXD_COMP_STATUS_INVALID_SIZE:
+   return RTE_DMA_STATUS_INVALID_LENGTH;
+   case IDXD_COMP_STATUS_SKIPPED:
+   return RTE_DMA_STATUS_NOT_ATTEMPTED;
+   default:
+   return RTE_DMA_STATUS_ERROR_UNKNOWN;
+   }
+}
+
+static __rte_always_inline int
+batch_ok(struct idxd_dmadev *idxd, uint8_t max_ops, enum rte_dma_status_code 
*status)
+{
+   uint16_t ret;
+   uint8_t bstatus;
+
+   if (max_ops == 0)
+   return 0;
+
+   /* first check if there are any unreturned handles from last time */
+   if (idxd->ids_avail != idxd->ids_returned) {
+   ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), 
max_ops);
+   idxd->ids_returned += ret;
+   if (status)
+   memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * 
sizeof(*status));
+   return ret;
+   }
+
+   if (idxd->batch_idx_read == idxd->batch_idx_write)
+   return 0;
+
+   bstatus = idxd->batch_comp_ring[idxd->batch_idx_read].status;
+   /* now check if next batch is complete and successful */
+   if (bstatus == IDXD_COMP_STATUS_SUCCESS) {
+   /* since the batch idx ring stores the start of each batch, 
pre-increment to lookup
+* start of next batch.
+*/
+   if (++idxd->batch_idx_read > idxd->max_batches)
+   idxd->batch_idx_read = 0;
+   idxd->ids_avail = idxd->batch_idx_ring[idxd->batch_idx_read];
+
+   ret = RTE_MIN((uint16_t)(idxd->ids_avail - idxd->ids_returned), 
max_ops);
+   idxd->ids_returned += ret;
+   if (status)
+   memset(status, RTE_DMA_STATUS_SUCCESSFUL, ret * 
sizeof(*status));
+   return ret;
+   }
+   /* check if batch is incomplete */
+   else if (bstatus == IDXD_COMP_STATUS_INCOMPLETE)
+   return 0;
+
+   return -1; /* error case */
+}
+
+static inline uint16_t
+batch_completed(struct idxd_dmadev *idxd, uint8_t max_ops, bool *has_error)
+{
+   uint16_t i;
+   uint16_t b_start, b_end, next_batch;
+
+   int ret = batch_ok(idxd, max_ops, NULL);
+   if (ret >= 0)
+   return ret;
+
+   /* ERROR case, not successful, not

[dpdk-dev] [PATCH v3 12/17] dma/idxd: add operation statistic tracking

2021-09-08 Thread Kevin Laatz
Add statistic tracking for DSA devices.

Signed-off-by: Bruce Richardson 
Signed-off-by: Kevin Laatz 
---
 doc/guides/dmadevs/idxd.rst  | 11 +++
 drivers/dma/idxd/idxd_bus.c  |  2 ++
 drivers/dma/idxd/idxd_common.c   | 27 +++
 drivers/dma/idxd/idxd_internal.h |  3 +++
 drivers/dma/idxd/idxd_pci.c  |  2 ++
 5 files changed, 45 insertions(+)

diff --git a/doc/guides/dmadevs/idxd.rst b/doc/guides/dmadevs/idxd.rst
index b0b5632b48..634ef58985 100644
--- a/doc/guides/dmadevs/idxd.rst
+++ b/doc/guides/dmadevs/idxd.rst
@@ -242,3 +242,14 @@ of memory is overwritten, or filled, with a short pattern 
of data.
 Fill operations can be performed in much the same way as copy operations
 described above, just using the ``rte_dmadev_fill()`` function rather than the
 ``rte_dmadev_copy()`` function.
+
+Querying Device Statistics
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The statistics from the IDXD dmadev device can be retrieved via the stats
+functions in the ``rte_dmadev`` library, i.e. ``rte_dmadev_stats_get()``. The
+statistics returned for each device instance are:
+
+* ``submitted``
+* ``completed``
+* ``errors``
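A usage sketch for the counters listed above (editor's illustration, not part of the patch; the exact rte_dmadev_stats_get()/rte_dmadev_stats_reset() prototypes are assumed from the dmadev series this set depends on):

    struct rte_dmadev_stats stats;

    if (rte_dmadev_stats_get(dev_id, vchan, &stats) == 0)
            printf("submitted=%"PRIu64" completed=%"PRIu64" errors=%"PRIu64"\n",
                            stats.submitted, stats.completed, stats.errors);

    rte_dmadev_stats_reset(dev_id, vchan);  /* clear counters, e.g. between test runs */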
diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 7a6afabd27..8781195d59 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -99,6 +99,8 @@ static const struct rte_dmadev_ops idxd_vdev_ops = {
.dev_configure = idxd_configure,
.vchan_setup = idxd_vchan_setup,
.dev_info_get = idxd_info_get,
+   .stats_get = idxd_stats_get,
+   .stats_reset = idxd_stats_reset,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 8eb73fdcc6..66d1b3432e 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -65,6 +65,8 @@ __submit(struct idxd_dmadev *idxd)
if (++idxd->batch_idx_write > idxd->max_batches)
idxd->batch_idx_write = 0;
 
+   idxd->stats.submitted += idxd->batch_size;
+
idxd->batch_start += idxd->batch_size;
idxd->batch_size = 0;
idxd->batch_idx_ring[idxd->batch_idx_write] = idxd->batch_start;
@@ -278,6 +280,8 @@ batch_completed_status(struct idxd_dmadev *idxd, uint16_t 
max_ops, enum rte_dma_
const uint16_t b_len = b_end - b_start;
if (b_len == 1) {/* not a batch */
*status = 
get_comp_status(&idxd->batch_comp_ring[idxd->batch_idx_read]);
+   if (status != RTE_DMA_STATUS_SUCCESSFUL)
+   idxd->stats.errors++;
idxd->ids_avail++;
idxd->ids_returned++;
idxd->batch_idx_read = next_batch;
@@ -299,6 +303,8 @@ batch_completed_status(struct idxd_dmadev *idxd, uint16_t 
max_ops, enum rte_dma_
struct idxd_completion *c = (void *)
&idxd->desc_ring[(b_start + ret) & 
idxd->desc_ring_mask];
status[ret] = (ret < bcount) ? get_comp_status(c) : 
RTE_DMA_STATUS_NOT_ATTEMPTED;
+   if (status[ret] != RTE_DMA_STATUS_SUCCESSFUL)
+   idxd->stats.errors++;
}
idxd->ids_avail = idxd->ids_returned += ret;
 
@@ -357,6 +363,7 @@ idxd_completed(struct rte_dmadev *dev, uint16_t qid 
__rte_unused, uint16_t max_o
ret += batch;
} while (batch > 0 && *has_error == false);
 
+   idxd->stats.completed += ret;
*last_idx = idxd->ids_returned - 1;
return ret;
 }
@@ -374,6 +381,7 @@ idxd_completed_status(struct rte_dmadev *dev, uint16_t qid 
__rte_unused, uint16_
ret += batch;
} while (batch > 0);
 
+   idxd->stats.completed += ret;
*last_idx = idxd->ids_returned - 1;
return ret;
 }
@@ -407,6 +415,25 @@ idxd_dump(const struct rte_dmadev *dev, FILE *f)
return 0;
 }
 
+int
+idxd_stats_get(const struct rte_dmadev *dev, uint16_t vchan __rte_unused,
+   struct rte_dmadev_stats *stats, uint32_t stats_sz)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   if (stats_sz < sizeof(*stats))
+   return -EINVAL;
+   *stats = idxd->stats;
+   return 0;
+}
+
+int
+idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan __rte_unused)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   idxd->stats = (struct rte_dmadev_stats){0};
+   return 0;
+}
+
 int
 idxd_info_get(const struct rte_dmadev *dev, struct rte_dmadev_info *info, 
uint32_t size)
 {
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index 84d45a09d6..c04ee002d8 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -98,5 +98,8 @@ uint16_t idxd_completed(struct rte_dmadev *dev, uint16_t qid, 
uint16_t max_ops,
 uint16_t idxd_completed_status(struct rte_dmadev *dev, uint16_t qid 
__rte_unused,
uint16_t max_ops, uint16_t *last_idx,
enum rte_dma_status_code *status);
+in

[dpdk-dev] [PATCH v3 13/17] dma/idxd: add vchan status function

2021-09-08 Thread Kevin Laatz
When testing dmadev drivers, it is useful to have the HW device in a known
state. This patch adds the implementation of the function which will wait
for the device to be idle (all jobs completed) before proceeding.
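As an illustration of how a test might use this (editor's sketch, not part of the patch; the generic rte_dmadev_vchan_status() wrapper name is assumed from the dmadev series this set depends on):

    enum rte_dmadev_vchan_status st;

    /* wait for the queue to drain before reconfiguring or checking results */
    do {
            if (rte_dmadev_vchan_status(dev_id, vchan, &st) < 0)
                    break;
    } while (st == RTE_DMA_VCHAN_ACTIVE);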

Signed-off-by: Kevin Laatz 

---
v3: update API name to vchan_status
---
 drivers/dma/idxd/idxd_bus.c  |  1 +
 drivers/dma/idxd/idxd_common.c   | 14 ++
 drivers/dma/idxd/idxd_internal.h |  2 ++
 drivers/dma/idxd/idxd_pci.c  |  1 +
 4 files changed, 18 insertions(+)

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 8781195d59..8f0fcad87a 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -101,6 +101,7 @@ static const struct rte_dmadev_ops idxd_vdev_ops = {
.dev_info_get = idxd_info_get,
.stats_get = idxd_stats_get,
.stats_reset = idxd_stats_reset,
+   .vchan_status = idxd_vchan_status,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index 66d1b3432e..e20b41ae54 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -165,6 +165,20 @@ get_comp_status(struct idxd_completion *c)
}
 }
 
+int
+idxd_vchan_status(const struct rte_dmadev *dev, uint16_t vchan __rte_unused,
+   enum rte_dmadev_vchan_status *status)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   uint16_t last_batch_write = idxd->batch_idx_write == 0 ? 
idxd->max_batches :
+   idxd->batch_idx_write - 1;
+   uint8_t bstatus = (idxd->batch_comp_ring[last_batch_write].status != 0);
+
+   *status = bstatus ? RTE_DMA_VCHAN_IDLE : RTE_DMA_VCHAN_ACTIVE;
+
+   return 0;
+}
+
 static __rte_always_inline int
 batch_ok(struct idxd_dmadev *idxd, uint8_t max_ops, enum rte_dma_status_code 
*status)
 {
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index c04ee002d8..fcc0235a1d 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -101,5 +101,7 @@ uint16_t idxd_completed_status(struct rte_dmadev *dev, 
uint16_t qid __rte_unused
 int idxd_stats_get(const struct rte_dmadev *dev, uint16_t vchan,
struct rte_dmadev_stats *stats, uint32_t stats_sz);
 int idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan);
+int idxd_vchan_status(const struct rte_dmadev *dev, uint16_t vchan,
+   enum rte_dmadev_vchan_status *status);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index a84232b6e9..f3a5d2a970 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -118,6 +118,7 @@ static const struct rte_dmadev_ops idxd_pci_ops = {
.stats_reset = idxd_stats_reset,
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
+   .vchan_status = idxd_vchan_status,
 };
 
 /* each portal uses 4 x 4k pages */
-- 
2.30.2



[dpdk-dev] [PATCH v3 14/17] dma/idxd: add burst capacity API

2021-09-08 Thread Kevin Laatz
Add support for the burst capacity API. This API will provide the calling
application with the remaining capacity of the current burst (limited by
max HW batch size).
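For illustration (editor's sketch, not part of the patch; the generic rte_dmadev_burst_capacity() wrapper name is assumed from the dmadev series, and nb_bufs, src_iova, dst_iova and COPY_LEN are hypothetical application variables):

    uint16_t room = rte_dmadev_burst_capacity(dev_id, vchan);
    uint16_t n = RTE_MIN(room, nb_bufs);
    uint16_t i;

    for (i = 0; i < n; i++)
            if (rte_dmadev_copy(dev_id, vchan, src_iova[i], dst_iova[i],
                            COPY_LEN, 0) < 0)
                    break;
    rte_dmadev_submit(dev_id, vchan);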

Signed-off-by: Kevin Laatz 
---
 drivers/dma/idxd/idxd_bus.c  |  1 +
 drivers/dma/idxd/idxd_common.c   | 20 
 drivers/dma/idxd/idxd_internal.h |  1 +
 drivers/dma/idxd/idxd_pci.c  |  2 ++
 4 files changed, 24 insertions(+)

diff --git a/drivers/dma/idxd/idxd_bus.c b/drivers/dma/idxd/idxd_bus.c
index 8f0fcad87a..e2bcca1c74 100644
--- a/drivers/dma/idxd/idxd_bus.c
+++ b/drivers/dma/idxd/idxd_bus.c
@@ -102,6 +102,7 @@ static const struct rte_dmadev_ops idxd_vdev_ops = {
.stats_get = idxd_stats_get,
.stats_reset = idxd_stats_reset,
.vchan_status = idxd_vchan_status,
+   .burst_capacity = idxd_burst_capacity,
 };
 
 static void *
diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c
index e20b41ae54..ced9f81772 100644
--- a/drivers/dma/idxd/idxd_common.c
+++ b/drivers/dma/idxd/idxd_common.c
@@ -470,6 +470,26 @@ idxd_info_get(const struct rte_dmadev *dev, struct 
rte_dmadev_info *info, uint32
return 0;
 }
 
+uint16_t
+idxd_burst_capacity(const struct rte_dmadev *dev, uint16_t vchan __rte_unused)
+{
+   struct idxd_dmadev *idxd = dev->dev_private;
+   uint16_t write_idx = idxd->batch_start + idxd->batch_size;
+   uint16_t used_space;
+
+   /* Check for space in the batch ring */
+   if ((idxd->batch_idx_read == 0 && idxd->batch_idx_write == 
idxd->max_batches) ||
+   idxd->batch_idx_write + 1 == idxd->batch_idx_read)
+   return 0;
+
+   /* For descriptors, check for wrap-around on write but not read */
+   if (idxd->ids_returned > write_idx)
+   write_idx += idxd->desc_ring_mask + 1;
+   used_space = write_idx - idxd->ids_returned;
+
+   return RTE_MIN((idxd->desc_ring_mask - used_space), 
idxd->max_batch_size);
+}
+
 int
 idxd_configure(struct rte_dmadev *dev __rte_unused, const struct 
rte_dmadev_conf *dev_conf,
uint32_t conf_sz)
diff --git a/drivers/dma/idxd/idxd_internal.h b/drivers/dma/idxd/idxd_internal.h
index fcc0235a1d..692d27cf72 100644
--- a/drivers/dma/idxd/idxd_internal.h
+++ b/drivers/dma/idxd/idxd_internal.h
@@ -103,5 +103,6 @@ int idxd_stats_get(const struct rte_dmadev *dev, uint16_t 
vchan,
 int idxd_stats_reset(struct rte_dmadev *dev, uint16_t vchan);
 int idxd_vchan_status(const struct rte_dmadev *dev, uint16_t vchan,
enum rte_dmadev_vchan_status *status);
+uint16_t idxd_burst_capacity(const struct rte_dmadev *dev, uint16_t vchan);
 
 #endif /* _IDXD_INTERNAL_H_ */
diff --git a/drivers/dma/idxd/idxd_pci.c b/drivers/dma/idxd/idxd_pci.c
index f3a5d2a970..5da14eb9a2 100644
--- a/drivers/dma/idxd/idxd_pci.c
+++ b/drivers/dma/idxd/idxd_pci.c
@@ -119,6 +119,7 @@ static const struct rte_dmadev_ops idxd_pci_ops = {
.dev_start = idxd_pci_dev_start,
.dev_stop = idxd_pci_dev_stop,
.vchan_status = idxd_vchan_status,
+   .burst_capacity = idxd_burst_capacity,
 };
 
 /* each portal uses 4 x 4k pages */
@@ -232,6 +233,7 @@ init_pci_device(struct rte_pci_device *dev, struct 
idxd_dmadev *idxd,
 
idxd->u.pci = pci;
idxd->max_batches = wq_size;
+   idxd->max_batch_size = 1 << lg2_max_batch;
 
/* enable the device itself */
err_code = idxd_pci_dev_command(idxd, idxd_enable_dev);
-- 
2.30.2



[dpdk-dev] [PATCH v3 15/17] dma/idxd: move dpdk_idxd_cfg.py from raw to dma

2021-09-08 Thread Kevin Laatz
From: Conor Walsh 

Move the example script for configuring IDXD devices bound to the IDXD
kernel driver from raw to dma, and create a symlink to still allow use from
raw.

Signed-off-by: Conor Walsh 
Signed-off-by: Kevin Laatz 
---
 drivers/dma/idxd/dpdk_idxd_cfg.py | 117 +
 drivers/raw/ioat/dpdk_idxd_cfg.py | 118 +-
 2 files changed, 118 insertions(+), 117 deletions(-)
 create mode 100755 drivers/dma/idxd/dpdk_idxd_cfg.py
 mode change 100755 => 12 drivers/raw/ioat/dpdk_idxd_cfg.py

diff --git a/drivers/dma/idxd/dpdk_idxd_cfg.py 
b/drivers/dma/idxd/dpdk_idxd_cfg.py
new file mode 100755
index 00..fcc27822ef
--- /dev/null
+++ b/drivers/dma/idxd/dpdk_idxd_cfg.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2020 Intel Corporation
+
+"""
+Configure an entire Intel DSA instance, using idxd kernel driver, for DPDK use
+"""
+
+import sys
+import argparse
+import os
+import os.path
+
+
+class SysfsDir:
+"Used to read/write paths in a sysfs directory"
+def __init__(self, path):
+self.path = path
+
+def read_int(self, filename):
+"Return a value from sysfs file"
+with open(os.path.join(self.path, filename)) as f:
+return int(f.readline())
+
+def write_values(self, values):
+"write dictionary, where key is filename and value is value to write"
+for filename, contents in values.items():
+with open(os.path.join(self.path, filename), "w") as f:
+f.write(str(contents))
+
+
+def reset_device(dsa_id):
+"Reset the DSA device and all its queues"
+drv_dir = SysfsDir("/sys/bus/dsa/drivers/dsa")
+drv_dir.write_values({"unbind": f"dsa{dsa_id}"})
+
+
+def get_pci_dir(pci):
+"Search for the sysfs directory of the PCI device"
+base_dir = '/sys/bus/pci/devices/'
+for path, dirs, files in os.walk(base_dir):
+for dir in dirs:
+if pci in dir:
+return os.path.join(base_dir, dir)
+sys.exit(f"Could not find sysfs directory for device {pci}")
+
+
+def get_dsa_id(pci):
+"Get the DSA instance ID using the PCI address of the device"
+pci_dir = get_pci_dir(pci)
+for path, dirs, files in os.walk(pci_dir):
+for dir in dirs:
+if dir.startswith('dsa') and 'wq' not in dir:
+return int(dir[3:])
+sys.exit(f"Could not get device ID for device {pci}")
+
+
+def configure_dsa(dsa_id, queues, prefix):
+"Configure the DSA instance with appropriate number of queues"
+dsa_dir = SysfsDir(f"/sys/bus/dsa/devices/dsa{dsa_id}")
+drv_dir = SysfsDir("/sys/bus/dsa/drivers/dsa")
+
+max_groups = dsa_dir.read_int("max_groups")
+max_engines = dsa_dir.read_int("max_engines")
+max_queues = dsa_dir.read_int("max_work_queues")
+max_work_queues_size = dsa_dir.read_int("max_work_queues_size")
+
+nb_queues = min(queues, max_queues)
+if queues > nb_queues:
+print(f"Setting number of queues to max supported value: {max_queues}")
+
+# we want one engine per group, and no more engines than queues
+nb_groups = min(max_engines, max_groups, nb_queues)
+for grp in range(nb_groups):
+dsa_dir.write_values({f"engine{dsa_id}.{grp}/group_id": grp})
+
+# configure each queue
+for q in range(nb_queues):
+wq_dir = SysfsDir(os.path.join(dsa_dir.path, f"wq{dsa_id}.{q}"))
+wq_dir.write_values({"group_id": q % nb_groups,
+ "type": "user",
+ "mode": "dedicated",
+ "name": f"{prefix}_wq{dsa_id}.{q}",
+ "priority": 1,
+ "size": int(max_work_queues_size / nb_queues)})
+
+# enable device and then queues
+drv_dir.write_values({"bind": f"dsa{dsa_id}"})
+for q in range(nb_queues):
+drv_dir.write_values({"bind": f"wq{dsa_id}.{q}"})
+
+
+def main(args):
+"Main function, does arg parsing and calls config function"
+arg_p = argparse.ArgumentParser(
+description="Configure whole DSA device instance for DPDK use")
+arg_p.add_argument('dsa_id',
+   help="Specify DSA instance either via DSA instance 
number or PCI address")
+arg_p.add_argument('-q', metavar='queues', type=int, default=255,
+   help="Number of queues to set up")
+arg_p.add_argument('--name-prefix', metavar='prefix', dest='prefix',
+   default="dpdk",
+   help="Prefix for workqueue name to mark for DPDK use 
[default: 'dpdk']")
+arg_p.add_argument('--reset', action='store_true',
+   help="Reset DSA device and its queues")
+parsed_args = arg_p.parse_args(args[1:])
+
+dsa_id = parsed_args.dsa_id
+dsa_id = get_dsa_id(dsa_id) if ':' in dsa_id else dsa_id
+if parsed_args.reset:
+reset_device(dsa_id)
+else:
+configure

[dpdk-dev] [PATCH v3 16/17] devbind: add dma device class

2021-09-08 Thread Kevin Laatz
Add a new class for DMA devices. Devices listed under the DMA class are to
be used with the dmadev library.

Signed-off-by: Kevin Laatz 
---
 usertools/dpdk-devbind.py | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index 74d16e4c4b..8bb573f4b0 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -69,12 +69,13 @@
 network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
 baseband_devices = [acceleration_class]
 crypto_devices = [encryption_class, intel_processor_class]
+dma_devices = []
 eventdev_devices = [cavium_sso, cavium_tim, intel_dlb, octeontx2_sso]
 mempool_devices = [cavium_fpa, octeontx2_npa]
 compress_devices = [cavium_zip]
 regex_devices = [octeontx2_ree]
-misc_devices = [cnxk_bphy, cnxk_bphy_cgx, intel_ioat_bdw, intel_ioat_skx, 
intel_ioat_icx, intel_idxd_spr,
-intel_ntb_skx, intel_ntb_icx,
+misc_devices = [cnxk_bphy, cnxk_bphy_cgx, intel_ioat_bdw, intel_ioat_skx,
+intel_ioat_icx, intel_idxd_spr, intel_ntb_skx, intel_ntb_icx,
 octeontx2_dma]
 
 # global dict ethernet devices present. Dictionary indexed by PCI address.
@@ -583,6 +584,9 @@ def show_status():
 if status_dev in ["crypto", "all"]:
 show_device_status(crypto_devices, "Crypto")
 
+if status_dev in ["dma", "all"]:
+show_device_status(dma_devices, "DMA")
+
 if status_dev in ["event", "all"]:
 show_device_status(eventdev_devices, "Eventdev")
 
@@ -651,7 +655,7 @@ def parse_args():
 parser.add_argument(
 '--status-dev',
 help="Print the status of given device group.",
-choices=['baseband', 'compress', 'crypto', 'event',
+choices=['baseband', 'compress', 'crypto', 'dma', 'event',
 'mempool', 'misc', 'net', 'regex'])
 bind_group = parser.add_mutually_exclusive_group()
 bind_group.add_argument(
@@ -732,6 +736,7 @@ def do_arg_actions():
 get_device_details(network_devices)
 get_device_details(baseband_devices)
 get_device_details(crypto_devices)
+get_device_details(dma_devices)
 get_device_details(eventdev_devices)
 get_device_details(mempool_devices)
 get_device_details(compress_devices)
@@ -754,6 +759,7 @@ def main():
 get_device_details(network_devices)
 get_device_details(baseband_devices)
 get_device_details(crypto_devices)
+get_device_details(dma_devices)
 get_device_details(eventdev_devices)
 get_device_details(mempool_devices)
 get_device_details(compress_devices)
-- 
2.30.2



[dpdk-dev] [PATCH v3 17/17] devbind: move idxd device ID to dmadev class

2021-09-08 Thread Kevin Laatz
The dmadev library is the preferred abstraction for using IDXD devices and
will replace the rawdev implementation in future. This patch moves the IDXD
device ID to the dmadev class.

Signed-off-by: Kevin Laatz 
---
 usertools/dpdk-devbind.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/usertools/dpdk-devbind.py b/usertools/dpdk-devbind.py
index 8bb573f4b0..98b698ccc0 100755
--- a/usertools/dpdk-devbind.py
+++ b/usertools/dpdk-devbind.py
@@ -69,13 +69,13 @@
 network_devices = [network_class, cavium_pkx, avp_vnic, ifpga_class]
 baseband_devices = [acceleration_class]
 crypto_devices = [encryption_class, intel_processor_class]
-dma_devices = []
+dma_devices = [intel_idxd_spr]
 eventdev_devices = [cavium_sso, cavium_tim, intel_dlb, octeontx2_sso]
 mempool_devices = [cavium_fpa, octeontx2_npa]
 compress_devices = [cavium_zip]
 regex_devices = [octeontx2_ree]
 misc_devices = [cnxk_bphy, cnxk_bphy_cgx, intel_ioat_bdw, intel_ioat_skx,
-intel_ioat_icx, intel_idxd_spr, intel_ntb_skx, intel_ntb_icx,
+intel_ioat_icx, intel_ntb_skx, intel_ntb_icx,
 octeontx2_dma]
 
 # global dict ethernet devices present. Dictionary indexed by PCI address.
-- 
2.30.2



Re: [dpdk-dev] [EXT] [PATCH 2/7] examples/ipsec-secgw: add support for NAT-T

2021-09-08 Thread Akhil Goyal
Hi Radu,

> Add support to the sample application to support IPsec NAT-T for both
> transport and tunnel modes, for both IPv4 and IPv6.
> 
> Signed-off-by: Declan Doherty 
> Signed-off-by: Radu Nicolau 
> ---

UDP encapsulation is already supported in the app with the --udp-encap option
in the SA configuration, and it is enabled for INLINE PROTO and
LOOKASIDE PROTO for IPv4/IPv6. I believe the same can be used for the inline
crypto case as well.
Did you try using it? Is there some specific reason for not using it? I believe
we can enhance that option if something is missing in it.

Regards,
Akhil


[dpdk-dev] [PATCH v3 00/11] dma: add dmadev driver for ioat devices

2021-09-08 Thread Conor Walsh
This patchset adds a dmadev driver and associated documentation to support
Intel QuickData Technology devices, part of the Intel I/O Acceleration
Technology (Intel I/OAT). This driver is intended to ultimately replace
the current IOAT part of the IOAT rawdev driver.
This patchset passes all the driver tests added in the dmadev test suite.

NOTE: This patchset has several dependencies:
- v21 of the dmadev set [1]
- v3 of the dmadev test suite [2]
- v3 of the IDXD driver [3]

[1] http://patches.dpdk.org/project/dpdk/list/?series=18738
[2] http://patches.dpdk.org/project/dpdk/list/?series=18744
[3] http://patches.dpdk.org/project/dpdk/list/?series=18762

---

v3:
 - Added burst capacity function.
 - Stop function now waits for suspend rather than just using a sleep.
 - Changed from vchan idle to vchan status function.
 - Other minor changes to update from dmadev v19 to v21.

v2:
 - Rebased on the above patchsets.
 - Added support for the vchan idle function.
 - Stop function now suspends IOAT channel to allow for reconfig.
 - dmadev_autotest can now be run multiple times using the IOAT driver
   without errors.
 - Added devbind updates for DMA devices
 - Removed some logically dead code found by coverity in the
   create function.

Conor Walsh (11):
  dma/ioat: add device probe and removal functions
  dma/ioat: create dmadev instances on PCI probe
  dma/ioat: add datapath structures
  dma/ioat: add configuration functions
  dma/ioat: add start and stop functions
  dma/ioat: add data path job submission functions
  dma/ioat: add data path completion functions
  dma/ioat: add statistics
  dma/ioat: add support for vchan status function
  dma/ioat: add burst capacity function
  devbind: move ioat device ID for ICX to dmadev category

 MAINTAINERS|   6 +
 doc/guides/dmadevs/index.rst   |   1 +
 doc/guides/dmadevs/ioat.rst| 214 
 doc/guides/rel_notes/release_21_11.rst |   7 +-
 drivers/dma/ioat/ioat_dmadev.c | 728 +
 drivers/dma/ioat/ioat_hw_defs.h| 294 ++
 drivers/dma/ioat/ioat_internal.h   |  44 ++
 drivers/dma/ioat/meson.build   |   7 +
 drivers/dma/ioat/version.map   |   3 +
 drivers/dma/meson.build|   1 +
 usertools/dpdk-devbind.py  |   5 +-
 11 files changed, 1304 insertions(+), 6 deletions(-)
 create mode 100644 doc/guides/dmadevs/ioat.rst
 create mode 100644 drivers/dma/ioat/ioat_dmadev.c
 create mode 100644 drivers/dma/ioat/ioat_hw_defs.h
 create mode 100644 drivers/dma/ioat/ioat_internal.h
 create mode 100644 drivers/dma/ioat/meson.build
 create mode 100644 drivers/dma/ioat/version.map

-- 
2.25.1



[dpdk-dev] [PATCH v3 01/11] dma/ioat: add device probe and removal functions

2021-09-08 Thread Conor Walsh
Add the basic device probe/remove skeleton code and initial documentation
for new IOAT DMA driver. Maintainers update is also included in this
patch.

Signed-off-by: Conor Walsh 
Reviewed-by: Kevin Laatz 
---
 MAINTAINERS|  6 +++
 doc/guides/dmadevs/index.rst   |  1 +
 doc/guides/dmadevs/ioat.rst| 64 
 doc/guides/rel_notes/release_21_11.rst |  7 +--
 drivers/dma/ioat/ioat_dmadev.c | 69 ++
 drivers/dma/ioat/ioat_hw_defs.h| 35 +
 drivers/dma/ioat/ioat_internal.h   | 20 
 drivers/dma/ioat/meson.build   |  7 +++
 drivers/dma/ioat/version.map   |  3 ++
 drivers/dma/meson.build|  1 +
 10 files changed, 210 insertions(+), 3 deletions(-)
 create mode 100644 doc/guides/dmadevs/ioat.rst
 create mode 100644 drivers/dma/ioat/ioat_dmadev.c
 create mode 100644 drivers/dma/ioat/ioat_hw_defs.h
 create mode 100644 drivers/dma/ioat/ioat_internal.h
 create mode 100644 drivers/dma/ioat/meson.build
 create mode 100644 drivers/dma/ioat/version.map

diff --git a/MAINTAINERS b/MAINTAINERS
index b4c614a229..00b319c811 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1208,6 +1208,12 @@ M: Kevin Laatz 
 F: drivers/dma/idxd/
 F: doc/guides/dmadevs/idxd.rst
 
+Intel IOAT - EXPERIMENTAL
+M: Bruce Richardson 
+M: Conor Walsh 
+F: drivers/dma/ioat/
+F: doc/guides/dmadevs/ioat.rst
+
 
 RegEx Drivers
 -------------
diff --git a/doc/guides/dmadevs/index.rst b/doc/guides/dmadevs/index.rst
index b30004fd65..0b01493c5a 100644
--- a/doc/guides/dmadevs/index.rst
+++ b/doc/guides/dmadevs/index.rst
@@ -12,3 +12,4 @@ application through DMAdev API.
 :numbered:
 
 idxd
+ioat
diff --git a/doc/guides/dmadevs/ioat.rst b/doc/guides/dmadevs/ioat.rst
new file mode 100644
index 00..45a2e65d70
--- /dev/null
+++ b/doc/guides/dmadevs/ioat.rst
@@ -0,0 +1,64 @@
+..  SPDX-License-Identifier: BSD-3-Clause
+Copyright(c) 2021 Intel Corporation.
+
+.. include:: 
+
+IOAT DMA Device Driver
+======================
+
+The ``ioat`` dmadev driver provides a poll-mode driver (PMD) for Intel\
+|reg| QuickData Technology which is part of Intel\ |reg| I/O
+Acceleration Technology (`Intel I/OAT
+`_).
+This PMD, when used on supported hardware, allows data copies, for example,
+cloning packet data, to be accelerated by IOAT hardware rather than having to
+be done by software, freeing up CPU cycles for other tasks.
+
+Hardware Requirements
+---------------------
+
+The ``dpdk-devbind.py`` script, included with DPDK, can be used to show the
+presence of supported hardware. Running ``dpdk-devbind.py --status-dev dma``
+will show all the DMA devices on the system; IOAT devices are included in this
+list. For Intel\ |reg| IOAT devices, the hardware will often be listed as
+"Crystal Beach DMA", or "CBDMA" or on some newer systems '0b00' due to the
+absence of pci-id database entries for them at this point.
+
+Compilation
+-----------
+
+For builds using ``meson`` and ``ninja``, the driver will be built when the
+target platform is x86-based. No additional compilation steps are necessary.
+
+Device Setup
+------------
+
+Intel\ |reg| IOAT devices will need to be bound to a suitable DPDK-supported
+user-space IO driver such as ``vfio-pci`` in order to be used by DPDK.
+
+The ``dpdk-devbind.py`` script can be used to view the state of the devices 
using::
+
+   $ dpdk-devbind.py --status-dev dma
+
+The ``dpdk-devbind.py`` script can also be used to bind devices to a suitable 
driver.
+For example::
+
+   $ dpdk-devbind.py -b vfio-pci 00:01.0 00:01.1
+
+Device Probing and Initialization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For devices bound to a suitable DPDK-supported driver (``vfio-pci``), the HW
+devices will be found as part of the device scan done at application
+initialization time without the need to pass parameters to the application.
+
+If the application does not require all the devices available, an allowlist can
+be used in the same way that other DPDK devices use them.
+
+For example::
+
+   $ dpdk-test -a 
+
+Once probed successfully, the device will appear as a ``dmadev``, that is a
+"DMA device type" inside DPDK, and can be accessed using APIs from the
+``rte_dmadev`` library.
diff --git a/doc/guides/rel_notes/release_21_11.rst 
b/doc/guides/rel_notes/release_21_11.rst
index 8526646b13..fcc9cbd841 100644
--- a/doc/guides/rel_notes/release_21_11.rst
+++ b/doc/guides/rel_notes/release_21_11.rst
@@ -67,10 +67,11 @@ New Features
   The dmadev library provides a DMA device framework for management and
   provision of hardware and software DMA devices.
 
-* **Added IDXD dmadev driver implementation.**
+* **Added Intel dmadev driver implementations.**
 
-  The IDXD dmadev driver provide device drivers for the Intel DSA devices.
-  This device driver can be used through the generic dmadev API.

[dpdk-dev] [PATCH v3 02/11] dma/ioat: create dmadev instances on PCI probe

2021-09-08 Thread Conor Walsh
When a suitable device is found during the PCI probe, create a dmadev
instance for each channel. Internal structures and HW definitions required
for device creation are also included.

Signed-off-by: Conor Walsh 
Reviewed-by: Kevin Laatz 
---
 drivers/dma/ioat/ioat_dmadev.c   | 108 ++-
 drivers/dma/ioat/ioat_hw_defs.h  |  45 +
 drivers/dma/ioat/ioat_internal.h |  24 +++
 3 files changed, 175 insertions(+), 2 deletions(-)

diff --git a/drivers/dma/ioat/ioat_dmadev.c b/drivers/dma/ioat/ioat_dmadev.c
index f3491d45b1..aa03dd1cd2 100644
--- a/drivers/dma/ioat/ioat_dmadev.c
+++ b/drivers/dma/ioat/ioat_dmadev.c
@@ -4,6 +4,7 @@
 
 #include 
 #include 
+#include 
 
 #include "ioat_internal.h"
 
@@ -14,6 +15,109 @@ RTE_LOG_REGISTER_DEFAULT(ioat_pmd_logtype, INFO);
 #define IOAT_PMD_NAME dmadev_ioat
 #define IOAT_PMD_NAME_STR RTE_STR(IOAT_PMD_NAME)
 
+/* Create a DMA device. */
+static int
+ioat_dmadev_create(const char *name, struct rte_pci_device *dev)
+{
+   static const struct rte_dmadev_ops ioat_dmadev_ops = { };
+
+   struct rte_dmadev *dmadev = NULL;
+   struct ioat_dmadev *ioat = NULL;
+   int retry = 0;
+
+   if (!name) {
+   IOAT_PMD_ERR("Invalid name of the device!");
+   return -EINVAL;
+   }
+
+   /* Allocate device structure. */
+   dmadev = rte_dmadev_pmd_allocate(name);
+   if (dmadev == NULL) {
+   IOAT_PMD_ERR("Unable to allocate dma device");
+   return -ENOMEM;
+   }
+
+   dmadev->device = &dev->device;
+
+   dmadev->data->dev_private = rte_malloc_socket(NULL, sizeof(*ioat),
+   0, dmadev->device->numa_node);
+   dmadev->dev_private = dmadev->data->dev_private;
+
+   dmadev->dev_ops = &ioat_dmadev_ops;
+
+   ioat = dmadev->data->dev_private;
+   ioat->dmadev = dmadev;
+   ioat->regs = dev->mem_resource[0].addr;
+   ioat->doorbell = &ioat->regs->dmacount;
+   ioat->qcfg.nb_desc = 0;
+   ioat->desc_ring = NULL;
+
+   /* Do device initialization - reset and set error behaviour. */
+   if (ioat->regs->chancnt != 1)
+   IOAT_PMD_WARN("%s: Channel count == %d\n", __func__,
+   ioat->regs->chancnt);
+
+   /* Locked by someone else. */
+   if (ioat->regs->chanctrl & IOAT_CHANCTRL_CHANNEL_IN_USE) {
+   IOAT_PMD_WARN("%s: Channel appears locked\n", __func__);
+   ioat->regs->chanctrl = 0;
+   }
+
+   /* clear any previous errors */
+   if (ioat->regs->chanerr != 0) {
+   uint32_t val = ioat->regs->chanerr;
+   ioat->regs->chanerr = val;
+   }
+
+   ioat->regs->chancmd = IOAT_CHANCMD_SUSPEND;
+   rte_delay_ms(1);
+   ioat->regs->chancmd = IOAT_CHANCMD_RESET;
+   rte_delay_ms(1);
+   while (ioat->regs->chancmd & IOAT_CHANCMD_RESET) {
+   ioat->regs->chainaddr = 0;
+   rte_delay_ms(1);
+   if (++retry >= 200) {
+   IOAT_PMD_ERR("%s: cannot reset device. CHANCMD=%#"PRIx8
+   ", CHANSTS=%#"PRIx64", 
CHANERR=%#"PRIx32"\n",
+   __func__,
+   ioat->regs->chancmd,
+   ioat->regs->chansts,
+   ioat->regs->chanerr);
+   return -EIO;
+   }
+   }
+   ioat->regs->chanctrl = IOAT_CHANCTRL_ANY_ERR_ABORT_EN |
+   IOAT_CHANCTRL_ERR_COMPLETION_EN;
+
+   return 0;
+
+}
+
+/* Destroy a DMA device. */
+static int
+ioat_dmadev_destroy(const char *name)
+{
+   struct rte_dmadev *dev;
+   int ret;
+
+   if (!name) {
+   IOAT_PMD_ERR("Invalid device name");
+   return -EINVAL;
+   }
+
+   dev = rte_dmadev_get_device_by_name(name);
+   if (dev == NULL) {
+   IOAT_PMD_ERR("Invalid device name (%s)", name);
+   return -EINVAL;
+   }
+
+   ret = rte_dmadev_pmd_release(dev);
+   if (ret)
+   IOAT_PMD_DEBUG("Device cleanup failed");
+
+   return 0;
+}
+
 /* Probe DMA device. */
 static int
 ioat_dmadev_probe(struct rte_pci_driver *drv, struct rte_pci_device *dev)
@@ -24,7 +128,7 @@ ioat_dmadev_probe(struct rte_pci_driver *drv, struct 
rte_pci_device *dev)
IOAT_PMD_INFO("Init %s on NUMA node %d", name, dev->device.numa_node);
 
dev->device.driver = &drv->driver;
-   return 0;
+   return ioat_dmadev_create(name, dev);
 }
 
 /* Remove DMA device. */
@@ -38,7 +142,7 @@ ioat_dmadev_remove(struct rte_pci_device *dev)
IOAT_PMD_INFO("Closing %s on NUMA node %d",
name, dev->device.numa_node);
 
-   return 0;
+   return ioat_dmadev_destroy(name);
 }
 
 static const struct rte_pci_id pci_id_ioat_map[] = {
diff --git a/drivers/dma/ioat/ioat_hw_defs.h b/drivers/dma/ioat
