[PATCH v3 0/3] SM2 crypto algorithm support

2023-06-04 Thread Gowrishankar Muthukrishnan
This patch series adds SM2 crypto algorithm support, along with tests
verified using OpenSSL.

v3:
 - sm2 xform contains hash param and key pair info moved into op.

Gowrishankar Muthukrishnan (3):
  cryptodev: add SM2 asymmetric crypto algorithm
  test/crypto: add asymmetric SM2 test cases
  crypto/openssl: add SM2 asymmetric crypto support

 app/test/test_cryptodev_asym.c   | 581 +++
 app/test/test_cryptodev_sm2_test_vectors.h   | 129 
 doc/guides/cryptodevs/features/default.ini   |   1 +
 doc/guides/cryptodevs/features/openssl.ini   |   1 +
 doc/guides/cryptodevs/openssl.rst|   1 +
 doc/guides/rel_notes/release_23_07.rst   |   9 +
 drivers/crypto/openssl/openssl_pmd_private.h |   6 +
 drivers/crypto/openssl/rte_openssl_pmd.c | 297 ++
 drivers/crypto/openssl/rte_openssl_pmd_ops.c |  48 ++
 lib/cryptodev/rte_crypto_asym.h  |  87 +++
 lib/cryptodev/rte_cryptodev.c|   1 +
 11 files changed, 1161 insertions(+)
 create mode 100644 app/test/test_cryptodev_sm2_test_vectors.h

-- 
2.25.1



[PATCH v3 1/3] cryptodev: add SM2 asymmetric crypto algorithm

2023-06-04 Thread Gowrishankar Muthukrishnan
ShangMi 2 (SM2) is a set of public-key cryptography algorithms
based on elliptic curves.

Added support for asymmetric SM2 in cryptodev along with prime
field curve, as referenced in the IETF draft:
https://datatracker.ietf.org/doc/html/draft-shen-sm2-ecdsa-02

Signed-off-by: Gowrishankar Muthukrishnan 
---
 doc/guides/cryptodevs/features/default.ini |  1 +
 doc/guides/rel_notes/release_23_07.rst |  5 ++
 lib/cryptodev/rte_crypto_asym.h| 87 ++
 lib/cryptodev/rte_cryptodev.c  |  1 +
 4 files changed, 94 insertions(+)

diff --git a/doc/guides/cryptodevs/features/default.ini 
b/doc/guides/cryptodevs/features/default.ini
index 523da0cfa8..a69967bb9e 100644
--- a/doc/guides/cryptodevs/features/default.ini
+++ b/doc/guides/cryptodevs/features/default.ini
@@ -125,6 +125,7 @@ Diffie-hellman  =
 ECDSA   =
 ECPM=
 ECDH=
+SM2 =
 
 ;
 ; Supported Operating systems of a default crypto driver.
diff --git a/doc/guides/rel_notes/release_23_07.rst 
b/doc/guides/rel_notes/release_23_07.rst
index a9b1293689..8b8e69d619 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -55,6 +55,11 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+* **Added SM2 asymmetric algorithm in cryptodev.**
+
+  Added support for ShangMi 2 (SM2) asymmetric crypto algorithm
+  along with prime field curve support.
+
 
 Removed Items
 -
diff --git a/lib/cryptodev/rte_crypto_asym.h b/lib/cryptodev/rte_crypto_asym.h
index 989f38323f..ab0b4abea7 100644
--- a/lib/cryptodev/rte_crypto_asym.h
+++ b/lib/cryptodev/rte_crypto_asym.h
@@ -119,6 +119,11 @@ enum rte_crypto_asym_xform_type {
/**< Elliptic Curve Point Multiplication */
RTE_CRYPTO_ASYM_XFORM_ECFPM,
/**< Elliptic Curve Fixed Point Multiplication */
+   RTE_CRYPTO_ASYM_XFORM_SM2,
+   /**< ShangMi 2
+* Performs Encrypt, Decrypt, Sign and Verify.
+* Refer to rte_crypto_asym_op_type.
+*/
RTE_CRYPTO_ASYM_XFORM_TYPE_LIST_END
/**< End of list */
 };
@@ -382,6 +387,17 @@ struct rte_crypto_ec_xform {
/**< Pre-defined ec groups */
 };
 
+/**
+ * Asymmetric SM2 transform data
+ *
+ * Structure describing SM2 xform params
+ *
+ */
+struct rte_crypto_sm2_xform {
+   enum rte_crypto_auth_algorithm hash;
+   /**< Hash algorithm used in SM2 op. */
+};
+
 /**
  * Operations params for modular operations:
  * exponentiation and multiplicative inverse
@@ -637,9 +653,79 @@ struct rte_crypto_asym_xform {
/**< EC xform parameters, used by elliptic curve based
 * operations.
 */
+
+   struct rte_crypto_sm2_xform sm2;
+   /**< SM2 xform parameters */
};
 };
 
+/**
+ * SM2 operation params
+ */
+struct rte_crypto_sm2_op_param {
+   enum rte_crypto_asym_op_type op_type;
+   /**< Signature generation or verification */
+
+   rte_crypto_uint pkey;
+   /**< Private key for encryption or sign generation */
+
+   struct rte_crypto_ec_point q;
+   /**< Public key for decryption or verification */
+
+   rte_crypto_param message;
+   /**<
+* Pointer to input data
+* - to be encrypted for SM2 public encrypt.
+* - to be signed for SM2 sign generation.
+* - to be authenticated for SM2 sign verification.
+*
+* Pointer to output data
+* - for SM2 private decrypt.
+* In this case the underlying array should have been
+* allocated with enough memory to hold plaintext output
+* (at least encrypted text length). The message.length field
+* will be overwritten by the PMD with the decrypted length.
+*/
+
+   rte_crypto_param cipher;
+   /**<
+* Pointer to input data
+* - to be decrypted for SM2 private decrypt.
+*
+* Pointer to output data
+* - for SM2 public encrypt.
+* In this case the underlying array should have been allocated
+* with enough memory to hold ciphertext output (at least X bytes
+* for prime field curve of N bytes and for message M bytes,
+* where X = (C1 + C2 + C3), computed based on the SM2 draft as
+* C1 = (1 + N + N), C2 = M, C3 = N. The cipher.length field will
+* be overwritten by the PMD with the encrypted length.
+*/
+
rte_crypto_uint id;
+   /**< The SM2 id used by signer and verifier; it is in the
+* interval (1, n-1). */
+
+   rte_crypto_uint k;
+   /**< The SM2 per-message secret number, which is an integer
+* in the interval (1, n-1).
+* If the random number is generated by the PMD,
+* the 'rte_crypto_param.data' parameter should be set to NULL.
+*/
+
+   rte_crypto_uint r;
+   /**< r component of elliptic curv

[PATCH v3 2/3] test/crypto: add asymmetric SM2 test cases

2023-06-04 Thread Gowrishankar Muthukrishnan
Added test cases for asymmetric SM2 crypto validation.
Test cases are added for sign/verify/encrypt/decrypt.

Signed-off-by: Gowrishankar Muthukrishnan 
---
 app/test/test_cryptodev_asym.c | 581 +
 app/test/test_cryptodev_sm2_test_vectors.h | 129 +
 2 files changed, 710 insertions(+)
 create mode 100644 app/test/test_cryptodev_sm2_test_vectors.h

diff --git a/app/test/test_cryptodev_asym.c b/app/test/test_cryptodev_asym.c
index 9236817650..baa8f89325 100644
--- a/app/test/test_cryptodev_asym.c
+++ b/app/test/test_cryptodev_asym.c
@@ -21,6 +21,7 @@
 #include "test_cryptodev_ecpm_test_vectors.h"
 #include "test_cryptodev_mod_test_vectors.h"
 #include "test_cryptodev_rsa_test_vectors.h"
+#include "test_cryptodev_sm2_test_vectors.h"
 #include "test_cryptodev_asym_util.h"
 #include "test.h"
 
@@ -2196,6 +2197,582 @@ test_ecpm_all_curve(void)
return overall_status;
 }
 
+static int
+_test_sm2_sign(bool rnd_secret)
+{
+   struct crypto_testsuite_params_asym *ts_params = &testsuite_params;
+   struct crypto_testsuite_sm2_params input_params = sm2_param_fp256;
+   struct rte_mempool *sess_mpool = ts_params->session_mpool;
+   struct rte_mempool *op_mpool = ts_params->op_mpool;
+   uint8_t dev_id = ts_params->valid_devs[0];
+   struct rte_crypto_op *result_op = NULL;
+   uint8_t output_buf_r[TEST_DATA_SIZE];
+   uint8_t output_buf_s[TEST_DATA_SIZE];
+   struct rte_crypto_asym_xform xform;
+   struct rte_crypto_asym_op *asym_op;
+   struct rte_crypto_op *op = NULL;
+   int ret, status = TEST_SUCCESS;
+   void *sess = NULL;
+
+   /* Setup crypto op data structure */
+   op = rte_crypto_op_alloc(op_mpool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
+   if (op == NULL) {
+   RTE_LOG(ERR, USER1,
+   "line %u FAILED: %s", __LINE__,
+   "Failed to allocate asymmetric crypto "
+   "operation struct\n");
+   status = TEST_FAILED;
+   goto exit;
+   }
+
+   asym_op = op->asym;
+
+   /* Setup asym xform */
+   xform.next = NULL;
+   xform.xform_type = RTE_CRYPTO_ASYM_XFORM_SM2;
+   xform.sm2.hash = RTE_CRYPTO_AUTH_SM3;
+
+   ret = rte_cryptodev_asym_session_create(dev_id, &xform, sess_mpool, 
&sess);
+   if (ret < 0) {
+   RTE_LOG(ERR, USER1,
+   "line %u FAILED: %s", __LINE__,
+   "Session creation failed\n");
+   status = (ret == -ENOTSUP) ? TEST_SKIPPED : TEST_FAILED;
+   goto exit;
+   }
+
+   /* Attach asymmetric crypto session to crypto operations */
+   rte_crypto_op_attach_asym_session(op, sess);
+
+   /* Compute sign */
+
+   /* Populate op with operational details */
+   asym_op->sm2.op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+   asym_op->sm2.message.data = input_params.message.data;
+   asym_op->sm2.message.length = input_params.message.length;
+   asym_op->sm2.pkey.data = input_params.pkey.data;
+   asym_op->sm2.pkey.length = input_params.pkey.length;
+   asym_op->sm2.q.x.data = input_params.pubkey_qx.data;
+   asym_op->sm2.q.x.length = input_params.pubkey_qx.length;
+   asym_op->sm2.q.y.data = input_params.pubkey_qy.data;
+   asym_op->sm2.q.y.length = input_params.pubkey_qy.length;
+   asym_op->sm2.id.data = input_params.id.data;
+   asym_op->sm2.id.length = input_params.id.length;
+   if (rnd_secret) {
+   asym_op->sm2.k.data = NULL;
+   asym_op->sm2.k.length = 0;
+   } else {
+   asym_op->sm2.k.data = input_params.k.data;
+   asym_op->sm2.k.length = input_params.k.length;
+   }
+
+   /* Init out buf */
+   asym_op->sm2.r.data = output_buf_r;
+   asym_op->sm2.s.data = output_buf_s;
+
+   RTE_LOG(DEBUG, USER1, "Process ASYM operation\n");
+
+   /* Process crypto operation */
+   if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1) {
+   RTE_LOG(ERR, USER1,
+   "line %u FAILED: %s", __LINE__,
+   "Error sending packet for operation\n");
+   status = TEST_FAILED;
+   goto exit;
+   }
+
+   while (rte_cryptodev_dequeue_burst(dev_id, 0, &result_op, 1) == 0)
+   rte_pause();
+
+   if (result_op == NULL) {
+   RTE_LOG(ERR, USER1,
+   "line %u FAILED: %s", __LINE__,
+   "Failed to process asym crypto op\n");
+   status = TEST_FAILED;
+   goto exit;
+   }
+
+   if (result_op->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+   RTE_LOG(ERR, USER1,
+   "line %u FAILED: %s", __LINE__,
+   "Failed to process asym crypto op\n");
+   status = TEST_FAILED;
+

[PATCH v3 3/3] crypto/openssl: add SM2 asymmetric crypto support

2023-06-04 Thread Gowrishankar Muthukrishnan
Add SM2 asymmetric algorithm support in openssl PMD.

Signed-off-by: Gowrishankar Muthukrishnan 
---
 doc/guides/cryptodevs/features/openssl.ini   |   1 +
 doc/guides/cryptodevs/openssl.rst|   1 +
 doc/guides/rel_notes/release_23_07.rst   |   4 +
 drivers/crypto/openssl/openssl_pmd_private.h |   6 +
 drivers/crypto/openssl/rte_openssl_pmd.c | 297 +++
 drivers/crypto/openssl/rte_openssl_pmd_ops.c |  48 +++
 6 files changed, 357 insertions(+)

diff --git a/doc/guides/cryptodevs/features/openssl.ini 
b/doc/guides/cryptodevs/features/openssl.ini
index 4b0f9b162e..b64c8ec4a5 100644
--- a/doc/guides/cryptodevs/features/openssl.ini
+++ b/doc/guides/cryptodevs/features/openssl.ini
@@ -65,6 +65,7 @@ DSA = Y
 Modular Exponentiation = Y
 Modular Inversion = Y
 Diffie-hellman = Y
+SM2 = Y
 
 ;
 ; Supported Operating systems of the 'openssl' crypto driver.
diff --git a/doc/guides/cryptodevs/openssl.rst 
b/doc/guides/cryptodevs/openssl.rst
index 03041ceda1..ff21d21b23 100644
--- a/doc/guides/cryptodevs/openssl.rst
+++ b/doc/guides/cryptodevs/openssl.rst
@@ -53,6 +53,7 @@ Supported Asymmetric Crypto algorithms:
 * ``RTE_CRYPTO_ASYM_XFORM_DH``
 * ``RTE_CRYPTO_ASYM_XFORM_MODINV``
 * ``RTE_CRYPTO_ASYM_XFORM_MODEX``
+* ``RTE_CRYPTO_ASYM_XFORM_SM2``
 
 
 Installation
diff --git a/doc/guides/rel_notes/release_23_07.rst 
b/doc/guides/rel_notes/release_23_07.rst
index 8b8e69d619..aeebcffb60 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -61,6 +61,10 @@ New Features
   along with prime field curve support.
 
 
+* **Updated OpenSSL crypto driver for SM2 support.**
+
+  Added SM2 algorithm support in asymmetric crypto operations.
+
 Removed Items
 -
 
diff --git a/drivers/crypto/openssl/openssl_pmd_private.h 
b/drivers/crypto/openssl/openssl_pmd_private.h
index ed6841e460..1edb669dfd 100644
--- a/drivers/crypto/openssl/openssl_pmd_private.h
+++ b/drivers/crypto/openssl/openssl_pmd_private.h
@@ -12,6 +12,7 @@
 #include 
 #include 
 #include 
+#include 
 #if (OPENSSL_VERSION_NUMBER >= 0x3000L)
 #include 
 #include 
@@ -200,6 +201,11 @@ struct openssl_asym_session {
OSSL_PARAM_BLD * param_bld;
 #endif
} s;
+   struct {
+#if (OPENSSL_VERSION_NUMBER >= 0x3000L)
+   OSSL_PARAM * params;
+#endif
+   } sm2;
} u;
 } __rte_cache_aligned;
 /** Set and validate OPENSSL crypto session parameters */
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c 
b/drivers/crypto/openssl/rte_openssl_pmd.c
index 384d262621..9442d39907 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -13,6 +13,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "openssl_pmd_private.h"
 #include "compat.h"
@@ -2662,6 +2663,286 @@ process_openssl_rsa_op_evp(struct rte_crypto_op *cop,
return ret;
 
 }
+
+static int
+process_openssl_sm2_op_evp(struct rte_crypto_op *cop,
+   struct openssl_asym_session *sess)
+{
+   EVP_PKEY_CTX *kctx = NULL, *sctx = NULL, *cctx = NULL;
+   struct rte_crypto_asym_op *op = cop->asym;
+   OSSL_PARAM_BLD *param_bld = NULL;
+   OSSL_PARAM *params = NULL;
+   EVP_PKEY *pkey = NULL;
+   BIGNUM *pkey_bn = NULL;
+   uint8_t pubkey[64];
+   size_t len = 0;
+   int ret = -1;
+
+   cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+   if (cop->asym->sm2.k.data != NULL)
+   goto err_sm2;
+
+   param_bld = OSSL_PARAM_BLD_new();
+   if (!param_bld) {
+   OPENSSL_LOG(ERR, "failed to allocate params\n");
+   goto err_sm2;
+   }
+
+   ret = OSSL_PARAM_BLD_push_utf8_string(param_bld,
+   OSSL_PKEY_PARAM_GROUP_NAME, "SM2", 0);
+   if (!ret) {
+   OPENSSL_LOG(ERR, "failed to push params\n");
+   goto err_sm2;
+   }
+
+   pkey_bn = BN_bin2bn((const unsigned char *)op->sm2.pkey.data,
+   op->sm2.pkey.length, pkey_bn);
+
+   memset(pubkey, 0, RTE_DIM(pubkey));
+   pubkey[0] = 0x04;
+   len += 1;
+   memcpy(&pubkey[len], op->sm2.q.x.data, op->sm2.q.x.length);
+   len += op->sm2.q.x.length;
+   memcpy(&pubkey[len], op->sm2.q.y.data, op->sm2.q.y.length);
+   len += op->sm2.q.y.length;
+
+   ret = OSSL_PARAM_BLD_push_BN(param_bld, OSSL_PKEY_PARAM_PRIV_KEY,
+pkey_bn);
+   if (!ret) {
+   OPENSSL_LOG(ERR, "failed to push params\n");
+   goto err_sm2;
+   }
+
+   ret = OSSL_PARAM_BLD_push_octet_string(param_bld,
+   OSSL_PKEY_PARAM_PUB_KEY, pubkey, len);
+   if (!ret) {
+   OPENSSL_LOG(ERR, "failed to push params\n");
+   goto err_sm2;
+   }
+
+   params = OSSL_PARAM_BLD_to_param(param_bld);
+   if (!params) {
+   OP

RE: [PATCH v2 1/3] cryptodev: add SM2 asymmetric crypto algorithm

2023-06-04 Thread Gowrishankar Muthukrishnan
Hi,
I have simplified the SM2 xform to hold only the hash algorithm, with the rest
moved into the crypto op. Please review v3 and let me know if the changes are good.
Also, I need to test a plain scalar k value, but I don't have support in OpenSSL
to do that, so I will get back once I am
done with that part as well; meanwhile, v3 is good to go for the OpenSSL PMD.

Thanks,
Gowrishankar


[PATCH v3 00/34] net/sfc: support HW conntrack assistance

2023-06-04 Thread Ivan Malov
On EF100 hardware, match-action engine (MAE) can be equipped
with an assistance table for connection tracking (CT). In it,
an entry key is a set of exact match fields: an EtherType, a
pair of IP addresses, a L4 protocol ID and a pair of L4 port
numbers. An entry response can provide matching packets with
a mark value and additional data to be plumbed to NAT action.
In addition, an update to mark-and-sweep counter can be done.

This table was designed with larger capacity in mind,
so moving the above match criteria out of an action
rule (AR) specification to a CT entry increases the
likelihood of reusing AR entries and improves the
total flow engine capacity. Make use of that.

Changes in v2:
* Found and fixed my own mistake in [15/34]
* Fixed [16/34] as per 0-day robot report
* Added missing release notes to [27/34]
* Added missing release notes to [30/34]

Changes in v3:
* Fixed release notes in [27/34]
* Rebased on dpdk to fix CI

Denis Pryazhennikov (11):
  common/sfc_efx/base: update MCDI headers
  common/sfc_efx/base: detect MCDI Table Access API support
  common/sfc_efx/base: add API to list HW tables
  common/sfc_efx/base: add macro to get indexed QWORD field
  common/sfc_efx/base: add API to get HW table desc
  common/sfc_efx/base: add API to insert data to HW table
  common/sfc_efx/base: add API to delete entry from HW table
  net/sfc: add MCDI wrappers for BCAM tables
  net/sfc: add functions to manipulate MCDI table fields
  net/sfc: attach to HW table API
  net/sfc: add API to manage HW Conntrack table

Ivan Malov (23):
  net/sfc: make entry pointer optional in MAE resource helpers
  net/sfc: turn flow create/destroy methods into lock wrappers
  net/sfc: let driver-internal flows use VF representor action
  net/sfc: extend generic flow API to allow for internal flows
  net/sfc: switch driver-internal flows to use generic methods
  net/sfc: move MAE flow parsing method to MAE-specific source
  net/sfc: move MAE counter stream start to action set handler
  net/sfc: prepare MAE outer rules for action rule indirection
  net/sfc: turn MAE flow action rules into shareable resources
  common/sfc_efx/base: provide an API to clone MAE match specs
  common/sfc_efx/base: add API to read back MAE match criteria
  common/sfc_efx/base: match on conntrack mark in action rules
  common/sfc_efx/base: add API to request MAE conntrack lookup
  net/sfc: make use of conntrack assistance for transfer flows
  common/sfc_efx/base: support NAT edits in MAE
  net/sfc: add support for IPv4 NAT offload to MAE backend
  net/sfc: rename SW structures used by transfer flow counters
  net/sfc: rework MAE action rule counter representation in SW
  net/sfc: support indirect count action in transfer flows
  common/sfc_efx/base: rework MAE counter provisioning helpers
  net/sfc: indicate MAE counter type in use for transfer flows
  common/sfc_efx/base: support conntrack assistance counters
  net/sfc: use conntrack assistance counters in transfer flows

 doc/guides/nics/features/sfc.ini|4 +
 doc/guides/nics/sfc_efx.rst |   10 +
 doc/guides/rel_notes/release_23_07.rst  |   18 +
 drivers/common/sfc_efx/base/efx.h   |  198 +-
 drivers/common/sfc_efx/base/efx_impl.h  |5 +-
 drivers/common/sfc_efx/base/efx_mae.c   |  383 ++-
 drivers/common/sfc_efx/base/efx_mcdi.h  |4 +
 drivers/common/sfc_efx/base/efx_regs_mcdi.h | 2557 ++-
 drivers/common/sfc_efx/base/efx_table.c |  506 
 drivers/common/sfc_efx/base/hunt_nic.c  |2 +
 drivers/common/sfc_efx/base/medford2_nic.c  |2 +
 drivers/common/sfc_efx/base/medford_nic.c   |2 +
 drivers/common/sfc_efx/base/meson.build |1 +
 drivers/common/sfc_efx/base/rhead_nic.c |9 +
 drivers/common/sfc_efx/base/siena_nic.c |2 +
 drivers/common/sfc_efx/version.map  |   14 +
 drivers/net/sfc/meson.build |6 +-
 drivers/net/sfc/sfc.c   |   27 +-
 drivers/net/sfc/sfc.h   |3 +
 drivers/net/sfc/sfc_flow.c  |  246 +-
 drivers/net/sfc/sfc_flow.h  |   45 +-
 drivers/net/sfc/sfc_mae.c   | 1899 ++
 drivers/net/sfc/sfc_mae.h   |  131 +-
 drivers/net/sfc/sfc_mae_counter.c   |  146 +-
 drivers/net/sfc/sfc_mae_counter.h   |   16 +-
 drivers/net/sfc/sfc_mae_ct.c|  201 ++
 drivers/net/sfc/sfc_mae_ct.h|   68 +
 drivers/net/sfc/sfc_repr.c  |   20 +-
 drivers/net/sfc/sfc_repr_proxy.c|   53 +-
 drivers/net/sfc/sfc_repr_proxy.h|2 +-
 drivers/net/sfc/sfc_repr_proxy_api.h|3 +
 drivers/net/sfc/sfc_switch.c|7 +-
 drivers/net/sfc/sfc_switch.h|   10 +
 drivers/net/sfc/sfc_tbl_meta.c  |   71 +
 drivers/net/sfc/sfc_tbl_meta.h  |   37 +
 drivers/net/sfc/sfc_tbl_meta_cache.c|  253 ++
 drivers/net/sfc/sfc_

[PATCH v3 02/34] common/sfc_efx/base: detect MCDI Table Access API support

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

Future patches will add an implementation of MCDI Table
Access API in libefx. This patch adds a way to determine
if this API is supported.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h  | 2 ++
 drivers/common/sfc_efx/base/hunt_nic.c | 2 ++
 drivers/common/sfc_efx/base/medford2_nic.c | 2 ++
 drivers/common/sfc_efx/base/medford_nic.c  | 2 ++
 drivers/common/sfc_efx/base/rhead_nic.c| 9 +
 drivers/common/sfc_efx/base/siena_nic.c| 2 ++
 6 files changed, 19 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index f4fa88f169..520674a602 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -1672,6 +1672,8 @@ typedef struct efx_nic_cfg_s {
boolean_t   enc_mae_admin;
/* NIC support for MAE action set v2 features. */
boolean_t   enc_mae_aset_v2_supported;
+   /* NIC support for MCDI Table Access API. */
+   boolean_t   enc_table_api_supported;
/* Firmware support for "FLAG" and "MARK" filter actions */
boolean_t   enc_filter_action_flag_supported;
boolean_t   enc_filter_action_mark_supported;
diff --git a/drivers/common/sfc_efx/base/hunt_nic.c 
b/drivers/common/sfc_efx/base/hunt_nic.c
index 08ae324482..04595a39c8 100644
--- a/drivers/common/sfc_efx/base/hunt_nic.c
+++ b/drivers/common/sfc_efx/base/hunt_nic.c
@@ -192,6 +192,8 @@ hunt_board_cfg(
/* All Huntington devices have a PCIe Gen3, 8 lane connector */
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
 
+   encp->enc_table_api_supported = B_FALSE;
+
return (0);
 
 fail4:
diff --git a/drivers/common/sfc_efx/base/medford2_nic.c 
b/drivers/common/sfc_efx/base/medford2_nic.c
index 6d19524573..49adabffb2 100644
--- a/drivers/common/sfc_efx/base/medford2_nic.c
+++ b/drivers/common/sfc_efx/base/medford2_nic.c
@@ -152,6 +152,8 @@ medford2_board_cfg(
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
 
+   encp->enc_table_api_supported = B_FALSE;
+
return (0);
 
 fail4:
diff --git a/drivers/common/sfc_efx/base/medford_nic.c 
b/drivers/common/sfc_efx/base/medford_nic.c
index b111e3eded..9a460b2b9b 100644
--- a/drivers/common/sfc_efx/base/medford_nic.c
+++ b/drivers/common/sfc_efx/base/medford_nic.c
@@ -150,6 +150,8 @@ medford_board_cfg(
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
 
+   encp->enc_table_api_supported = B_FALSE;
+
return (0);
 
 fail4:
diff --git a/drivers/common/sfc_efx/base/rhead_nic.c 
b/drivers/common/sfc_efx/base/rhead_nic.c
index eda6c1c4f9..a773aea38d 100644
--- a/drivers/common/sfc_efx/base/rhead_nic.c
+++ b/drivers/common/sfc_efx/base/rhead_nic.c
@@ -176,6 +176,15 @@ rhead_board_cfg(
encp->enc_required_pcie_bandwidth_mbps = bandwidth;
encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;
 
+   /*
+* FIXME: MCDI table API support depends on an EF100 firmware build
+* and an EF100 platform. It should be discovered by using a capability
+* flag from MCDI that is not implemented yet.
+* Right now we can safely rely on the return code from the libefx
+* MCDI Table API.
+*/
+   encp->enc_table_api_supported = B_TRUE;
+
return (0);
 
 fail3:
diff --git a/drivers/common/sfc_efx/base/siena_nic.c 
b/drivers/common/sfc_efx/base/siena_nic.c
index 9f14faf271..1f1fb7d2af 100644
--- a/drivers/common/sfc_efx/base/siena_nic.c
+++ b/drivers/common/sfc_efx/base/siena_nic.c
@@ -205,6 +205,8 @@ siena_board_cfg(
encp->enc_mae_supported = B_FALSE;
encp->enc_mae_admin = B_FALSE;
 
+   encp->enc_table_api_supported = B_FALSE;
+
encp->enc_dma_mapping = EFX_NIC_DMA_MAPPING_FLAT;
 
return (0);
-- 
2.30.2



[PATCH v3 03/34] common/sfc_efx/base: add API to list HW tables

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

New MCDI Table Access API allows management of
the HW tables' content.
This part of API helps to list all supported tables.
In the near future, only the CT table is planned
to be used, so only one identifier for this table
was added to the efx.
New table IDs will be added as needed.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h   | 15 
 drivers/common/sfc_efx/base/efx_table.c | 94 +
 drivers/common/sfc_efx/base/meson.build |  1 +
 drivers/common/sfc_efx/version.map  |  2 +
 4 files changed, 112 insertions(+)
 create mode 100644 drivers/common/sfc_efx/base/efx_table.c

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 520674a602..2de08d1230 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5070,6 +5070,21 @@ efx_nic_dma_map(
__insize_t len,
__out   efsys_dma_addr_t *nic_addrp);
 
+/* Unique IDs for HW tables */
+typedef enum efx_table_id_e {
+   EFX_TABLE_ID_CONNTRACK = 0x10300,
+} efx_table_id_t;
+
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_table_list(
+   __inefx_nic_t *enp,
+   __inuint32_t entry_ofst,
+   __out_opt   unsigned int *total_n_tablesp,
+   __out_ecount_opt(n_table_ids)   efx_table_id_t *table_ids,
+   __inunsigned int n_table_ids,
+   __out_opt   unsigned int 
*n_table_ids_writtenp);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_table.c 
b/drivers/common/sfc_efx/base/efx_table.c
new file mode 100644
index 00..7cfdfea36e
--- /dev/null
+++ b/drivers/common/sfc_efx/base/efx_table.c
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ */
+
+#include "efx.h"
+#include "efx_impl.h"
+
+   __checkReturn   efx_rc_t
+efx_table_list(
+   __inefx_nic_t *enp,
+   __inuint32_t entry_ofst,
+   __out_opt   unsigned int *total_n_tablesp,
+   __out_ecount_opt(n_table_ids)   efx_table_id_t *table_ids,
+   __inunsigned int n_table_ids,
+   __out_opt   unsigned int 
*n_table_ids_writtenp)
+{
+   const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+   unsigned int n_entries;
+   efx_mcdi_req_t req;
+   unsigned int i;
+   efx_rc_t rc;
+   EFX_MCDI_DECLARE_BUF(payload,
+   MC_CMD_TABLE_LIST_IN_LEN,
+   MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2);
+
+   /* Ensure EFX and MCDI use same values for table IDs */
+   EFX_STATIC_ASSERT(EFX_TABLE_ID_CONNTRACK == TABLE_ID_CONNTRACK_TABLE);
+
+   if (encp->enc_table_api_supported == B_FALSE) {
+   rc = ENOTSUP;
+   goto fail1;
+   }
+
+   if ((n_table_ids != 0) &&
+  ((table_ids == NULL) || (n_table_ids_writtenp == NULL))) {
+   rc = EINVAL;
+   goto fail2;
+   }
+
+   req.emr_cmd = MC_CMD_TABLE_LIST;
+   req.emr_in_buf = payload;
+   req.emr_in_length = MC_CMD_TABLE_LIST_IN_LEN;
+   req.emr_out_buf = payload;
+   req.emr_out_length = MC_CMD_TABLE_LIST_OUT_LENMAX_MCDI2;
+
+   MCDI_IN_SET_DWORD(req, TABLE_LIST_IN_FIRST_TABLE_ID_INDEX, entry_ofst);
+
+   efx_mcdi_execute(enp, &req);
+
+   if (req.emr_rc != 0) {
+   rc = req.emr_rc;
+   goto fail3;
+   }
+
+   if (req.emr_out_length_used < MC_CMD_TABLE_LIST_OUT_LENMIN) {
+   rc = EMSGSIZE;
+   goto fail4;
+   }
+
+   if (total_n_tablesp != NULL)
+   *total_n_tablesp = MCDI_OUT_DWORD(req, TABLE_LIST_OUT_N_TABLES);
+
+   n_entries = MC_CMD_TABLE_LIST_OUT_TABLE_ID_NUM(req.emr_out_length_used);
+
+   if (table_ids != NULL) {
+   if (n_entries > n_table_ids) {
+   rc = ENOMEM;
+   goto fail5;
+   }
+
+   for (i = 0; i < n_entries; i++) {
+   table_ids[i] = MCDI_OUT_INDEXED_DWORD(req,
+   TABLE_LIST_OUT_TABLE_ID, i);
+   }
+   }
+
+   if (n_table_ids_writtenp != NULL)
+   *n_table_ids_writtenp = n_entries;
+
+   return (0);
+
+fail5:
+   EFSYS_PROBE(fail5);
+fail4:
+   EFSYS_PROBE(fail4);
+fail3:
+   EFSYS_PROBE(fail3);
+fail2:
+   EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
diff --git a/drivers/common/sfc_efx/base/meson.build 
b/drivers/common/sfc_efx/base/meson.build
index ff7f33fb44..7fc04aa57b 100644
--- a/

[PATCH v3 04/34] common/sfc_efx/base: add macro to get indexed QWORD field

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

Extend MCDI macros to manipulate with fields in indexed QWORDs.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Viacheslav Galaktionov 
---
 drivers/common/sfc_efx/base/efx_mcdi.h | 4 
 1 file changed, 4 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx_mcdi.h 
b/drivers/common/sfc_efx/base/efx_mcdi.h
index 14a3833567..f13bf43da6 100644
--- a/drivers/common/sfc_efx/base/efx_mcdi.h
+++ b/drivers/common/sfc_efx/base/efx_mcdi.h
@@ -504,6 +504,10 @@ efx_mcdi_set_nic_addr_regions(
EFX_DWORD_FIELD(*(MCDI_OUT2(_emr, efx_dword_t, _ofst) + \
(_idx)), _field)
 
+#defineMCDI_OUT_INDEXED_QWORD_FIELD(_emr, _ofst, _idx, _field) 
\
+   EFX_QWORD_FIELD(*(MCDI_OUT2(_emr, efx_qword_t, _ofst) + \
+   (_idx)), _field)
+
 #defineMCDI_OUT_INDEXED_STRUCT_MEMBER(_emr, _type, _arr_ofst, _idx,
\
_member_ofst)   \
((_type *)(MCDI_OUT2(_emr, uint8_t, _arr_ofst) +\
-- 
2.30.2



[PATCH v3 05/34] common/sfc_efx/base: add API to get HW table desc

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

Table's descriptor and fields' descriptors can be taken
by table ID using a new API.
In the near future, only the CT table is planned
to be used, so only fields that are required for these
purposes were added to the efx.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Viacheslav Galaktionov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h   |  67 +++
 drivers/common/sfc_efx/base/efx_table.c | 256 
 drivers/common/sfc_efx/version.map  |   3 +
 3 files changed, 326 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 2de08d1230..8860e4ebbe 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5085,6 +5085,73 @@ efx_table_list(
__inunsigned int n_table_ids,
__out_opt   unsigned int 
*n_table_ids_writtenp);
 
+LIBEFX_API
+extern __checkReturn   size_t
+efx_table_supported_num_get(
+   __invoid);
+
+LIBEFX_API
+extern __checkReturn   boolean_t
+efx_table_is_supported(
+   __inefx_table_id_t table_id);
+
+/* Unique IDs for table fields */
+typedef enum efx_table_field_id_e {
+   EFX_TABLE_FIELD_ID_UNUSED = 0x0,
+   EFX_TABLE_FIELD_ID_COUNTER_ID = 0xa,
+   EFX_TABLE_FIELD_ID_ETHER_TYPE = 0x1c,
+   EFX_TABLE_FIELD_ID_SRC_IP = 0x1d,
+   EFX_TABLE_FIELD_ID_DST_IP = 0x1e,
+   EFX_TABLE_FIELD_ID_IP_PROTO = 0x20,
+   EFX_TABLE_FIELD_ID_SRC_PORT = 0x21,
+   EFX_TABLE_FIELD_ID_DST_PORT = 0x22,
+   EFX_TABLE_FIELD_ID_NAT_PORT = 0x7a,
+   EFX_TABLE_FIELD_ID_NAT_IP = 0x7b,
+   EFX_TABLE_FIELD_ID_NAT_DIR = 0x7c,
+   EFX_TABLE_FIELD_ID_CT_MARK = 0x7d,
+} efx_table_field_id_t;
+
+/* Table fields mask types */
+typedef enum efx_table_field_mask_type_e {
+   EFX_TABLE_FIELD_MASK_NEVER = 0x0,
+   EFX_TABLE_FIELD_MASK_EXACT = 0x1,
+} efx_table_field_mask_type_t;
+
+typedef struct efx_table_field_desc_s {
+   efx_table_field_id_tfield_id;
+   uint16_tlbn;
+   uint16_twidth;
+   efx_table_field_mask_type_t mask_type;
+   uint8_t scheme;
+} efx_table_field_descriptor_t;
+
+/* Types of HW tables */
+typedef enum efx_table_type_e {
+   /* Exact match to all key fields of table entry. */
+   EFX_TABLE_TYPE_BCAM = 0x2,
+} efx_table_type_t;
+
+typedef struct efx_table_descriptor_s {
+   efx_table_type_ttype;
+   uint16_tkey_width;
+   uint16_tresp_width;
+   /* Number of key's fields to match data */
+   uint16_tn_key_fields;
+   /* Number of fields in match response */
+   uint16_tn_resp_fields;
+} efx_table_descriptor_t;
+
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_table_describe(
+   __inefx_nic_t *enp,
+   __inefx_table_id_t table_id,
+   __inuint32_t field_offset,
+   __out_opt   efx_table_descriptor_t 
*table_descp,
+   __out_ecount_opt(*n_fields_descsp)  efx_table_field_descriptor_t 
*fields_descs,
+   __inunsigned int n_field_descs,
+   __out_opt   unsigned int 
*n_field_descs_writtenp);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_table.c 
b/drivers/common/sfc_efx/base/efx_table.c
index 7cfdfea36e..115d86502f 100644
--- a/drivers/common/sfc_efx/base/efx_table.c
+++ b/drivers/common/sfc_efx/base/efx_table.c
@@ -6,6 +6,11 @@
 #include "efx.h"
 #include "efx_impl.h"
 
+/* List of HW tables that have support in efx */
+static const efx_table_id_t efx_supported_table_ids[] = {
+   EFX_TABLE_ID_CONNTRACK,
+};
+
__checkReturn   efx_rc_t
 efx_table_list(
__inefx_nic_t *enp,
@@ -80,6 +85,257 @@ efx_table_list(
 
return (0);
 
+fail5:
+   EFSYS_PROBE(fail5);
+fail4:
+   EFSYS_PROBE(fail4);
+fail3:
+   EFSYS_PROBE(fail3);
+fail2:
+   EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   size_t
+efx_table_supported_num_get(
+   __invoid)
+{
+   return EFX_ARRAY_SIZE(efx_supported_table_ids);
+}
+
+   __checkReturn   boolean_t
+efx_table_is_supported(
+   __inefx_table_id_t table_id)
+{
+   size_t i;
+
+   for (i = 0; i < efx_table_supported_num_get(); i++) {
+   if (efx_supported_table_ids[i] == table_id)
+   return B_TRUE;
+   }
+
+   return B_FALSE;
+}
+
+static __checkReturn   

[PATCH v3 06/34] common/sfc_efx/base: add API to insert data to HW table

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

API allows inserting data into any supported HW table.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Viacheslav Galaktionov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h   | 16 +
 drivers/common/sfc_efx/base/efx_table.c | 79 +
 drivers/common/sfc_efx/version.map  |  1 +
 3 files changed, 96 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 8860e4ebbe..1b01a8ec8b 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5152,6 +5152,22 @@ efx_table_describe(
__inunsigned int n_field_descs,
__out_opt   unsigned int 
*n_field_descs_writtenp);
 
+/* Maximum possible size of data for manipulation of the tables */
+#define EFX_TABLE_ENTRY_LENGTH_MAX 1008
+
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_table_entry_insert(
+   __inefx_nic_t *enp,
+   __inefx_table_id_t table_id,
+   __inuint16_t priority,
+   __inuint16_t mask_id,
+   __inuint16_t key_width,
+   __inuint16_t mask_width,
+   __inuint16_t resp_width,
+   __in_bcount(data_size)  uint8_t *entry_datap,
+   __inunsigned int data_size);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_table.c 
b/drivers/common/sfc_efx/base/efx_table.c
index 115d86502f..63fc319666 100644
--- a/drivers/common/sfc_efx/base/efx_table.c
+++ b/drivers/common/sfc_efx/base/efx_table.c
@@ -338,6 +338,85 @@ efx_table_describe(
EFSYS_PROBE(fail6);
 fail5:
EFSYS_PROBE(fail5);
+fail4:
+   EFSYS_PROBE(fail4);
+fail3:
+   EFSYS_PROBE(fail3);
+fail2:
+   EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_table_entry_insert(
+   __inefx_nic_t *enp,
+   __inefx_table_id_t table_id,
+   __inuint16_t priority,
+   __inuint16_t mask_id,
+   __inuint16_t key_width,
+   __inuint16_t mask_width,
+   __inuint16_t resp_width,
+   __in_bcount(data_size)  uint8_t *entry_datap,
+   __inunsigned int data_size)
+{
+   const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+   unsigned int n_dwords;
+   efx_mcdi_req_t req;
+   efx_rc_t rc;
+   EFX_MCDI_DECLARE_BUF(payload,
+   MC_CMD_TABLE_INSERT_IN_LENMAX_MCDI2,
+   MC_CMD_TABLE_INSERT_OUT_LEN);
+
+   /*
+* Ensure MCDI number of 32-bit units matches EFX maximum possible
+* data in bytes.
+*/
+   EFX_STATIC_ASSERT((MC_CMD_TABLE_INSERT_IN_LENMAX  * sizeof(uint32_t)) ==
+   EFX_TABLE_ENTRY_LENGTH_MAX);
+
+   if (encp->enc_table_api_supported == B_FALSE) {
+   rc = ENOTSUP;
+   goto fail1;
+   }
+
+   if ((data_size % sizeof(uint32_t)) != 0) {
+   rc = EINVAL;
+   goto fail2;
+   }
+
+   if ((data_size == 0) || (data_size > EFX_TABLE_ENTRY_LENGTH_MAX)) {
+   rc = EINVAL;
+   goto fail3;
+   }
+
+   n_dwords = data_size / sizeof(uint32_t);
+
+   req.emr_cmd = MC_CMD_TABLE_INSERT;
+   req.emr_in_buf = payload;
+   req.emr_in_length = MC_CMD_TABLE_INSERT_IN_LEN(n_dwords);
+   req.emr_out_buf = payload;
+   req.emr_out_length = MC_CMD_TABLE_INSERT_OUT_LEN;
+
+   MCDI_IN_SET_DWORD(req, TABLE_INSERT_IN_TABLE_ID, (uint32_t)table_id);
+   MCDI_IN_SET_WORD(req, TABLE_INSERT_IN_PRIORITY, priority);
+   MCDI_IN_SET_WORD(req, TABLE_INSERT_IN_MASK_ID, mask_id);
+   MCDI_IN_SET_WORD(req, TABLE_INSERT_IN_KEY_WIDTH, key_width);
+   MCDI_IN_SET_WORD(req, TABLE_INSERT_IN_MASK_WIDTH, mask_width);
+   MCDI_IN_SET_WORD(req, TABLE_INSERT_IN_RESP_WIDTH, resp_width);
+
+   memcpy(MCDI_IN2(req, uint8_t, TABLE_INSERT_IN_DATA), entry_datap, 
data_size);
+
+   efx_mcdi_execute(enp, &req);
+
+   if (req.emr_rc != 0) {
+   rc = req.emr_rc;
+   goto fail4;
+   }
+
+   return (0);
+
 fail4:
EFSYS_PROBE(fail4);
 fail3:
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index a87cb1bba5..2be519e0e1 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -233,6 +233,7 @@ INTERNAL {
efx_sram_buf_tbl_set;
 
efx_table_describe;
+   efx_table_entry_insert;

[PATCH v3 07/34] common/sfc_efx/base: add API to delete entry from HW table

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

API allows deleting an entry from any supported HW table.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h   | 11 
 drivers/common/sfc_efx/base/efx_table.c | 77 +
 drivers/common/sfc_efx/version.map  |  1 +
 3 files changed, 89 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 1b01a8ec8b..f96e398460 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -5168,6 +5168,17 @@ efx_table_entry_insert(
__in_bcount(data_size)  uint8_t *entry_datap,
__inunsigned int data_size);
 
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_table_entry_delete(
+   __inefx_nic_t *enp,
+   __inefx_table_id_t table_id,
+   __inuint16_t mask_id,
+   __inuint16_t key_width,
+   __inuint16_t mask_width,
+   __in_bcount(data_size)  uint8_t *entry_datap,
+   __inunsigned int data_size);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/common/sfc_efx/base/efx_table.c 
b/drivers/common/sfc_efx/base/efx_table.c
index 63fc319666..10a1c2ddcd 100644
--- a/drivers/common/sfc_efx/base/efx_table.c
+++ b/drivers/common/sfc_efx/base/efx_table.c
@@ -419,6 +419,83 @@ efx_table_entry_insert(
 
 fail4:
EFSYS_PROBE(fail4);
+fail3:
+   EFSYS_PROBE(fail3);
+fail2:
+   EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_table_entry_delete(
+   __inefx_nic_t *enp,
+   __inefx_table_id_t table_id,
+   __inuint16_t mask_id,
+   __inuint16_t key_width,
+   __inuint16_t mask_width,
+   __in_bcount(data_size)  uint8_t *entry_datap,
+   __inunsigned int data_size)
+{
+   const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
+   unsigned int n_dwords;
+   efx_mcdi_req_t req;
+   efx_rc_t rc;
+   EFX_MCDI_DECLARE_BUF(payload,
+   MC_CMD_TABLE_DELETE_IN_LENMAX_MCDI2,
+   MC_CMD_TABLE_DELETE_OUT_LEN);
+
+   /*
+* Ensure MCDI number of 32-bit units matches EFX maximum possible
+* data in bytes.
+*/
+   EFX_STATIC_ASSERT((MC_CMD_TABLE_DELETE_IN_LENMAX  * sizeof(uint32_t)) ==
+   EFX_TABLE_ENTRY_LENGTH_MAX);
+
+   if (encp->enc_table_api_supported == B_FALSE) {
+   rc = ENOTSUP;
+   goto fail1;
+   }
+
+   if ((data_size % sizeof(uint32_t)) != 0) {
+   rc = EINVAL;
+   goto fail2;
+   }
+
+   if ((data_size == 0) || (data_size > EFX_TABLE_ENTRY_LENGTH_MAX)) {
+   rc = EINVAL;
+   goto fail3;
+   }
+
+   n_dwords = data_size / sizeof(uint32_t);
+
+   req.emr_cmd = MC_CMD_TABLE_DELETE;
+   req.emr_in_buf = payload;
+   req.emr_in_length = MC_CMD_TABLE_DELETE_IN_LEN(n_dwords);
+   req.emr_out_buf = payload;
+   req.emr_out_length = MC_CMD_TABLE_DELETE_OUT_LEN;
+
+   MCDI_IN_SET_DWORD(req, TABLE_DELETE_IN_TABLE_ID, (uint32_t)table_id);
+   MCDI_IN_SET_WORD(req, TABLE_DELETE_IN_MASK_ID, mask_id);
+   MCDI_IN_SET_WORD(req, TABLE_DELETE_IN_KEY_WIDTH, key_width);
+   MCDI_IN_SET_WORD(req, TABLE_DELETE_IN_MASK_WIDTH, mask_width);
+
+
+   memcpy(MCDI_IN2(req, uint8_t, TABLE_DELETE_IN_DATA), entry_datap, 
data_size);
+
+   efx_mcdi_execute(enp, &req);
+
+   if (req.emr_rc != 0) {
+   rc = req.emr_rc;
+   goto fail4;
+   }
+
+   return (0);
+
+fail4:
+   EFSYS_PROBE(fail4);
+
 fail3:
EFSYS_PROBE(fail3);
 fail2:
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index 2be519e0e1..d083a54a03 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -233,6 +233,7 @@ INTERNAL {
efx_sram_buf_tbl_set;
 
efx_table_describe;
+   efx_table_entry_delete;
efx_table_entry_insert;
efx_table_is_supported;
efx_table_list;
-- 
2.30.2



[PATCH v3 08/34] net/sfc: add MCDI wrappers for BCAM tables

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

A "table" is a structure used for lookups, consisting of a set of
entries which can be matched against an N-bit "request", to
return either a "hit" with an M-bit "response", or a "miss" if
there is no match. There are a number of HW tables of various
types that could be used in MAE.

In some types of table the entry may also be associated with an
N-bit "mask", allowing some bits of the request to be treated as
don't-care, and an integer "priority" to determine which entry is
used if more than one matches.

BCAM tables don't support "mask" and "priority", so the
corresponding fields must be zeroed.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_tbls.h | 69 ++
 1 file changed, 69 insertions(+)
 create mode 100644 drivers/net/sfc/sfc_tbls.h

diff --git a/drivers/net/sfc/sfc_tbls.h b/drivers/net/sfc/sfc_tbls.h
new file mode 100644
index 00..2a5c87b82c
--- /dev/null
+++ b/drivers/net/sfc/sfc_tbls.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ */
+
+#ifndef _SFC_TBLS_H
+#define _SFC_TBLS_H
+
+#include "efx.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Table types:
+ *   CAM - Content addressable memory
+ *  BCAM - Binary CAM
+ *  TCAM - Ternary CAM
+ * STCAM - Semi-ternary CAM
+ *
+ * Short description:
+ * TCAM:  Each entry has a key, mask, response and priority. An entry matches
+ *when (key & mask) == (request & mask). In the case of multiple
+ *matches, the entry with the highest priority wins; Each entry may
+ *have its own mask, but TCAM table definitions may place constraints
+ *on the possible masks allowed for each of the individual fields.
+ * STCAM: A limited form of TCAM in which only a limited number of masks (and
+ *associated priorities), up to some maximum fixed by the definition
+ *of the table, may be in use at any one time.
+ * BCAM:  Each entry has only a key and response, with the whole request
+ *matched against the key (like a typical hash table or "map").
+ * Direct (sometimes "DCAM", although it's not really content-addressable):
+ *Essentially just an array, where the key bits are used simply as an
+ *index.
+ */
+
+/* Priority is used only for TCAM or STCAM, use 0 in case of BCAM */
+#define SFC_TBLS_BCAM_PRIORITY 0
+
+/* Mask ID is used only for STCAM with ALLOC_MASKS flag, use 0 for BCAM */
+#define SFC_TBLS_BCAM_MASK_ID  0
+
+/* Mask is used only for STCAM */
+#define SFC_TBLS_BCAM_MASK_WIDTH   0
+
+static inline int
+sfc_tbls_bcam_entry_insert(efx_nic_t *enp, efx_table_id_t table_id, uint16_t 
key_width,
+  uint16_t resp_width, uint8_t *data, unsigned int 
data_size)
+{
+   return efx_table_entry_insert(enp, table_id, SFC_TBLS_BCAM_PRIORITY,
+ SFC_TBLS_BCAM_MASK_ID, key_width,
+ SFC_TBLS_BCAM_MASK_WIDTH, resp_width,
+ data, data_size);
+}
+
+static inline int
+sfc_tbls_bcam_entry_delete(efx_nic_t *enp, efx_table_id_t table_id, uint16_t 
key_width,
+  uint8_t *data, unsigned int data_size)
+{
+   return efx_table_entry_delete(enp, table_id, SFC_TBLS_BCAM_MASK_ID,
+ key_width, SFC_TBLS_BCAM_MASK_WIDTH,
+ data, data_size);
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _SFC_TBLS_H */
-- 
2.30.2



[PATCH v3 09/34] net/sfc: add functions to manipulate MCDI table fields

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

Implemented functions that help to fill user data for
manipulation with HW tables in the required format.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/meson.build |   1 +
 drivers/net/sfc/sfc_tbls.c  | 140 
 drivers/net/sfc/sfc_tbls.h  | 135 ++
 3 files changed, 276 insertions(+)
 create mode 100644 drivers/net/sfc/sfc_tbls.c

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index c2d8430810..39c7f24764 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -87,6 +87,7 @@ sources = files(
 'sfc_tso.c',
 'sfc_filter.c',
 'sfc_switch.c',
+'sfc_tbls.c',
 'sfc_mae.c',
 'sfc_mae_counter.c',
 'sfc_flow.c',
diff --git a/drivers/net/sfc/sfc_tbls.c b/drivers/net/sfc/sfc_tbls.c
new file mode 100644
index 00..db54fc0d40
--- /dev/null
+++ b/drivers/net/sfc/sfc_tbls.c
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ */
+
+#include "sfc_tbls.h"
+#include "sfc_debug.h"
+
+#include 
+
+/* Number of bits in uint32_t type */
+#define SFC_TBLS_U32_BITS (sizeof(uint32_t) * CHAR_BIT)
+
+static uint32_t
+sfc_tbls_field_update(uint32_t in, uint16_t lbn, uint16_t width, uint32_t 
value)
+{
+   uint32_t mask;
+
+   if (width == SFC_TBLS_U32_BITS)
+   return value;
+
+   mask = RTE_LEN2MASK(width, uint32_t);
+   value &= mask;
+
+   if (lbn != 0) {
+   mask <<= lbn;
+   value <<= lbn;
+   }
+
+   return (in & (~mask)) | value;
+}
+
+void
+sfc_tbls_field_set_u32(uint32_t data[], __rte_unused unsigned int data_size,
+  uint16_t lbn, uint16_t width, uint32_t value)
+{
+   uint32_t data_offset = 0;
+
+   if (lbn >= SFC_TBLS_U32_BITS) {
+   data_offset = lbn / SFC_TBLS_U32_BITS;
+
+   SFC_ASSERT(data_offset < data_size);
+
+   data += data_offset;
+   lbn %= SFC_TBLS_U32_BITS;
+   }
+
+   if (lbn + width <= SFC_TBLS_U32_BITS) {
+   *data = sfc_tbls_field_update(*data, lbn, width, value);
+   } else {
+   *data = sfc_tbls_field_update(*data, lbn,
+ SFC_TBLS_U32_BITS - lbn, value);
+   value >>= SFC_TBLS_U32_BITS - lbn;
+
+   data_offset++;
+   SFC_ASSERT(data_offset < data_size);
+
+   data++;
+   *data = sfc_tbls_field_update(*data, 0,
+ width + lbn - SFC_TBLS_U32_BITS,
+ value);
+   }
+}
+
+void
+sfc_tbls_field_set_u16(uint32_t data[], unsigned int data_size, uint16_t lbn,
+  uint16_t width, uint16_t value)
+{
+   sfc_tbls_field_set_u32(data, data_size, lbn, width, value);
+}
+
+void
+sfc_tbls_field_set_u8(uint32_t data[], unsigned int data_size, uint16_t lbn,
+ uint16_t width, uint8_t value)
+{
+   sfc_tbls_field_set_u32(data, data_size, lbn, width, value);
+}
+
+void
+sfc_tbls_field_set_ip(uint32_t data[], unsigned int data_size, uint16_t lbn,
+ __rte_unused uint16_t width, const uint32_t *ip)
+{
+   unsigned int i;
+   size_t ipv6_addr_len = RTE_SIZEOF_FIELD(struct rte_ipv6_hdr, src_addr);
+
+   SFC_ASSERT(width == ipv6_addr_len * CHAR_BIT);
+
+   for (i = 0; i < ipv6_addr_len / sizeof(*ip); i++) {
+   sfc_tbls_field_set_u32(data, data_size, lbn,
+  SFC_TBLS_U32_BITS, ip[i]);
+   lbn += SFC_TBLS_U32_BITS;
+   }
+}
+
+void
+sfc_tbls_field_set_u64(uint32_t data[], __rte_unused unsigned int data_size,
+  uint16_t lbn, uint16_t width, uint64_t value)
+{
+   uint32_t data_offset = 0;
+
+   if (lbn >= SFC_TBLS_U32_BITS) {
+   data_offset = lbn / SFC_TBLS_U32_BITS;
+
+   SFC_ASSERT(data_offset < data_size);
+
+   data += data_offset;
+   lbn %= SFC_TBLS_U32_BITS;
+   }
+
+   *data = sfc_tbls_field_update(*data, lbn, SFC_TBLS_U32_BITS - lbn, 
value);
+   value >>= SFC_TBLS_U32_BITS - lbn;
+   width -= SFC_TBLS_U32_BITS - lbn;
+
+   data_offset++;
+   SFC_ASSERT(data_offset < data_size);
+
+   data++;
+
+   if (width > SFC_TBLS_U32_BITS) {
+   *data = sfc_tbls_field_update(*data, 0, SFC_TBLS_U32_BITS, 
value);
+   value >>= SFC_TBLS_U32_BITS;
+   width -= SFC_TBLS_U32_BITS;
+
+   data_offset++;
+   SFC_ASSERT(data_offset < data_size);
+
+   data++;
+   }
+
+   *data = sfc_tbls_field_update(*data, 0, width, value);
+}
+
+void
+sfc_tbls_field_set_bit(uint32_t data[], unsigned int data_size, uint16_t lbn,
+   

[PATCH v3 10/34] net/sfc: attach to HW table API

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

The patch adds APIs to initialise, manipulate and finalise
HW tables API-specific context in NIC control structure.
The context itself will be used to store HW tables-related info,
like table descriptors and field descriptors.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/meson.build  |   4 +-
 drivers/net/sfc/sfc.c|  18 +-
 drivers/net/sfc/sfc.h|   2 +
 drivers/net/sfc/sfc_tbl_meta.c   |  71 
 drivers/net/sfc/sfc_tbl_meta.h   |  37 
 drivers/net/sfc/sfc_tbl_meta_cache.c | 253 +++
 drivers/net/sfc/sfc_tbl_meta_cache.h |  25 +++
 drivers/net/sfc/sfc_tbls.c   |  60 +++
 drivers/net/sfc/sfc_tbls.h   |  81 +
 9 files changed, 549 insertions(+), 2 deletions(-)
 create mode 100644 drivers/net/sfc/sfc_tbl_meta.c
 create mode 100644 drivers/net/sfc/sfc_tbl_meta.h
 create mode 100644 drivers/net/sfc/sfc_tbl_meta_cache.c
 create mode 100644 drivers/net/sfc/sfc_tbl_meta_cache.h

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index 39c7f24764..c9d4264674 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -71,7 +71,7 @@ if not cc.links(atomic_check_code)
 ext_deps += libatomic_dep
 endif
 
-deps += ['common_sfc_efx', 'bus_pci']
+deps += ['common_sfc_efx', 'bus_pci', 'hash']
 sources = files(
 'sfc_ethdev.c',
 'sfc_kvargs.c',
@@ -88,6 +88,8 @@ sources = files(
 'sfc_filter.c',
 'sfc_switch.c',
 'sfc_tbls.c',
+'sfc_tbl_meta.c',
+'sfc_tbl_meta_cache.c',
 'sfc_mae.c',
 'sfc_mae_counter.c',
 'sfc_flow.c',
diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index 22753e3417..a56521696a 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -491,6 +491,10 @@ sfc_try_start(struct sfc_adapter *sa)
if (rc != 0)
goto fail_ev_start;
 
+   rc = sfc_tbls_start(sa);
+   if (rc != 0)
+   goto fail_tbls_start;
+
rc = sfc_port_start(sa);
if (rc != 0)
goto fail_port_start;
@@ -526,9 +530,12 @@ sfc_try_start(struct sfc_adapter *sa)
 fail_rx_start:
sfc_port_stop(sa);
 
-fail_port_start:
+fail_tbls_start:
sfc_ev_stop(sa);
 
+fail_port_start:
+   sfc_tbls_stop(sa);
+
 fail_ev_start:
sfc_intr_stop(sa);
 
@@ -626,6 +633,7 @@ sfc_stop(struct sfc_adapter *sa)
sfc_tx_stop(sa);
sfc_rx_stop(sa);
sfc_port_stop(sa);
+   sfc_tbls_stop(sa);
sfc_ev_stop(sa);
sfc_intr_stop(sa);
efx_nic_fini(sa->nic);
@@ -983,6 +991,10 @@ sfc_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_mae_attach;
 
+   rc = sfc_tbls_attach(sa);
+   if (rc != 0)
+   goto fail_tables_attach;
+
rc = sfc_mae_switchdev_init(sa);
if (rc != 0)
goto fail_mae_switchdev_init;
@@ -1025,6 +1037,9 @@ sfc_attach(struct sfc_adapter *sa)
sfc_mae_switchdev_fini(sa);
 
 fail_mae_switchdev_init:
+   sfc_tbls_detach(sa);
+
+fail_tables_attach:
sfc_mae_detach(sa);
 
 fail_mae_attach:
@@ -1088,6 +1103,7 @@ sfc_detach(struct sfc_adapter *sa)
 
sfc_repr_proxy_detach(sa);
sfc_mae_switchdev_fini(sa);
+   sfc_tbls_detach(sa);
sfc_mae_detach(sa);
sfc_mae_counter_rxq_detach(sa);
sfc_filter_detach(sa);
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 730d054aea..6b301aad60 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -31,6 +31,7 @@
 #include "sfc_flow_tunnel.h"
 #include "sfc_sriov.h"
 #include "sfc_mae.h"
+#include "sfc_tbls.h"
 #include "sfc_dp.h"
 #include "sfc_sw_stats.h"
 #include "sfc_repr_proxy.h"
@@ -244,6 +245,7 @@ struct sfc_adapter {
struct sfc_ft_ctx   ft_ctx_pool[SFC_FT_MAX_NTUNNELS];
struct sfc_filter   filter;
struct sfc_mae  mae;
+   struct sfc_tbls hw_tables;
struct sfc_repr_proxy   repr_proxy;
 
struct sfc_flow_listflow_list;
diff --git a/drivers/net/sfc/sfc_tbl_meta.c b/drivers/net/sfc/sfc_tbl_meta.c
new file mode 100644
index 00..997082fd74
--- /dev/null
+++ b/drivers/net/sfc/sfc_tbl_meta.c
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ */
+
+#include "sfc.h"
+#include "sfc_tbl_meta.h"
+#include "sfc_tbl_meta_cache.h"
+
+const struct sfc_tbl_meta *
+sfc_tbl_meta_lookup(struct sfc_adapter *sa, efx_table_id_t table_id)
+{
+   struct sfc_tbl_meta *meta;
+   struct sfc_tbls *tables = &sa->hw_tables;
+   int rc;
+
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+   if (tables->status != SFC_TBLS_STATUS_SUPPORTED)
+   return NULL;
+
+   rc = rte_hash_lookup_data(tables->meta.cache, (const v

[PATCH v3 11/34] net/sfc: add API to manage HW Conntrack table

2023-06-04 Thread Ivan Malov
From: Denis Pryazhennikov 

The new API allows manipulating entries in
the HW Conntrack table.
It uses a new Table Access API as a backend.

Signed-off-by: Denis Pryazhennikov 
Reviewed-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/meson.build  |   1 +
 drivers/net/sfc/sfc_mae_ct.c | 201 +++
 drivers/net/sfc/sfc_mae_ct.h |  68 
 3 files changed, 270 insertions(+)
 create mode 100644 drivers/net/sfc/sfc_mae_ct.c
 create mode 100644 drivers/net/sfc/sfc_mae_ct.h

diff --git a/drivers/net/sfc/meson.build b/drivers/net/sfc/meson.build
index c9d4264674..5adde68517 100644
--- a/drivers/net/sfc/meson.build
+++ b/drivers/net/sfc/meson.build
@@ -92,6 +92,7 @@ sources = files(
 'sfc_tbl_meta_cache.c',
 'sfc_mae.c',
 'sfc_mae_counter.c',
+'sfc_mae_ct.c',
 'sfc_flow.c',
 'sfc_flow_rss.c',
 'sfc_flow_tunnel.c',
diff --git a/drivers/net/sfc/sfc_mae_ct.c b/drivers/net/sfc/sfc_mae_ct.c
new file mode 100644
index 00..fd6819c8a5
--- /dev/null
+++ b/drivers/net/sfc/sfc_mae_ct.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2023 Advanced Micro Devices, Inc.
+ */
+
+#include "sfc.h"
+#include "sfc_mae_ct.h"
+
+/* SF-123102-TC-1A § 10.6.3: Conntrack_Table key */
+static void
+sfc_mae_ct_key_to_mcdi_key(const sfc_mae_conntrack_key_t *key,
+  const efx_table_field_descriptor_t *fields,
+  unsigned int n_fields, uint32_t *mcdi_key,
+  unsigned int key_size)
+{
+   unsigned int i;
+
+   for (i = 0; i < n_fields; i++) {
+   const efx_table_field_descriptor_t *desc = &fields[i];
+
+   if (desc->mask_type == EFX_TABLE_FIELD_MASK_NEVER)
+   continue;
+
+   switch (desc->field_id) {
+   case EFX_TABLE_FIELD_ID_IP_PROTO:
+   sfc_tbls_field_set_u8(mcdi_key, key_size, desc->lbn,
+ desc->width, key->ip_proto);
+   break;
+   case EFX_TABLE_FIELD_ID_ETHER_TYPE:
+   sfc_tbls_field_set_u16(mcdi_key, key_size, desc->lbn,
+  desc->width, key->ether_type_le);
+   break;
+   case EFX_TABLE_FIELD_ID_SRC_PORT:
+   sfc_tbls_field_set_u16(mcdi_key, key_size, desc->lbn,
+  desc->width, key->src_port_le);
+   break;
+   case EFX_TABLE_FIELD_ID_DST_PORT:
+   sfc_tbls_field_set_u16(mcdi_key, key_size, desc->lbn,
+  desc->width, key->dst_port_le);
+   break;
+   case EFX_TABLE_FIELD_ID_SRC_IP:
+   sfc_tbls_field_set_ip(mcdi_key, key_size, desc->lbn,
+ desc->width,
+ (const uint32_t 
*)key->src_addr_le);
+   break;
+   case EFX_TABLE_FIELD_ID_DST_IP:
+   sfc_tbls_field_set_ip(mcdi_key, key_size, desc->lbn,
+ desc->width,
+ (const uint32_t 
*)key->dst_addr_le);
+   break;
+
+   default:
+   break;
+   }
+   }
+}
+
+/* SF-123102-TC-1A § 10.6.4: Conntrack_Table response */
+static void
+sfc_mae_ct_response_to_mcdi_response(const sfc_mae_conntrack_response_t 
*response,
+const efx_table_field_descriptor_t *fields,
+unsigned int n_fields, uint32_t *mcdi_resp,
+unsigned int resp_size)
+{
+   unsigned int i;
+
+   for (i = 0; i < n_fields; i++) {
+   const efx_table_field_descriptor_t *desc = &fields[i];
+
+   if (desc->mask_type == EFX_TABLE_FIELD_MASK_NEVER)
+   continue;
+
+   /* Fields of responses are always reported with the EXACT type. 
*/
+   SFC_ASSERT(desc->mask_type == EFX_TABLE_FIELD_MASK_EXACT);
+
+   switch (desc->field_id) {
+   case EFX_TABLE_FIELD_ID_CT_MARK:
+   sfc_tbls_field_set_u32(mcdi_resp, resp_size, desc->lbn,
+  desc->width, response->ct_mark);
+   break;
+   case EFX_TABLE_FIELD_ID_COUNTER_ID:
+   sfc_tbls_field_set_u32(mcdi_resp, resp_size, desc->lbn,
+  desc->width, 
response->counter_id);
+   break;
+   case EFX_TABLE_FIELD_ID_NAT_DIR:
+   sfc_tbls_field_set_u8(mcdi_resp, resp_size, desc->lbn,
+  

[PATCH v3 12/34] net/sfc: make entry pointer optional in MAE resource helpers

2023-06-04 Thread Ivan Malov
Keep NULL object check in one place rather than repeat it in
all of the callers. That should make the code easier on the eyes.
Future code for additional object types will follow this way.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 78 ++-
 1 file changed, 52 insertions(+), 26 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index e5e9257998..1928d58779 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -402,6 +402,9 @@ sfc_mae_outer_rule_del(struct sfc_adapter *sa,
 {
struct sfc_mae *mae = &sa->mae;
 
+   if (rule == NULL)
+   return;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(rule->refcnt != 0);
 
@@ -429,11 +432,16 @@ sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
  struct sfc_mae_outer_rule *rule,
  efx_mae_match_spec_t *match_spec_action)
 {
-   struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
+   struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
+   if (rule == NULL)
+   return 0;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+   fw_rsrc = &rule->fw_rsrc;
+
if (fw_rsrc->refcnt == 0) {
SFC_ASSERT(fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
SFC_ASSERT(rule->match_spec != NULL);
@@ -480,11 +488,16 @@ static void
 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
   struct sfc_mae_outer_rule *rule)
 {
-   struct sfc_mae_fw_rsrc *fw_rsrc = &rule->fw_rsrc;
+   struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
+   if (rule == NULL)
+   return;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+   fw_rsrc = &rule->fw_rsrc;
+
if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
fw_rsrc->refcnt == 0) {
sfc_err(sa, "failed to disable outer_rule=%p: already disabled; 
OR_ID=0x%08x, refcnt=%u",
@@ -1057,6 +1070,9 @@ sfc_mae_action_set_del(struct sfc_adapter *sa,
 {
struct sfc_mae *mae = &sa->mae;
 
+   if (action_set == NULL)
+   return;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(action_set->refcnt != 0);
 
@@ -1092,15 +1108,24 @@ static int
 sfc_mae_action_set_enable(struct sfc_adapter *sa,
  struct sfc_mae_action_set *action_set)
 {
-   struct sfc_mae_encap_header *encap_header = action_set->encap_header;
-   struct sfc_mae_mac_addr *dst_mac_addr = action_set->dst_mac_addr;
-   struct sfc_mae_mac_addr *src_mac_addr = action_set->src_mac_addr;
-   struct sfc_mae_counter_id *counters = action_set->counters;
-   struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
+   struct sfc_mae_encap_header *encap_header;
+   struct sfc_mae_mac_addr *dst_mac_addr;
+   struct sfc_mae_mac_addr *src_mac_addr;
+   struct sfc_mae_counter_id *counters;
+   struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
+   if (action_set == NULL)
+   return 0;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+   encap_header = action_set->encap_header;
+   dst_mac_addr = action_set->dst_mac_addr;
+   src_mac_addr = action_set->src_mac_addr;
+   counters = action_set->counters;
+   fw_rsrc = &action_set->fw_rsrc;
+
if (fw_rsrc->refcnt == 0) {
SFC_ASSERT(fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID);
SFC_ASSERT(action_set->spec != NULL);
@@ -1167,11 +1192,16 @@ static void
 sfc_mae_action_set_disable(struct sfc_adapter *sa,
   struct sfc_mae_action_set *action_set)
 {
-   struct sfc_mae_fw_rsrc *fw_rsrc = &action_set->fw_rsrc;
+   struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
+   if (action_set == NULL)
+   return;
+
SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+   fw_rsrc = &action_set->fw_rsrc;
+
if (fw_rsrc->aset_id.id == EFX_MAE_RSRC_ID_INVALID ||
fw_rsrc->refcnt == 0) {
sfc_err(sa, "failed to disable action_set=%p: already disabled; 
AS_ID=0x%08x, refcnt=%u",
@@ -1226,11 +1256,8 @@ sfc_mae_flow_cleanup(struct sfc_adapter *sa,
 
SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
 
-   if (spec_mae->outer_rule != NULL)
-   sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
-
-   if (spec_mae->action_set != NULL)
-   sfc_mae_action_set_del(sa, spec_mae->action_set);
+   sfc_mae_outer_rule_del(sa, spec_mae->outer_rule);
+   sfc_mae_action_set_del(sa, spec_mae->action_set);
 
if (spec_mae->match_spec != NULL)
efx_mae_match_spec_fini(sa->nic, spec_mae->match_spec);
@@ -2575,9 +2602,7 @@ sfc_mae_rule_process_outer(struct sfc_adapter *sa,
rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
  &invalid_rule_id);

[PATCH v3 13/34] net/sfc: turn flow create/destroy methods into lock wrappers

2023-06-04 Thread Ivan Malov
Doing so facilitates driver-internal flow rework.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_flow.c | 42 ++
 drivers/net/sfc/sfc_flow.h |  9 
 2 files changed, 42 insertions(+), 9 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index fe1f5ba55f..432295ea62 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -2610,16 +2610,32 @@ sfc_flow_create(struct rte_eth_dev *dev,
struct rte_flow_error *error)
 {
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+   struct rte_flow *flow;
+
+   sfc_adapter_lock(sa);
+   flow = sfc_flow_create_locked(sa, attr, pattern, actions, error);
+   sfc_adapter_unlock(sa);
+
+   return flow;
+}
+
+struct rte_flow *
+sfc_flow_create_locked(struct sfc_adapter *sa,
+  const struct rte_flow_attr *attr,
+  const struct rte_flow_item pattern[],
+  const struct rte_flow_action actions[],
+  struct rte_flow_error *error)
+{
struct rte_flow *flow = NULL;
int rc;
 
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
+
flow = sfc_flow_zmalloc(error);
if (flow == NULL)
goto fail_no_mem;
 
-   sfc_adapter_lock(sa);
-
-   rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+   rc = sfc_flow_parse(sa->eth_dev, attr, pattern, actions, flow, error);
if (rc != 0)
goto fail_bad_value;
 
@@ -2631,8 +2647,6 @@ sfc_flow_create(struct rte_eth_dev *dev,
goto fail_flow_insert;
}
 
-   sfc_adapter_unlock(sa);
-
return flow;
 
 fail_flow_insert:
@@ -2640,7 +2654,6 @@ sfc_flow_create(struct rte_eth_dev *dev,
 
 fail_bad_value:
sfc_flow_free(sa, flow);
-   sfc_adapter_unlock(sa);
 
 fail_no_mem:
return NULL;
@@ -2652,10 +2665,23 @@ sfc_flow_destroy(struct rte_eth_dev *dev,
 struct rte_flow_error *error)
 {
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+   int rc;
+
+   sfc_adapter_lock(sa);
+   rc = sfc_flow_destroy_locked(sa, flow, error);
+   sfc_adapter_unlock(sa);
+
+   return rc;
+}
+
+int
+sfc_flow_destroy_locked(struct sfc_adapter *sa, struct rte_flow *flow,
+   struct rte_flow_error *error)
+{
struct rte_flow *flow_ptr;
int rc = EINVAL;
 
-   sfc_adapter_lock(sa);
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
 
TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
if (flow_ptr == flow)
@@ -2675,8 +2701,6 @@ sfc_flow_destroy(struct rte_eth_dev *dev,
sfc_flow_free(sa, flow);
 
 fail_bad_value:
-   sfc_adapter_unlock(sa);
-
return -rc;
 }
 
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index 12875344b5..a3ca09f225 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -195,6 +195,15 @@ typedef int (sfc_flow_query_cb_t)(struct rte_eth_dev *dev,
  void *data,
  struct rte_flow_error *error);
 
+struct rte_flow *sfc_flow_create_locked(struct sfc_adapter *sa,
+   const struct rte_flow_attr *attr,
+   const struct rte_flow_item pattern[],
+   const struct rte_flow_action actions[],
+   struct rte_flow_error *error);
+
+int sfc_flow_destroy_locked(struct sfc_adapter *sa, struct rte_flow *flow,
+   struct rte_flow_error *error);
+
 #ifdef __cplusplus
 }
 #endif
-- 
2.30.2



[PATCH v3 14/34] net/sfc: let driver-internal flows use VF representor action

2023-06-04 Thread Ivan Malov
In the case of VF <--> VF representor pairs, these flows can
only collect VF traffic, so let them use generic flow action
PORT_REPRESENTOR, as part of re-using generic flow mechanism.

Currently, it does not allow access to VF representors since
they have no unique HW logical ports (m-ports). They all sit
on the same (representor proxy) m-port, while demultiplexing
of traffic uses ingress (VF) m-port value in packet metadata.
Traffic from arbitrary sources cannot be identified this way.
But, for VF traffic, it should be right to make an exception.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c| 25 +++--
 drivers/net/sfc/sfc_repr.c   | 20 
 drivers/net/sfc/sfc_repr_proxy.c | 15 +++
 drivers/net/sfc/sfc_repr_proxy_api.h |  3 +++
 drivers/net/sfc/sfc_switch.c |  7 ++-
 drivers/net/sfc/sfc_switch.h | 10 ++
 6 files changed, 65 insertions(+), 15 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 1928d58779..89fa75281f 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -1525,6 +1525,7 @@ sfc_mae_rule_parse_item_port_id(const struct 
rte_flow_item *item,
const struct rte_flow_item_port_id *spec = NULL;
const struct rte_flow_item_port_id *mask = NULL;
efx_mport_sel_t mport_sel;
+   unsigned int type_mask;
int rc;
 
if (ctx_mae->match_mport_set) {
@@ -1556,8 +1557,10 @@ sfc_mae_rule_parse_item_port_id(const struct 
rte_flow_item *item,
  "The port ID is too large");
}
 
+   type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
+
rc = sfc_mae_switch_get_ethdev_mport(ctx_mae->sa->mae.switch_domain_id,
-spec->id, &mport_sel);
+spec->id, type_mask, &mport_sel);
if (rc != 0) {
return rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -1590,6 +1593,7 @@ sfc_mae_rule_parse_item_ethdev_based(const struct 
rte_flow_item *item,
const struct rte_flow_item_ethdev *spec = NULL;
const struct rte_flow_item_ethdev *mask = NULL;
efx_mport_sel_t mport_sel;
+   unsigned int type_mask;
int rc;
 
if (ctx_mae->match_mport_set) {
@@ -1617,9 +1621,11 @@ sfc_mae_rule_parse_item_ethdev_based(const struct 
rte_flow_item *item,
 
switch (item->type) {
case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR:
+   type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
+
rc = sfc_mae_switch_get_ethdev_mport(
ctx_mae->sa->mae.switch_domain_id,
-   spec->port_id, &mport_sel);
+   spec->port_id, type_mask, &mport_sel);
if (rc != 0) {
return rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3529,6 +3535,7 @@ sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
 {
struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
struct sfc_mae *mae = &sa->mae;
+   unsigned int type_mask;
efx_mport_sel_t mport;
uint16_t port_id;
int rc;
@@ -3538,8 +3545,10 @@ sfc_mae_rule_parse_action_port_id(struct sfc_adapter *sa,
 
port_id = (conf->original != 0) ? sas->port_id : conf->id;
 
+   type_mask = 1U << SFC_MAE_SWITCH_PORT_INDEPENDENT;
+
rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
-port_id, &mport);
+port_id, type_mask, &mport);
if (rc != 0) {
sfc_err(sa, "failed to get m-port for the given ethdev 
(port_id=%u): %s",
port_id, strerror(rc));
@@ -3558,14 +3567,14 @@ sfc_mae_rule_parse_action_port_id(struct sfc_adapter 
*sa,
 static int
 sfc_mae_rule_parse_action_port_representor(struct sfc_adapter *sa,
const struct rte_flow_action_ethdev *conf,
-   efx_mae_actions_t *spec)
+   unsigned int type_mask, efx_mae_actions_t *spec)
 {
struct sfc_mae *mae = &sa->mae;
efx_mport_sel_t mport;
int rc;
 
rc = sfc_mae_switch_get_ethdev_mport(mae->switch_domain_id,
-conf->port_id, &mport);
+conf->port_id, type_mask, &mport);
if (rc != 0) {
sfc_err(sa, "failed to get m-port for the given ethdev 
(port_id=%u): %s",
conf->port_id, strerror(rc));
@@ -3641,6 +3650,7 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
const struct sfc_mae_outer_rule *outer_rule = spec_mae->outer_rule;
const uint64_t rx_metadata = sa->negotiated_rx_metadata;
   

[PATCH v3 15/34] net/sfc: extend generic flow API to allow for internal flows

2023-06-04 Thread Ivan Malov
At the moment, driver-internal flow rules are provisioned by
functions that are separate from the generic flow management
framework. In order to use the latter for such rules, extend
it accordingly. This will be actually used in the next patch.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc.c  |  9 -
 drivers/net/sfc/sfc_flow.c | 37 +++--
 drivers/net/sfc/sfc_flow.h |  5 -
 drivers/net/sfc/sfc_mae.c  | 31 +++
 drivers/net/sfc/sfc_mae.h  |  5 +++--
 5 files changed, 65 insertions(+), 22 deletions(-)

diff --git a/drivers/net/sfc/sfc.c b/drivers/net/sfc/sfc.c
index a56521696a..2cfff20f47 100644
--- a/drivers/net/sfc/sfc.c
+++ b/drivers/net/sfc/sfc.c
@@ -975,6 +975,8 @@ sfc_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_rss_attach;
 
+   sfc_flow_init(sa);
+
rc = sfc_flow_rss_attach(sa);
if (rc != 0)
goto fail_flow_rss_attach;
@@ -1006,8 +1008,6 @@ sfc_attach(struct sfc_adapter *sa)
sfc_log_init(sa, "fini nic");
efx_nic_fini(enp);
 
-   sfc_flow_init(sa);
-
rc = sfc_sw_xstats_init(sa);
if (rc != 0)
goto fail_sw_xstats_init;
@@ -1030,7 +1030,6 @@ sfc_attach(struct sfc_adapter *sa)
sfc_sw_xstats_close(sa);
 
 fail_sw_xstats_init:
-   sfc_flow_fini(sa);
sfc_repr_proxy_detach(sa);
 
 fail_repr_proxy_attach:
@@ -1052,6 +1051,7 @@ sfc_attach(struct sfc_adapter *sa)
sfc_flow_rss_detach(sa);
 
 fail_flow_rss_attach:
+   sfc_flow_fini(sa);
sfc_rss_detach(sa);
 
 fail_rss_attach:
@@ -1099,8 +1099,6 @@ sfc_detach(struct sfc_adapter *sa)
 
sfc_sriov_vswitch_destroy(sa);
 
-   sfc_flow_fini(sa);
-
sfc_repr_proxy_detach(sa);
sfc_mae_switchdev_fini(sa);
sfc_tbls_detach(sa);
@@ -1108,6 +1106,7 @@ sfc_detach(struct sfc_adapter *sa)
sfc_mae_counter_rxq_detach(sa);
sfc_filter_detach(sa);
sfc_flow_rss_detach(sa);
+   sfc_flow_fini(sa);
sfc_rss_detach(sa);
sfc_port_detach(sa);
sfc_ev_detach(sa);
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 432295ea62..f6d1ae2a5b 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -7,6 +7,8 @@
  * for Solarflare) and Solarflare Communications, Inc.
  */
 
+#include 
+
 #include 
 #include 
 #include 
@@ -2405,7 +2407,7 @@ sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
if (rc != 0)
goto fail;
 
-   rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
+   rc = sfc_mae_rule_parse_pattern(sa, pattern, flow, error);
if (rc != 0)
goto fail;
 
@@ -2421,7 +2423,7 @@ sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
 */
}
 
-   rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
+   rc = sfc_mae_rule_parse_actions(sa, actions, flow, error);
if (rc != 0)
goto fail;
 
@@ -2613,14 +2615,14 @@ sfc_flow_create(struct rte_eth_dev *dev,
struct rte_flow *flow;
 
sfc_adapter_lock(sa);
-   flow = sfc_flow_create_locked(sa, attr, pattern, actions, error);
+   flow = sfc_flow_create_locked(sa, false, attr, pattern, actions, error);
sfc_adapter_unlock(sa);
 
return flow;
 }
 
 struct rte_flow *
-sfc_flow_create_locked(struct sfc_adapter *sa,
+sfc_flow_create_locked(struct sfc_adapter *sa, bool internal,
   const struct rte_flow_attr *attr,
   const struct rte_flow_item pattern[],
   const struct rte_flow_action actions[],
@@ -2635,13 +2637,15 @@ sfc_flow_create_locked(struct sfc_adapter *sa,
if (flow == NULL)
goto fail_no_mem;
 
+   flow->internal = internal;
+
rc = sfc_flow_parse(sa->eth_dev, attr, pattern, actions, flow, error);
if (rc != 0)
goto fail_bad_value;
 
TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
 
-   if (sa->state == SFC_ETHDEV_STARTED) {
+   if (flow->internal || sa->state == SFC_ETHDEV_STARTED) {
rc = sfc_flow_insert(sa, flow, error);
if (rc != 0)
goto fail_flow_insert;
@@ -2694,7 +2698,7 @@ sfc_flow_destroy_locked(struct sfc_adapter *sa, struct 
rte_flow *flow,
goto fail_bad_value;
}
 
-   if (sa->state == SFC_ETHDEV_STARTED)
+   if (flow->internal || sa->state == SFC_ETHDEV_STARTED)
rc = sfc_flow_remove(sa, flow, error);
 
TAILQ_REMOVE(&sa->flow_list, flow, entries);
@@ -2711,10 +2715,14 @@ sfc_flow_flush(struct rte_eth_dev *dev,
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct rte_flow *flow;
int ret = 0;
+   void *tmp;
 
sfc_adapter_lock(sa);
 
-   while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
+   RTE_TAILQ_F

[PATCH v3 16/34] net/sfc: switch driver-internal flows to use generic methods

2023-06-04 Thread Ivan Malov
Doing so helps to consolidate flow operation and ensure that
every FW-allocatable resource can be shared by several flows.
That is useful in the light of upcoming support for embedded
conntrack assistance, where several flows will ideally share
everything but unique 5-tuple entries in the conntrack table.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c| 186 +++
 drivers/net/sfc/sfc_mae.h|  51 ++---
 drivers/net/sfc/sfc_repr_proxy.c |  38 ++-
 drivers/net/sfc/sfc_repr_proxy.h |   2 +-
 4 files changed, 61 insertions(+), 216 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index f7bf682c11..51b2a22357 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -74,137 +74,48 @@ sfc_mae_counter_registry_fini(struct 
sfc_mae_counter_registry *registry)
	sfc_mae_counters_fini(&registry->counters);
 }
 
-static int
-sfc_mae_internal_rule_find_empty_slot(struct sfc_adapter *sa,
- struct sfc_mae_rule **rule)
-{
-   struct sfc_mae *mae = &sa->mae;
-   struct sfc_mae_internal_rules *internal_rules = &mae->internal_rules;
-   unsigned int entry;
-   int rc;
-
-   for (entry = 0; entry < SFC_MAE_NB_RULES_MAX; entry++) {
-   if (internal_rules->rules[entry].spec == NULL)
-   break;
-   }
-
-   if (entry == SFC_MAE_NB_RULES_MAX) {
-   rc = ENOSPC;
-   sfc_err(sa, "failed too many rules (%u rules used)", entry);
-   goto fail_too_many_rules;
-   }
-
-   *rule = &internal_rules->rules[entry];
-
-   return 0;
-
-fail_too_many_rules:
-   return rc;
-}
-
-int
-sfc_mae_rule_add_mport_match_deliver(struct sfc_adapter *sa,
-const efx_mport_sel_t *mport_match,
-const efx_mport_sel_t *mport_deliver,
-int prio, struct sfc_mae_rule **rulep)
+struct rte_flow *
+sfc_mae_repr_flow_create(struct sfc_adapter *sa, int prio, uint16_t port_id,
+enum rte_flow_action_type dst_type,
+enum rte_flow_item_type src_type)
 {
+   const struct rte_flow_item_ethdev item_spec = { .port_id = port_id };
+   const struct rte_flow_action_ethdev action = { .port_id = port_id };
+   const void *item_mask = &rte_flow_item_ethdev_mask;
+   struct rte_flow_attr attr = { .transfer = 1 };
+   const struct rte_flow_action actions[] = {
+   { .type = dst_type, .conf = &action },
+   { .type = RTE_FLOW_ACTION_TYPE_END }
+   };
+   const struct rte_flow_item items[] = {
+   { .type = src_type, .mask = item_mask, .spec = &item_spec },
+   { .type = RTE_FLOW_ITEM_TYPE_END }
+   };
struct sfc_mae *mae = &sa->mae;
-   struct sfc_mae_rule *rule;
-   int rc;
-
-   sfc_log_init(sa, "entry");
+   struct rte_flow_error error;
 
if (prio > 0 && (unsigned int)prio >= mae->nb_action_rule_prios_max) {
-   rc = EINVAL;
sfc_err(sa, "failed: invalid priority %d (max %u)", prio,
mae->nb_action_rule_prios_max);
-   goto fail_invalid_prio;
+   return NULL;
}
if (prio < 0)
prio = mae->nb_action_rule_prios_max - 1;
 
-   rc = sfc_mae_internal_rule_find_empty_slot(sa, &rule);
-   if (rc != 0)
-   goto fail_find_empty_slot;
-
-   sfc_log_init(sa, "init MAE match spec");
-   rc = efx_mae_match_spec_init(sa->nic, EFX_MAE_RULE_ACTION,
-(uint32_t)prio, &rule->spec);
-   if (rc != 0) {
-   sfc_err(sa, "failed to init MAE match spec");
-   goto fail_match_init;
-   }
-
-   rc = efx_mae_match_spec_mport_set(rule->spec, mport_match, NULL);
-   if (rc != 0) {
-   sfc_err(sa, "failed to get MAE match mport selector");
-   goto fail_mport_set;
-   }
-
-   rc = efx_mae_action_set_spec_init(sa->nic, &rule->actions);
-   if (rc != 0) {
-   sfc_err(sa, "failed to init MAE action set");
-   goto fail_action_init;
-   }
-
-   rc = efx_mae_action_set_populate_deliver(rule->actions,
-mport_deliver);
-   if (rc != 0) {
-   sfc_err(sa, "failed to populate deliver action");
-   goto fail_populate_deliver;
-   }
-
-   rc = efx_mae_action_set_alloc(sa->nic, rule->actions,
- &rule->action_set);
-   if (rc != 0) {
-   sfc_err(sa, "failed to allocate action set");
-   goto fail_action_set_alloc;
-   }
-
-   rc = efx_mae_action_rule_insert(sa->nic, rule->spec, NULL,
-   &rule->action_set,
-  

[PATCH v3 17/34] net/sfc: move MAE flow parsing method to MAE-specific source

2023-06-04 Thread Ivan Malov
Doing so will facilitate easier code restructure in the next
patches required to rework flow housekeeping and indirection.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_flow.c | 47 +-
 drivers/net/sfc/sfc_mae.c  | 58 --
 drivers/net/sfc/sfc_mae.h  | 14 -
 3 files changed, 63 insertions(+), 56 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index f6d1ae2a5b..6dfbbfd022 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -2395,53 +2395,8 @@ sfc_flow_parse_rte_to_mae(struct rte_eth_dev *dev,
  struct rte_flow_error *error)
 {
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
-   struct sfc_flow_spec *spec = &flow->spec;
-   struct sfc_flow_spec_mae *spec_mae = &spec->mae;
-   int rc;
-
-   /*
-* If the flow is meant to be a TUNNEL rule in a FT context,
-* preparse its actions and save its properties in spec_mae.
-*/
-   rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
-   if (rc != 0)
-   goto fail;
-
-   rc = sfc_mae_rule_parse_pattern(sa, pattern, flow, error);
-   if (rc != 0)
-   goto fail;
-
-   if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
-   /*
-* By design, this flow should be represented solely by the
-* outer rule. But the HW/FW hasn't got support for setting
-* Rx mark from RECIRC_ID on outer rule lookup yet. Neither
-* does it support outer rule counters. As a workaround, an
-* action rule of lower priority is used to do the job.
-*
-* So don't skip sfc_mae_rule_parse_actions() below.
-*/
-   }
-
-   rc = sfc_mae_rule_parse_actions(sa, actions, flow, error);
-   if (rc != 0)
-   goto fail;
-
-   if (spec_mae->ft_ctx != NULL) {
-   if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
-   spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
 
-   ++(spec_mae->ft_ctx->refcnt);
-   }
-
-   return 0;
-
-fail:
-   /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
-   spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
-   spec_mae->ft_ctx = NULL;
-
-   return rc;
+   return sfc_mae_rule_parse(sa, pattern, actions, flow, error);
 }
 
 static int
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 51b2a22357..e2f098ea53 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -2745,7 +2745,7 @@ sfc_mae_rule_encap_parse_fini(struct sfc_adapter *sa,
efx_mae_match_spec_fini(sa->nic, ctx->match_spec_outer);
 }
 
-int
+static int
 sfc_mae_rule_parse_pattern(struct sfc_adapter *sa,
   const struct rte_flow_item pattern[],
   struct rte_flow *flow,
@@ -3770,7 +3770,7 @@ sfc_mae_process_encap_header(struct sfc_adapter *sa,
return sfc_mae_encap_header_add(sa, bounce_eh, encap_headerp);
 }
 
-int
+static int
 sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
   const struct rte_flow_action actions[],
   struct rte_flow *flow,
@@ -3933,6 +3933,60 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
return rc;
 }
 
+int
+sfc_mae_rule_parse(struct sfc_adapter *sa, const struct rte_flow_item 
pattern[],
+  const struct rte_flow_action actions[],
+  struct rte_flow *flow, struct rte_flow_error *error)
+{
+   struct sfc_flow_spec *spec = &flow->spec;
+   struct sfc_flow_spec_mae *spec_mae = &spec->mae;
+   int rc;
+
+   /*
+* If the flow is meant to be a TUNNEL rule in a FT context,
+* preparse its actions and save its properties in spec_mae.
+*/
+   rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
+   if (rc != 0)
+   goto fail;
+
+   rc = sfc_mae_rule_parse_pattern(sa, pattern, flow, error);
+   if (rc != 0)
+   goto fail;
+
+   if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+   /*
+* By design, this flow should be represented solely by the
+* outer rule. But the HW/FW hasn't got support for setting
+* Rx mark from RECIRC_ID on outer rule lookup yet. Neither
+* does it support outer rule counters. As a workaround, an
+* action rule of lower priority is used to do the job.
+*
+* So don't skip sfc_mae_rule_parse_actions() below.
+*/
+   }
+
+   rc = sfc_mae_rule_parse_actions(sa, actions, flow, error);
+   if (rc != 0)
+   goto fail;
+
+   if (spec_mae->ft_ctx != NULL) {
+   if (spec_mae->ft_rule_type == SFC_FT_RU

[PATCH v3 19/34] net/sfc: prepare MAE outer rules for action rule indirection

2023-06-04 Thread Ivan Malov
Flows provided by match-action engine (MAE) will be reworked
by the next patch to make action rule (AR) entries shareable.
To ensure correct AR specification comparison on attach path,
augment the way outer rules (OR) are handled, namely, how OR
IDs are indicated in an AR specification on parse and disable.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 44 ---
 1 file changed, 32 insertions(+), 12 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 37292f5d7c..624be53269 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -397,8 +397,10 @@ sfc_mae_outer_rule_enable(struct sfc_adapter *sa,
 
 static void
 sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
-  struct sfc_mae_outer_rule *rule)
+  struct sfc_mae_outer_rule *rule,
+  efx_mae_match_spec_t *match_spec_action)
 {
+   efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
@@ -409,6 +411,18 @@ sfc_mae_outer_rule_disable(struct sfc_adapter *sa,
 
fw_rsrc = &rule->fw_rsrc;
 
+   if (match_spec_action == NULL)
+   goto skip_action_rule;
+
+   rc = efx_mae_match_spec_outer_rule_id_set(match_spec_action,
+ &invalid_rule_id);
+   if (rc != 0) {
+   sfc_err(sa, "cannot restore match on invalid outer rule ID: %s",
+   strerror(rc));
+   return;
+   }
+
+skip_action_rule:
if (fw_rsrc->rule_id.id == EFX_MAE_RSRC_ID_INVALID ||
fw_rsrc->refcnt == 0) {
sfc_err(sa, "failed to disable outer_rule=%p: already disabled; 
OR_ID=0x%08x, refcnt=%u",
@@ -2457,7 +2471,7 @@ sfc_mae_rule_process_outer(struct sfc_adapter *sa,
   struct sfc_mae_outer_rule **rulep,
   struct rte_flow_error *error)
 {
-   efx_mae_rule_id_t invalid_rule_id = { .id = EFX_MAE_RSRC_ID_INVALID };
+   efx_mae_rule_id_t or_id = { .id = EFX_MAE_RSRC_ID_INVALID };
int rc;
 
if (ctx->internal) {
@@ -2504,13 +2518,20 @@ sfc_mae_rule_process_outer(struct sfc_adapter *sa,
/* The spec has now been tracked by the outer rule entry. */
ctx->match_spec_outer = NULL;
 
+   or_id.id = (*rulep)->fw_rsrc.rule_id.id;
+
 no_or_id:
switch (ctx->ft_rule_type) {
case SFC_FT_RULE_NONE:
break;
case SFC_FT_RULE_TUNNEL:
-   /* No action rule */
-   return 0;
+   /*
+* Workaround. TUNNEL flows are not supposed to involve
+* MAE action rules, but, due to the currently limited
+* HW/FW implementation, action rules are still needed.
+* See sfc_mae_rule_parse_pattern().
+*/
+   break;
case SFC_FT_RULE_SWITCH:
/*
 * Match on recirculation ID rather than
@@ -2536,14 +2557,13 @@ sfc_mae_rule_process_outer(struct sfc_adapter *sa,
 * outer rule table. Set OR_ID match field to 0x/0x
 * in the action rule specification; this ensures correct behaviour.
 *
-* If, on the other hand, this flow does have an outer rule, its ID
-* may be unknown at the moment (not yet allocated), but OR_ID mask
-* has to be set to 0x anyway for correct class comparisons.
-* When the outer rule has been allocated, this match field will be
-* overridden by sfc_mae_outer_rule_enable() to use the right value.
+* If, however, this flow does have an outer rule, OR_ID match must
+* be set to the currently known value for that outer rule. It will
+* be either 0x or some valid ID, depending on whether this
+* outer rule is currently active (adapter state is STARTED) or not.
 */
rc = efx_mae_match_spec_outer_rule_id_set(ctx->match_spec_action,
- &invalid_rule_id);
+ &or_id);
if (rc != 0) {
sfc_mae_outer_rule_del(sa, *rulep);
*rulep = NULL;
@@ -4170,7 +4190,7 @@ sfc_mae_flow_insert(struct sfc_adapter *sa,
sfc_mae_action_set_disable(sa, action_set);
 
 fail_action_set_enable:
-   sfc_mae_outer_rule_disable(sa, outer_rule);
+   sfc_mae_outer_rule_disable(sa, outer_rule, spec_mae->match_spec);
 
 fail_outer_rule_enable:
return rc;
@@ -4205,7 +4225,7 @@ sfc_mae_flow_remove(struct sfc_adapter *sa,
sfc_mae_action_set_disable(sa, action_set);
 
 skip_action_rule:
-   sfc_mae_outer_rule_disable(sa, outer_rule);
+   sfc_mae_outer_rule_disable(sa, outer_rule, spec_mae->match_spec);
 
return 0;
 }
-- 
2.30.2



[PATCH v3 18/34] net/sfc: move MAE counter stream start to action set handler

2023-06-04 Thread Ivan Malov
Logically, starting flow counter streaming belongs in action
set enable path. Move it there as a preparation step for the
patch that will make action rules shareable by several flows.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 22 --
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index e2f098ea53..37292f5d7c 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -1063,6 +1063,18 @@ sfc_mae_action_set_enable(struct sfc_adapter *sa,
return rc;
}
 
+   if (action_set->n_counters > 0) {
+   rc = sfc_mae_counter_start(sa);
+   if (rc != 0) {
+   sfc_err(sa, "failed to start MAE counters 
support: %s",
+   rte_strerror(rc));
+   sfc_mae_encap_header_disable(sa, encap_header);
+   sfc_mae_mac_addr_disable(sa, src_mac_addr);
+   sfc_mae_mac_addr_disable(sa, dst_mac_addr);
+   return rc;
+   }
+   }
+
rc = sfc_mae_counters_enable(sa, counters,
 action_set->n_counters,
 action_set->spec);
@@ -4141,15 +4153,6 @@ sfc_mae_flow_insert(struct sfc_adapter *sa,
if (rc != 0)
goto fail_action_set_enable;
 
-   if (action_set->n_counters > 0) {
-   rc = sfc_mae_counter_start(sa);
-   if (rc != 0) {
-   sfc_err(sa, "failed to start MAE counters support: %s",
-   rte_strerror(rc));
-   goto fail_mae_counter_start;
-   }
-   }
-
fw_rsrc = &action_set->fw_rsrc;
 
rc = efx_mae_action_rule_insert(sa->nic, spec_mae->match_spec,
@@ -4164,7 +4167,6 @@ sfc_mae_flow_insert(struct sfc_adapter *sa,
return 0;
 
 fail_action_rule_insert:
-fail_mae_counter_start:
sfc_mae_action_set_disable(sa, action_set);
 
 fail_action_set_enable:
-- 
2.30.2



[PATCH v3 20/34] net/sfc: turn MAE flow action rules into shareable resources

2023-06-04 Thread Ivan Malov
Later patches of the series provide support for HW conntrack
assistance. With the new feature, multiple flows that differ
in the 5-tuple match fields but are otherwise identical will
be able to share all FW-allocatable objects except for those
of the conntrack table. That will boost flow engine capacity.

To prepare for that, action rules of the match-action engine
have to be turned into shareable objects, from SW standpoint.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_flow.c |   4 +-
 drivers/net/sfc/sfc_flow.h |  13 +-
 drivers/net/sfc/sfc_mae.c  | 362 +++--
 drivers/net/sfc/sfc_mae.h  |  13 ++
 4 files changed, 287 insertions(+), 105 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 6dfbbfd022..0abeabfbf2 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -1294,9 +1294,7 @@ sfc_flow_parse_attr(struct sfc_adapter *sa,
}
spec->type = SFC_FLOW_SPEC_MAE;
spec_mae->priority = attr->priority;
-   spec_mae->match_spec = NULL;
-   spec_mae->action_set = NULL;
-   spec_mae->rule_id.id = EFX_MAE_RSRC_ID_INVALID;
+   spec_mae->action_rule = NULL;
}
 
return 0;
diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index ec5e29f257..10c73d012f 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -75,14 +75,13 @@ struct sfc_flow_spec_mae {
struct sfc_ft_ctx   *ft_ctx;
/* Desired priority level */
unsigned intpriority;
-   /* Outer rule registry entry */
+   /*
+* Outer rule registry entry (points to below action_rule->outer_rule
+* when action_rule is not NULL; self-sufficient entry otherwise)
+*/
struct sfc_mae_outer_rule   *outer_rule;
-   /* EFX match specification */
-   efx_mae_match_spec_t*match_spec;
-   /* Action set registry entry */
-   struct sfc_mae_action_set   *action_set;
-   /* Firmware-allocated rule ID */
-   efx_mae_rule_id_t   rule_id;
+   /* Action rule registry entry */
+   struct sfc_mae_action_rule  *action_rule;
 };
 
 /* Flow specification */
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 624be53269..addcad2843 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -204,6 +204,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
TAILQ_INIT(&mae->mac_addrs);
TAILQ_INIT(&mae->encap_headers);
TAILQ_INIT(&mae->action_sets);
+   TAILQ_INIT(&mae->action_rules);
 
if (encp->enc_mae_admin)
mae->status = SFC_MAE_STATUS_ADMIN;
@@ -1172,6 +1173,200 @@ sfc_mae_action_set_disable(struct sfc_adapter *sa,
--(fw_rsrc->refcnt);
 }
 
+struct sfc_mae_action_rule_ctx {
+   struct sfc_mae_outer_rule   *outer_rule;
+   struct sfc_mae_action_set   *action_set;
+   efx_mae_match_spec_t*match_spec;
+};
+
+static int
+sfc_mae_action_rule_attach(struct sfc_adapter *sa,
+  const struct sfc_mae_action_rule_ctx *ctx,
+  struct sfc_mae_action_rule **rulep,
+  __rte_unused struct rte_flow_error *error)
+{
+   struct sfc_mae_action_rule *rule;
+
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+   /*
+* It is assumed that the caller of this helper has already properly
+* tailored ctx->match_spec to match on OR_ID / 0x (when
+* ctx->outer_rule refers to a currently active outer rule) or
+* on 0x / 0x, so that specs compare correctly.
+*/
+   TAILQ_FOREACH(rule, &sa->mae.action_rules, entries) {
+   if (rule->outer_rule != ctx->outer_rule ||
+   rule->action_set != ctx->action_set)
+   continue;
+
+   if (efx_mae_match_specs_equal(rule->match_spec,
+ ctx->match_spec)) {
+   sfc_dbg(sa, "attaching to action_rule=%p", rule);
+   ++(rule->refcnt);
+   *rulep = rule;
+   return 0;
+   }
+   }
+
+   /*
+* No need to set RTE error, as this
+* code should be handled gracefully.
+*/
+   return -ENOENT;
+}
+
+static int
+sfc_mae_action_rule_add(struct sfc_adapter *sa,
+   const struct sfc_mae_action_rule_ctx *ctx,
+   struct sfc_mae_action_rule **rulep)
+{
+   struct sfc_mae_action_rule *rule;
+   struct sfc_mae *mae = &sa->mae;
+
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
+
+   rule = rte_zmalloc("sfc_mae_action_rule", sizeof(*rule), 0);
+   if (rule == NULL)
+   return ENOMEM;
+
+   rule->refcnt = 1;
+   rule->outer_ru

[PATCH v3 21/34] common/sfc_efx/base: provide an API to clone MAE match specs

2023-06-04 Thread Ivan Malov
The DPDK driver would like to have a means to make a copy of
the action rule match specification before trying to dissect
it to possibly move out the per-connection 5-tuple data from
it to build up an entry in the HW conntrack assistance table.

Making such a copy at the end of parsing should be preferred
over maintaining DPDK-level structures because the resulting
code is easier on eyes and less prone to errors in this case.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h |  7 +++
 drivers/common/sfc_efx/base/efx_mae.c | 26 ++
 drivers/common/sfc_efx/version.map|  1 +
 3 files changed, 34 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index f96e398460..ee1ea81a35 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4452,6 +4452,13 @@ efx_mae_match_spec_recirc_id_set(
__inefx_mae_match_spec_t *spec,
__inuint8_t recirc_id);
 
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_match_spec_clone(
+   __inefx_nic_t *enp,
+   __inefx_mae_match_spec_t *orig,
+   __out   efx_mae_match_spec_t **clonep);
+
 LIBEFX_API
 extern __checkReturn   boolean_t
 efx_mae_match_specs_equal(
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index 7732d2..43dfba518a 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -1163,6 +1163,32 @@ efx_mae_match_spec_mport_set(
 
 fail2:
EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_mae_match_spec_clone(
+   __inefx_nic_t *enp,
+   __inefx_mae_match_spec_t *orig,
+   __out   efx_mae_match_spec_t **clonep)
+{
+   efx_mae_match_spec_t *clone;
+   efx_rc_t rc;
+
+   EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (*clone), clone);
+   if (clone == NULL) {
+   rc = ENOMEM;
+   goto fail1;
+   }
+
+   memcpy(clone, orig, sizeof (efx_mae_match_spec_t));
+
+   *clonep = clone;
+
+   return (0);
+
 fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index d083a54a03..931d556e80 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -124,6 +124,7 @@ INTERNAL {
efx_mae_mac_addr_alloc;
efx_mae_mac_addr_free;
efx_mae_match_spec_bit_set;
+   efx_mae_match_spec_clone;
efx_mae_match_spec_field_set;
efx_mae_match_spec_fini;
efx_mae_match_spec_init;
-- 
2.30.2



[PATCH v3 22/34] common/sfc_efx/base: add API to read back MAE match criteria

2023-06-04 Thread Ivan Malov
Later patches of the series provide support for HW conntrack
assistance in the DPDK driver. In order to detect flows that
are subject to such assistance, the driver needs to retrieve
5-tuple match data from an already constructed specification.

A dedicated API to selectively read back match criteria will
make a neat solution to keep the implementation less complex.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h |  10 ++
 drivers/common/sfc_efx/base/efx_mae.c | 131 ++
 drivers/common/sfc_efx/version.map|   1 +
 3 files changed, 142 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index ee1ea81a35..8c6095f747 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4430,6 +4430,16 @@ efx_mae_match_spec_field_set(
__insize_t mask_size,
__in_bcount(mask_size)  const uint8_t *mask);
 
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_match_spec_field_get(
+   __inconst efx_mae_match_spec_t *spec,
+   __inefx_mae_field_id_t field_id,
+   __insize_t value_size,
+   __out_bcount_opt(value_size)uint8_t *value,
+   __insize_t mask_size,
+   __out_bcount_opt(mask_size) uint8_t *mask);
+
 /* The corresponding mask will be set to B_TRUE. */
 LIBEFX_API
 extern __checkReturn   efx_rc_t
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index 43dfba518a..011f38d298 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -1054,6 +1054,137 @@ efx_mae_match_spec_field_set(
 
return (0);
 
+fail5:
+   EFSYS_PROBE(fail5);
+fail4:
+   EFSYS_PROBE(fail4);
+fail3:
+   EFSYS_PROBE(fail3);
+fail2:
+   EFSYS_PROBE(fail2);
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_mae_match_spec_field_get(
+   __inconst efx_mae_match_spec_t *spec,
+   __inefx_mae_field_id_t field_id,
+   __insize_t value_size,
+   __out_bcount_opt(value_size)uint8_t *value,
+   __insize_t mask_size,
+   __out_bcount_opt(mask_size) uint8_t *mask)
+{
+   const efx_mae_mv_desc_t *descp;
+   unsigned int desc_set_nentries;
+   const uint8_t *mvp;
+   efx_rc_t rc;
+
+   switch (spec->emms_type) {
+   case EFX_MAE_RULE_OUTER:
+   desc_set_nentries =
+   EFX_ARRAY_SIZE(__efx_mae_outer_rule_mv_desc_set);
+   descp = &__efx_mae_outer_rule_mv_desc_set[field_id];
+   mvp = spec->emms_mask_value_pairs.outer;
+   break;
+   case EFX_MAE_RULE_ACTION:
+   desc_set_nentries =
+   EFX_ARRAY_SIZE(__efx_mae_action_rule_mv_desc_set);
+   descp = &__efx_mae_action_rule_mv_desc_set[field_id];
+   mvp = spec->emms_mask_value_pairs.action;
+   break;
+   default:
+   rc = ENOTSUP;
+   goto fail1;
+   }
+
+   if ((unsigned int)field_id >= desc_set_nentries) {
+   rc = EINVAL;
+   goto fail2;
+   }
+
+   if (descp->emmd_mask_size == 0) {
+   /* The ID points to a gap in the array of field descriptors. */
+   rc = EINVAL;
+   goto fail3;
+   }
+
+   if (value != NULL && value_size != descp->emmd_value_size) {
+   rc = EINVAL;
+   goto fail4;
+   }
+
+   if (mask != NULL && mask_size != descp->emmd_mask_size) {
+   rc = EINVAL;
+   goto fail5;
+   }
+
+   if (value == NULL && value_size != 0) {
+   rc = EINVAL;
+   goto fail6;
+   }
+
+   if (mask == NULL && mask_size != 0) {
+   rc = EINVAL;
+   goto fail7;
+   }
+
+   if (descp->emmd_endianness == EFX_MAE_FIELD_BE) {
+   /*
+* The MCDI request field is in network (big endian) order.
+* The mask/value are also big endian.
+*/
+   memcpy(value, mvp + descp->emmd_value_offset, value_size);
+   memcpy(mask, mvp + descp->emmd_mask_offset, mask_size);
+   } else {
+   efx_dword_t dword;
+
+   /*
+* The MCDI request field is little endian.
+* The mask/value are in host byte order.
+*/
+   switch (value_size) {
+   case 4:
+   memcpy(&dword, mvp + descp->emmd_value_offset,
+   sizeof (dword));
+
+   

[PATCH v3 23/34] common/sfc_efx/base: match on conntrack mark in action rules

2023-06-04 Thread Ivan Malov
EF100 match-action engine (MAE) has conntrack assistance
table. A hit in this table can provide a mark value for
the following lookup stage, which is action rule lookup.

Provide support for setting match on conntrack mark.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h |  7 +++
 drivers/common/sfc_efx/base/efx_mae.c | 28 +++
 drivers/common/sfc_efx/version.map|  1 +
 3 files changed, 36 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 8c6095f747..dd9d4f29e8 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4276,6 +4276,7 @@ typedef enum efx_mae_field_id_e {
 * or by using dedicated field-specific helper APIs.
 */
EFX_MAE_FIELD_RECIRC_ID,
+   EFX_MAE_FIELD_CT_MARK,
EFX_MAE_FIELD_NIDS
 } efx_mae_field_id_t;
 
@@ -4462,6 +4463,12 @@ efx_mae_match_spec_recirc_id_set(
__inefx_mae_match_spec_t *spec,
__inuint8_t recirc_id);
 
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_match_spec_ct_mark_set(
+   __inefx_mae_match_spec_t *spec,
+   __inuint32_t ct_mark);
+
 LIBEFX_API
 extern __checkReturn   efx_rc_t
 efx_mae_match_spec_clone(
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index 011f38d298..b00ed2ec7a 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -474,6 +474,7 @@ typedef enum efx_mae_field_cap_id_e {
EFX_MAE_FIELD_ID_ENC_HAS_OVLAN = MAE_FIELD_ENC_HAS_OVLAN,
EFX_MAE_FIELD_ID_ENC_HAS_IVLAN = MAE_FIELD_ENC_HAS_IVLAN,
EFX_MAE_FIELD_ID_RECIRC_ID = MAE_FIELD_RECIRC_ID,
+   EFX_MAE_FIELD_ID_CT_MARK = MAE_FIELD_CT_MARK,
 
EFX_MAE_FIELD_CAP_NIDS
 } efx_mae_field_cap_id_t;
@@ -549,6 +550,7 @@ static const efx_mae_mv_desc_t 
__efx_mae_action_rule_mv_desc_set[] = {
EFX_MAE_MV_DESC(ENC_VNET_ID_BE, EFX_MAE_FIELD_BE),
EFX_MAE_MV_DESC(OUTER_RULE_ID, EFX_MAE_FIELD_LE),
EFX_MAE_MV_DESC(RECIRC_ID, EFX_MAE_FIELD_LE),
+   EFX_MAE_MV_DESC(CT_MARK, EFX_MAE_FIELD_LE),
 
 #undef EFX_MAE_MV_DESC
 };
@@ -910,6 +912,32 @@ efx_mae_match_spec_recirc_id_set(
 
return (0);
 
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_mae_match_spec_ct_mark_set(
+   __inefx_mae_match_spec_t *spec,
+   __inuint32_t ct_mark)
+{
+   uint32_t full_mask = UINT32_MAX;
+   const uint8_t *vp;
+   const uint8_t *mp;
+   efx_rc_t rc;
+
+   mp = (const uint8_t *)&full_mask;
+   vp = (const uint8_t *)&ct_mark;
+
+   rc = efx_mae_match_spec_field_set(spec, EFX_MAE_FIELD_CT_MARK,
+ sizeof (ct_mark), vp,
+ sizeof (full_mask), mp);
+   if (rc != 0)
+   goto fail1;
+
+   return (0);
+
 fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index b1ca8e1215..d972896210 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -125,6 +125,7 @@ INTERNAL {
efx_mae_mac_addr_free;
efx_mae_match_spec_bit_set;
efx_mae_match_spec_clone;
+   efx_mae_match_spec_ct_mark_set;
efx_mae_match_spec_field_get;
efx_mae_match_spec_field_set;
efx_mae_match_spec_fini;
-- 
2.30.2



[PATCH v3 24/34] common/sfc_efx/base: add API to request MAE conntrack lookup

2023-06-04 Thread Ivan Malov
Such a lookup can be initiated when a packet hits an outer rule.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h  |  9 +
 drivers/common/sfc_efx/base/efx_impl.h |  1 +
 drivers/common/sfc_efx/base/efx_mae.c  | 26 ++
 drivers/common/sfc_efx/version.map |  1 +
 4 files changed, 37 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index dd9d4f29e8..99ef0ce957 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4679,6 +4679,15 @@ efx_mae_outer_rule_recirc_id_set(
__inefx_mae_match_spec_t *spec,
__inuint8_t recirc_id);
 
+/*
+ * Request that packets hitting this rule be submitted
+ * for a lookup in the conntrack assistance table.
+ */
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_outer_rule_do_ct_set(
+   __inefx_mae_match_spec_t *spec);
+
 LIBEFX_API
 extern __checkReturn   efx_rc_t
 efx_mae_outer_rule_insert(
diff --git a/drivers/common/sfc_efx/base/efx_impl.h 
b/drivers/common/sfc_efx/base/efx_impl.h
index 9a5d465fa0..0a6a489d2c 100644
--- a/drivers/common/sfc_efx/base/efx_impl.h
+++ b/drivers/common/sfc_efx/base/efx_impl.h
@@ -1761,6 +1761,7 @@ struct efx_mae_match_spec_s {
uint8_t outer[MAE_ENC_FIELD_PAIRS_LEN];
} emms_mask_value_pairs;
uint8_t emms_outer_rule_recirc_id;
+   boolean_t   emms_outer_rule_do_ct;
 };
 
 typedef enum efx_mae_action_e {
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index b00ed2ec7a..546c743a02 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -2369,6 +2369,26 @@ efx_mae_outer_rule_recirc_id_set(
 
return (0);
 
+fail1:
+   EFSYS_PROBE1(fail1, efx_rc_t, rc);
+   return (rc);
+}
+
+   __checkReturn   efx_rc_t
+efx_mae_outer_rule_do_ct_set(
+   __inefx_mae_match_spec_t *spec)
+{
+   efx_rc_t rc;
+
+   if (spec->emms_type != EFX_MAE_RULE_OUTER) {
+   rc = EINVAL;
+   goto fail1;
+   }
+
+   spec->emms_outer_rule_do_ct = B_TRUE;
+
+   return (0);
+
 fail1:
EFSYS_PROBE1(fail1, efx_rc_t, rc);
return (rc);
@@ -2389,6 +2409,7 @@ efx_mae_outer_rule_insert(
uint32_t encap_type_mcdi;
efx_mae_rule_id_t or_id;
size_t offset;
+   uint8_t do_ct;
efx_rc_t rc;
 
EFX_STATIC_ASSERT(sizeof (or_idp->id) ==
@@ -2451,6 +2472,11 @@ efx_mae_outer_rule_insert(
MAE_OUTER_RULE_INSERT_IN_RECIRC_ID,
spec->emms_outer_rule_recirc_id);
 
+   do_ct = (spec->emms_outer_rule_do_ct == B_FALSE) ? 0 : 1;
+
+   MCDI_IN_SET_DWORD_FIELD(req, MAE_OUTER_RULE_INSERT_IN_LOOKUP_CONTROL,
+   MAE_OUTER_RULE_INSERT_IN_DO_CT, do_ct);
+
efx_mcdi_execute(enp, &req);
 
if (req.emr_rc != 0) {
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index d972896210..28a2be0a95 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -143,6 +143,7 @@ INTERNAL {
efx_mae_mport_free;
efx_mae_mport_id_by_selector;
efx_mae_mport_invalid;
+   efx_mae_outer_rule_do_ct_set;
efx_mae_outer_rule_insert;
efx_mae_outer_rule_recirc_id_set;
efx_mae_outer_rule_remove;
-- 
2.30.2



[PATCH v3 25/34] net/sfc: make use of conntrack assistance for transfer flows

2023-06-04 Thread Ivan Malov
On EF100 hardware, match-action engine (MAE) can be equipped
with an assistance table for connection tracking (CT). In it,
an entry key is a set of exact match fields: an EtherType, a
pair of IP addresses, a L4 protocol ID and a pair of L4 port
numbers. An entry response can provide matching packets with
a mark value and additional data to be plumbed to NAT action.
In addition, an update to a mark-and-sweep counter can be done.

This table was designed with larger capacity in mind,
so moving the above match criteria out of an action
rule (AR) specification to a CT entry increases the
likelihood of reusing AR entries and improves the
total flow engine capacity. Make use of that.

NAT and CT counters will be supported later.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_flow.h |   4 +
 drivers/net/sfc/sfc_mae.c  | 314 +++--
 drivers/net/sfc/sfc_mae.h  |   1 +
 3 files changed, 310 insertions(+), 9 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index 10c73d012f..8f706fc589 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -18,6 +18,7 @@
 #include "efx.h"
 
 #include "sfc_flow_rss.h"
+#include "sfc_mae_ct.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -82,6 +83,9 @@ struct sfc_flow_spec_mae {
struct sfc_mae_outer_rule   *outer_rule;
/* Action rule registry entry */
struct sfc_mae_action_rule  *action_rule;
+   /* Conntrack (CT) assistance table entry key and response */
+   sfc_mae_conntrack_response_tct_resp;
+   sfc_mae_conntrack_key_t ct_key;
 };
 
 /* Flow specification */
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index addcad2843..d3b4099213 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -18,6 +18,7 @@
 #include "sfc.h"
 #include "sfc_flow_tunnel.h"
 #include "sfc_mae_counter.h"
+#include "sfc_mae_ct.h"
 #include "sfc_log.h"
 #include "sfc_switch.h"
 #include "sfc_service.h"
@@ -1177,18 +1178,23 @@ struct sfc_mae_action_rule_ctx {
struct sfc_mae_outer_rule   *outer_rule;
struct sfc_mae_action_set   *action_set;
efx_mae_match_spec_t*match_spec;
+   uint32_tct_mark;
 };
 
 static int
 sfc_mae_action_rule_attach(struct sfc_adapter *sa,
-  const struct sfc_mae_action_rule_ctx *ctx,
+  struct sfc_mae_action_rule_ctx *ctx,
   struct sfc_mae_action_rule **rulep,
-  __rte_unused struct rte_flow_error *error)
+  struct rte_flow_error *error)
 {
+   uint32_t new_ct_mark = ctx->ct_mark;
struct sfc_mae_action_rule *rule;
+   int rc;
 
SFC_ASSERT(sfc_adapter_is_locked(sa));
 
+   SFC_ASSERT(ctx->ct_mark <= 1);
+
/*
 * It is assumed that the caller of this helper has already properly
 * tailored ctx->match_spec to match on OR_ID / 0x (when
@@ -1196,10 +1202,24 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
 * on 0x / 0x, so that specs compare correctly.
 */
TAILQ_FOREACH(rule, &sa->mae.action_rules, entries) {
+   if (rule->ct_mark == new_ct_mark)
+   ++new_ct_mark;
+
if (rule->outer_rule != ctx->outer_rule ||
-   rule->action_set != ctx->action_set)
+   rule->action_set != ctx->action_set ||
+   !!rule->ct_mark != !!ctx->ct_mark)
continue;
 
+   if (ctx->ct_mark != 0) {
+   rc = efx_mae_match_spec_ct_mark_set(ctx->match_spec,
+   rule->ct_mark);
+   if (rc != 0) {
+   return rte_flow_error_set(error, EFAULT,
+   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+   NULL, "AR: failed to set CT mark for 
comparison");
+   }
+   }
+
if (efx_mae_match_specs_equal(rule->match_spec,
  ctx->match_spec)) {
sfc_dbg(sa, "attaching to action_rule=%p", rule);
@@ -1209,6 +1229,24 @@ sfc_mae_action_rule_attach(struct sfc_adapter *sa,
}
}
 
+   if (ctx->ct_mark != 0) {
+   if (new_ct_mark == UINT32_MAX) {
+   return rte_flow_error_set(error, ERANGE,
+   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+   NULL, "AR: failed to allocate CT mark");
+   }
+
+   rc = efx_mae_match_spec_ct_mark_set(ctx->match_spec,
+   new_ct_mark);
+   if (rc != 0) {
+   return rte_flow_error_set(error

[PATCH v3 26/34] common/sfc_efx/base: support NAT edits in MAE

2023-06-04 Thread Ivan Malov
NAT goes after IP TTL decrement. It can operate
on the outermost frame only. In the case of
prior decapsulation, that maps to the frame
which was (originally) the inner one. Input
data for the action comes from the response
of the HW conntrack assistance table hit.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h  | 13 +
 drivers/common/sfc_efx/base/efx_impl.h |  1 +
 drivers/common/sfc_efx/base/efx_mae.c  | 17 +
 drivers/common/sfc_efx/version.map |  1 +
 4 files changed, 32 insertions(+)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 99ef0ce957..0a08b490e5 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4560,6 +4560,19 @@ extern   __checkReturn   efx_rc_t
 efx_mae_action_set_populate_decr_ip_ttl(
__inefx_mae_actions_t *spec);
 
+/*
+ * This only requests NAT action. The replacement IP address and
+ * L4 port number, as well as the edit direction (DST/SRC), come
+ * from the response to a hit in the conntrack assistance table.
+ *
+ * The action amends the outermost frame. In the case of prior
+ * decapsulation, that maps to the (originally) inner frame.
+ */
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_action_set_populate_nat(
+   __inefx_mae_actions_t *spec);
+
 LIBEFX_API
 extern __checkReturn   efx_rc_t
 efx_mae_action_set_populate_vlan_push(
diff --git a/drivers/common/sfc_efx/base/efx_impl.h 
b/drivers/common/sfc_efx/base/efx_impl.h
index 0a6a489d2c..e978ad0de8 100644
--- a/drivers/common/sfc_efx/base/efx_impl.h
+++ b/drivers/common/sfc_efx/base/efx_impl.h
@@ -1771,6 +1771,7 @@ typedef enum efx_mae_action_e {
EFX_MAE_ACTION_SET_DST_MAC,
EFX_MAE_ACTION_SET_SRC_MAC,
EFX_MAE_ACTION_DECR_IP_TTL,
+   EFX_MAE_ACTION_NAT,
EFX_MAE_ACTION_VLAN_PUSH,
EFX_MAE_ACTION_COUNT,
EFX_MAE_ACTION_ENCAP,
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index 546c743a02..aaea38c933 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -1837,6 +1837,9 @@ static const efx_mae_action_desc_t 
efx_mae_actions[EFX_MAE_NACTIONS] = {
[EFX_MAE_ACTION_DECR_IP_TTL] = {
.emad_add = efx_mae_action_set_no_op
},
+   [EFX_MAE_ACTION_NAT] = {
+   .emad_add = efx_mae_action_set_no_op
+   },
[EFX_MAE_ACTION_VLAN_PUSH] = {
.emad_add = efx_mae_action_set_add_vlan_push
},
@@ -1863,6 +1866,7 @@ static const uint32_t efx_mae_action_ordered_map =
(1U << EFX_MAE_ACTION_SET_DST_MAC) |
(1U << EFX_MAE_ACTION_SET_SRC_MAC) |
(1U << EFX_MAE_ACTION_DECR_IP_TTL) |
+   (1U << EFX_MAE_ACTION_NAT) |
(1U << EFX_MAE_ACTION_VLAN_PUSH) |
/*
 * HW will conduct action COUNT after
@@ -2038,6 +2042,14 @@ efx_mae_action_set_populate_decr_ip_ttl(
return (rc);
 }
 
+   __checkReturn   efx_rc_t
+efx_mae_action_set_populate_nat(
+   __inefx_mae_actions_t *spec)
+{
+   return (efx_mae_action_set_spec_populate(spec,
+   EFX_MAE_ACTION_NAT, 0, NULL));
+}
+
__checkReturn   efx_rc_t
 efx_mae_action_set_populate_vlan_push(
__inefx_mae_actions_t *spec,
@@ -3093,6 +3105,11 @@ efx_mae_action_set_alloc(
MAE_ACTION_SET_ALLOC_IN_DO_DECR_IP_TTL, 1);
}
 
+   if ((spec->ema_actions & (1U << EFX_MAE_ACTION_NAT)) != 0) {
+   MCDI_IN_SET_DWORD_FIELD(req, MAE_ACTION_SET_ALLOC_IN_FLAGS,
+   MAE_ACTION_SET_ALLOC_IN_DO_NAT, 1);
+   }
+
if (spec->ema_n_vlan_tags_to_push > 0) {
unsigned int outer_tag_idx;
 
diff --git a/drivers/common/sfc_efx/version.map 
b/drivers/common/sfc_efx/version.map
index 28a2be0a95..1ff760a024 100644
--- a/drivers/common/sfc_efx/version.map
+++ b/drivers/common/sfc_efx/version.map
@@ -104,6 +104,7 @@ INTERNAL {
efx_mae_action_set_populate_flag;
efx_mae_action_set_populate_mark;
efx_mae_action_set_populate_mark_reset;
+   efx_mae_action_set_populate_nat;
efx_mae_action_set_populate_set_dst_mac;
efx_mae_action_set_populate_set_src_mac;
efx_mae_action_set_populate_vlan_pop;
-- 
2.30.2



[PATCH v3 27/34] net/sfc: add support for IPv4 NAT offload to MAE backend

2023-06-04 Thread Ivan Malov
For this offload to work, the innermost pattern items must
provide the full set of exact match criteria, which are as
follows: EtherType, IP DST, IP SRC, TP protocol ID, TP DST
and TP SRC, where the protocol types can be autodetected.

The offload requires that the IPv4 and the TP actions be
requested simultaneously in the same flow by the caller:
SET_IPV4_DST + SET_TP_DST or SET_IPV4_SRC + SET_TP_SRC.

The offload operates on the outermost frame, which,
if action VXLAN_DECAP was requested, maps to the
inner frame of the original packet. The caller
is responsible for requesting this offload only
when the target header is an IPv4-based one.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 doc/guides/nics/features/sfc.ini   |  4 ++
 doc/guides/nics/sfc_efx.rst|  8 +++
 doc/guides/rel_notes/release_23_07.rst | 15 +
 drivers/net/sfc/sfc_mae.c  | 77 +++---
 4 files changed, 96 insertions(+), 8 deletions(-)

diff --git a/doc/guides/nics/features/sfc.ini b/doc/guides/nics/features/sfc.ini
index f5ac644278..19d4935ce6 100644
--- a/doc/guides/nics/features/sfc.ini
+++ b/doc/guides/nics/features/sfc.ini
@@ -75,8 +75,12 @@ port_representor = Y
 represented_port = Y
 queue= Y
 rss  = Y
+set_ipv4_dst = Y
+set_ipv4_src = Y
 set_mac_dst  = Y
 set_mac_src  = Y
+set_tp_dst   = Y
+set_tp_src   = Y
 vf   = Y
 vxlan_decap  = Y
 vxlan_encap  = Y
diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index de0656876b..6e974c3720 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -270,10 +270,18 @@ Supported actions (***transfer*** rules):
 
 - OF_VLAN_SET_PCP
 
+- SET_IPV4_DST
+
+- SET_IPV4_SRC
+
 - SET_MAC_DST
 
 - SET_MAC_SRC
 
+- SET_TP_DST
+
+- SET_TP_SRC
+
 - OF_DEC_NW_TTL
 
 - DEC_TTL
diff --git a/doc/guides/rel_notes/release_23_07.rst 
b/doc/guides/rel_notes/release_23_07.rst
index a9b1293689..6fae4eb0a7 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -55,6 +55,21 @@ New Features
  Also, make sure to start the actual text at the margin.
  ===
 
+* **Updated Solarflare network PMD.**
+
+  Updated the Solarflare ``sfc_efx`` driver with changes including:
+
+  * Added partial support for transfer flow actions SET_IPV4_DST,
+SET_TP_DST, SET_IPV4_SRC and SET_TP_SRC on SN1000 SmartNICs.
+It is required that the innermost pattern items provide the
+full set of exact match criteria: EtherType, IP DST, IP SRC,
+TP protocol ID, TP DST and TP SRC. The IPv4 and TP actions
+must be requested simultaneously in the same flow. These
+actions operate on the outermost frame, at the point
+where action VXLAN_DECAP (if any) has done its job.
+The caller is responsible to request this offload
+only when the target header is an IPv4-based one.
+
 
 Removed Items
 -
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index d3b4099213..c58a2520da 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -9,6 +9,7 @@
 
 #include 
 
+#include 
 #include 
 #include 
 #include 
@@ -3444,6 +3445,8 @@ sfc_mae_rule_parse_action_set_mac(struct sfc_adapter *sa,
 enum sfc_mae_actions_bundle_type {
SFC_MAE_ACTIONS_BUNDLE_EMPTY = 0,
SFC_MAE_ACTIONS_BUNDLE_VLAN_PUSH,
+   SFC_MAE_ACTIONS_BUNDLE_NAT_DST,
+   SFC_MAE_ACTIONS_BUNDLE_NAT_SRC,
 };
 
 struct sfc_mae_actions_bundle {
@@ -3464,7 +3467,8 @@ struct sfc_mae_actions_bundle {
  */
 static int
 sfc_mae_actions_bundle_submit(const struct sfc_mae_actions_bundle *bundle,
- efx_mae_actions_t *spec)
+ struct sfc_flow_spec_mae *flow_spec,
+ bool ct, efx_mae_actions_t *spec)
 {
int rc = 0;
 
@@ -3475,6 +3479,16 @@ sfc_mae_actions_bundle_submit(const struct 
sfc_mae_actions_bundle *bundle,
rc = efx_mae_action_set_populate_vlan_push(
spec, bundle->vlan_push_tpid, bundle->vlan_push_tci);
break;
+   case SFC_MAE_ACTIONS_BUNDLE_NAT_DST:
+   flow_spec->ct_resp.nat.dir_is_dst = true;
+   /* FALLTHROUGH */
+   case SFC_MAE_ACTIONS_BUNDLE_NAT_SRC:
+   if (ct && flow_spec->ct_resp.nat.ip_le != 0 &&
+   flow_spec->ct_resp.nat.port_le != 0)
+   rc = efx_mae_action_set_populate_nat(spec);
+   else
+   rc = EINVAL;
+   break;
default:
SFC_ASSERT(B_FALSE);
break;
@@ -3491,7 +3505,8 @@ sfc_mae_actions_bundle_submit(const struct 
sfc_mae_actions_bundle *bundle,
 static int
 sfc_mae_actions_bundle_sync(const struct rte_flow_action *action,
struct sfc_mae_actions_bundl

[PATCH v3 28/34] net/sfc: rename SW structures used by transfer flow counters

2023-06-04 Thread Ivan Malov
Doing so facilitates rearrangements of the next patch needed
to make software counter objects shareable across many flows.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 14 +++---
 drivers/net/sfc/sfc_mae.h | 14 +++---
 drivers/net/sfc/sfc_mae_counter.c | 31 ---
 drivers/net/sfc/sfc_mae_counter.h | 16 
 4 files changed, 38 insertions(+), 37 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index c58a2520da..2b4c821883 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -151,7 +151,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
if (rc != 0)
goto fail_mae_get_limits;
 
-   sfc_log_init(sa, "init MAE counter registry");
+   sfc_log_init(sa, "init MAE counter record registry");
rc = sfc_mae_counter_registry_init(&mae->counter_registry,
   limits.eml_max_n_counters);
if (rc != 0) {
@@ -817,7 +817,7 @@ sfc_mae_encap_header_disable(struct sfc_adapter *sa,
 
 static int
 sfc_mae_counters_enable(struct sfc_adapter *sa,
-   struct sfc_mae_counter_id *counters,
+   struct sfc_mae_counter *counters,
unsigned int n_counters,
efx_mae_actions_t *action_set_spec)
 {
@@ -833,7 +833,7 @@ sfc_mae_counters_enable(struct sfc_adapter *sa,
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(n_counters == 1);
 
-   rc = sfc_mae_counter_enable(sa, &counters[0]);
+   rc = sfc_mae_counter_fw_rsrc_enable(sa, &counters[0]);
if (rc != 0) {
sfc_err(sa, "failed to enable MAE counter %u: %s",
counters[0].mae_id.id, rte_strerror(rc));
@@ -851,7 +851,7 @@ sfc_mae_counters_enable(struct sfc_adapter *sa,
return 0;
 
 fail_fill_in_id:
-   (void)sfc_mae_counter_disable(sa, &counters[0]);
+   (void)sfc_mae_counter_fw_rsrc_disable(sa, &counters[0]);
 
 fail_counter_add:
sfc_log_init(sa, "failed: %s", rte_strerror(rc));
@@ -860,7 +860,7 @@ sfc_mae_counters_enable(struct sfc_adapter *sa,
 
 static int
 sfc_mae_counters_disable(struct sfc_adapter *sa,
-struct sfc_mae_counter_id *counters,
+struct sfc_mae_counter *counters,
 unsigned int n_counters)
 {
if (n_counters == 0)
@@ -874,7 +874,7 @@ sfc_mae_counters_disable(struct sfc_adapter *sa,
return EALREADY;
}
 
-   return sfc_mae_counter_disable(sa, &counters[0]);
+   return sfc_mae_counter_fw_rsrc_disable(sa, &counters[0]);
 }
 
 struct sfc_mae_aset_ctx {
@@ -1039,7 +1039,7 @@ sfc_mae_action_set_enable(struct sfc_adapter *sa,
struct sfc_mae_encap_header *encap_header;
struct sfc_mae_mac_addr *dst_mac_addr;
struct sfc_mae_mac_addr *src_mac_addr;
-   struct sfc_mae_counter_id *counters;
+   struct sfc_mae_counter *counters;
struct sfc_mae_fw_rsrc *fw_rsrc;
int rc;
 
diff --git a/drivers/net/sfc/sfc_mae.h b/drivers/net/sfc/sfc_mae.h
index 67fa2ca5c9..7337fcf14d 100644
--- a/drivers/net/sfc/sfc_mae.h
+++ b/drivers/net/sfc/sfc_mae.h
@@ -68,7 +68,7 @@ struct sfc_mae_encap_header {
 TAILQ_HEAD(sfc_mae_encap_headers, sfc_mae_encap_header);
 
 /* Counter ID */
-struct sfc_mae_counter_id {
+struct sfc_mae_counter {
/* ID of a counter in MAE */
efx_counter_t   mae_id;
/* ID of a counter in RTE */
@@ -86,7 +86,7 @@ struct sfc_mae_counter_id {
 struct sfc_mae_action_set {
TAILQ_ENTRY(sfc_mae_action_set) entries;
unsigned intrefcnt;
-   struct sfc_mae_counter_id   *counters;
+   struct sfc_mae_counter  *counters;
uint32_tn_counters;
efx_mae_actions_t   *spec;
struct sfc_mae_encap_header *encap_header;
@@ -129,7 +129,7 @@ struct sfc_mae_bounce_eh {
 };
 
 /** Counter collection entry */
-struct sfc_mae_counter {
+struct sfc_mae_counter_record {
boolinuse;
uint32_tgeneration_count;
union sfc_pkts_bytesvalue;
@@ -143,9 +143,9 @@ struct sfc_mae_counters_xstats {
uint64_trealloc_update;
 };
 
-struct sfc_mae_counters {
+struct sfc_mae_counter_records {
/** An array of all MAE counters */
-   struct sfc_mae_counter  *mae_counters;
+   struct sfc_mae_counter_record   *mae_counters;
/** Extra statistics for counters */
struct sfc_mae_counters_xstats  xstats;
/** Count of all MAE counters */
@@ -162,7 +162,7 @@ enum sfc_mae_counter_polling_mode {
 struct sfc_mae_counter_registry {
/* Common counter information */
/** Counters collection */
-   

[PATCH v3 29/34] net/sfc: rework MAE action rule counter representation in SW

2023-06-04 Thread Ivan Malov
Such rework is needed to prepare for INDIRECT action support
and in order to align with the latest HW support perspective.

Currently, the driver supports only one counter per flow. It
was once thought that MAE would support multiple counters in
one action set. That was partly envisaged in code and naming.
But HW support for the feature is no longer planned in EF100.

The code also assumes that a counter object cannot be shared.
This assumption is outdated. The driver may support this now
via action of type INDIRECT provided by generic flow library.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 342 +-
 drivers/net/sfc/sfc_mae.h |  17 +-
 drivers/net/sfc/sfc_mae_counter.c |  22 +-
 3 files changed, 211 insertions(+), 170 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 2b4c821883..4d3778eaba 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -205,6 +205,7 @@ sfc_mae_attach(struct sfc_adapter *sa)
TAILQ_INIT(&mae->outer_rules);
TAILQ_INIT(&mae->mac_addrs);
TAILQ_INIT(&mae->encap_headers);
+   TAILQ_INIT(&mae->counters);
TAILQ_INIT(&mae->action_sets);
TAILQ_INIT(&mae->action_rules);
 
@@ -816,72 +817,155 @@ sfc_mae_encap_header_disable(struct sfc_adapter *sa,
 }
 
 static int
-sfc_mae_counters_enable(struct sfc_adapter *sa,
-   struct sfc_mae_counter *counters,
-   unsigned int n_counters,
-   efx_mae_actions_t *action_set_spec)
+sfc_mae_counter_add(struct sfc_adapter *sa,
+   const struct sfc_mae_counter *counter_tmp,
+   struct sfc_mae_counter **counterp)
 {
-   int rc;
+   struct sfc_mae_counter *counter;
+   struct sfc_mae *mae = &sa->mae;
 
-   sfc_log_init(sa, "entry");
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
 
-   if (n_counters == 0) {
-   sfc_log_init(sa, "no counters - skip");
-   return 0;
+   counter = rte_zmalloc("sfc_mae_counter", sizeof(*counter), 0);
+   if (counter == NULL)
+   return ENOMEM;
+
+   if (counter_tmp != NULL) {
+   counter->rte_id_valid = counter_tmp->rte_id_valid;
+   counter->rte_id = counter_tmp->rte_id;
}
 
-   SFC_ASSERT(sfc_adapter_is_locked(sa));
-   SFC_ASSERT(n_counters == 1);
+   counter->fw_rsrc.counter_id.id = EFX_MAE_RSRC_ID_INVALID;
+   counter->refcnt = 1;
 
-   rc = sfc_mae_counter_fw_rsrc_enable(sa, &counters[0]);
-   if (rc != 0) {
-   sfc_err(sa, "failed to enable MAE counter %u: %s",
-   counters[0].mae_id.id, rte_strerror(rc));
-   goto fail_counter_add;
-   }
+   TAILQ_INSERT_TAIL(&mae->counters, counter, entries);
+   *counterp = counter;
 
-   rc = efx_mae_action_set_fill_in_counter_id(action_set_spec,
-  &counters[0].mae_id);
-   if (rc != 0) {
-   sfc_err(sa, "failed to fill in MAE counter %u in action set: 
%s",
-   counters[0].mae_id.id, rte_strerror(rc));
-   goto fail_fill_in_id;
-   }
+   sfc_dbg(sa, "added counter=%p", counter);
 
return 0;
+}
+
+static void
+sfc_mae_counter_del(struct sfc_adapter *sa, struct sfc_mae_counter *counter)
+{
+   struct sfc_mae *mae = &sa->mae;
 
-fail_fill_in_id:
-   (void)sfc_mae_counter_fw_rsrc_disable(sa, &counters[0]);
+   if (counter == NULL)
+   return;
 
-fail_counter_add:
-   sfc_log_init(sa, "failed: %s", rte_strerror(rc));
-   return rc;
+   SFC_ASSERT(sfc_adapter_is_locked(sa));
+   SFC_ASSERT(counter->refcnt != 0);
+
+   --(counter->refcnt);
+
+   if (counter->refcnt != 0)
+   return;
+
+   if (counter->fw_rsrc.counter_id.id != EFX_MAE_RSRC_ID_INVALID ||
+   counter->fw_rsrc.refcnt != 0) {
+   sfc_err(sa, "deleting counter=%p abandons its FW resource: 
COUNTER_ID=0x%08x, refcnt=%u",
+   counter, counter->fw_rsrc.counter_id.id,
+   counter->fw_rsrc.refcnt);
+   }
+
+   TAILQ_REMOVE(&mae->counters, counter, entries);
+   rte_free(counter);
+
+   sfc_dbg(sa, "deleted counter=%p", counter);
 }
 
 static int
-sfc_mae_counters_disable(struct sfc_adapter *sa,
-struct sfc_mae_counter *counters,
-unsigned int n_counters)
+sfc_mae_counter_enable(struct sfc_adapter *sa, struct sfc_mae_counter *counter,
+  efx_mae_actions_t *action_set_spec)
 {
-   if (n_counters == 0)
+   struct sfc_mae_fw_rsrc *fw_rsrc;
+   int rc;
+
+   if (counter == NULL)
return 0;
 
SFC_ASSERT(sfc_adapter_is_locked(sa));
-   SFC_ASSERT(n_counters == 1);
 
-   if (counters[0].mae_id.id == EFX_MAE_RSRC_ID_INVALID) {
-

[PATCH v3 30/34] net/sfc: support indirect count action in transfer flows

2023-06-04 Thread Ivan Malov
Indirect count action is useful to applications that
need to gather aggregated statistics for many flows.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 doc/guides/nics/sfc_efx.rst|   2 +
 doc/guides/rel_notes/release_23_07.rst |   3 +
 drivers/net/sfc/sfc.h  |   1 +
 drivers/net/sfc/sfc_flow.c | 126 +++
 drivers/net/sfc/sfc_flow.h |  14 +++
 drivers/net/sfc/sfc_mae.c  | 167 -
 drivers/net/sfc/sfc_mae.h  |  15 +++
 7 files changed, 327 insertions(+), 1 deletion(-)

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index 6e974c3720..ba82b02093 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -306,6 +306,8 @@ Supported actions (***transfer*** rules):
 
 - COUNT
 
+- INDIRECT
+
 - DROP
 
 Validating flow rules depends on the firmware variant.
diff --git a/doc/guides/rel_notes/release_23_07.rst 
b/doc/guides/rel_notes/release_23_07.rst
index 6fae4eb0a7..5a77b71d0a 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -70,6 +70,9 @@ New Features
 The caller is responsible to request this offload
 only when the target header is an IPv4-based one.
 
+  * Added support for transfer flow action INDIRECT
+with subtype COUNT, for aggregated statistics.
+
 
 Removed Items
 -
diff --git a/drivers/net/sfc/sfc.h b/drivers/net/sfc/sfc.h
index 6b301aad60..f84a21009e 100644
--- a/drivers/net/sfc/sfc.h
+++ b/drivers/net/sfc/sfc.h
@@ -248,6 +248,7 @@ struct sfc_adapter {
struct sfc_tbls hw_tables;
struct sfc_repr_proxy   repr_proxy;
 
+   struct sfc_flow_indir_actions   flow_indir_actions;
struct sfc_flow_listflow_list;
 
unsigned intrxq_max;
diff --git a/drivers/net/sfc/sfc_flow.c b/drivers/net/sfc/sfc_flow.c
index 0abeabfbf2..a35f20770d 100644
--- a/drivers/net/sfc/sfc_flow.c
+++ b/drivers/net/sfc/sfc_flow.c
@@ -2776,6 +2776,128 @@ sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
return 0;
 }
 
+static struct rte_flow_action_handle *
+sfc_flow_action_handle_create(struct rte_eth_dev *dev,
+ const struct rte_flow_indir_action_conf *conf,
+ const struct rte_flow_action *action,
+ struct rte_flow_error *error)
+{
+   struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+   struct rte_flow_action_handle *handle;
+   int ret;
+
+   if (!conf->transfer) {
+   rte_flow_error_set(error, ENOTSUP,
+  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+  "non-transfer domain does not support 
indirect actions");
+   return NULL;
+   }
+
+   if (conf->ingress || conf->egress) {
+   rte_flow_error_set(error, EINVAL,
+  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+  NULL, "cannot combine ingress/egress with 
transfer");
+   return NULL;
+   }
+
+   handle = rte_zmalloc("sfc_rte_flow_action_handle", sizeof(*handle), 0);
+   if (handle == NULL) {
+   rte_flow_error_set(error, ENOMEM,
+  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+  "failed to allocate memory");
+   return NULL;
+   }
+
+   sfc_adapter_lock(sa);
+
+   ret = sfc_mae_indir_action_create(sa, action, handle, error);
+   if (ret != 0) {
+   sfc_adapter_unlock(sa);
+   rte_free(handle);
+   return NULL;
+   }
+
+   TAILQ_INSERT_TAIL(&sa->flow_indir_actions, handle, entries);
+
+   handle->transfer = (bool)conf->transfer;
+
+   sfc_adapter_unlock(sa);
+
+   return handle;
+}
+
+static int
+sfc_flow_action_handle_destroy(struct rte_eth_dev *dev,
+  struct rte_flow_action_handle *handle,
+  struct rte_flow_error *error)
+{
+   struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+   struct rte_flow_action_handle *entry;
+   int rc = EINVAL;
+
+   sfc_adapter_lock(sa);
+
+   TAILQ_FOREACH(entry, &sa->flow_indir_actions, entries) {
+   if (entry != handle)
+   continue;
+
+   if (entry->transfer) {
+   rc = sfc_mae_indir_action_destroy(sa, handle,
+ error);
+   if (rc != 0)
+   goto exit;
+   } else {
+   SFC_ASSERT(B_FALSE);
+   }
+
+   TAILQ_REMOVE(&sa->flow_indir_actions, entry, entries);
+   rte_free(entry);
+   goto exit;
+   }
+
+   rc = rte_flow_error_set(error, ENOENT,
+   

[PATCH v3 31/34] common/sfc_efx/base: rework MAE counter provisioning helpers

2023-06-04 Thread Ivan Malov
Doing so is required to disambiguate counters of different
types supported by the match-action engine (MAE) on EF100.

Currently, the code only supports action rule counters,
but MAE may also support conntrack assistance counters.
Add type-aware allocate and free MCDI handlers and
extend reporting of counter limits accordingly.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h  |  39 +++-
 drivers/common/sfc_efx/base/efx_impl.h |   2 +-
 drivers/common/sfc_efx/base/efx_mae.c  | 120 -
 drivers/common/sfc_efx/version.map |   2 +
 4 files changed, 140 insertions(+), 23 deletions(-)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 0a08b490e5..5b992ec723 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4191,7 +4191,10 @@ typedef struct efx_mae_limits_s {
uint32_teml_max_n_outer_prios;
uint32_teml_encap_types_supported;
uint32_teml_encap_header_size_limit;
-   uint32_teml_max_n_counters;
+   union {
+   uint32_teml_max_n_counters;
+   uint32_teml_max_n_action_counters;
+   };
 } efx_mae_limits_t;
 
 LIBEFX_API
@@ -4780,6 +4783,14 @@ efx_mae_action_set_fill_in_eh_id(
__inefx_mae_actions_t *spec,
__inconst efx_mae_eh_id_t *eh_idp);
 
+/*
+ * Counter types that may be supported by the match-action engine.
+ * Each counter type maintains its own counter ID namespace in FW.
+ */
+typedef enum efx_counter_type_e {
+   EFX_COUNTER_TYPE_ACTION = 0,
+} efx_counter_type_t;
+
 typedef struct efx_counter_s {
uint32_t id;
 } efx_counter_t;
@@ -4809,6 +4820,8 @@ efx_mae_action_set_alloc(
__out   efx_mae_aset_id_t *aset_idp);
 
 /*
+ * Allocates MAE counter(s) of type EFX_COUNTER_TYPE_ACTION.
+ *
  * Generation count has two purposes:
  *
  * 1) Distinguish between counter packets that belong to freed counter
@@ -4832,6 +4845,20 @@ efx_mae_counters_alloc(
__out_ecount(n_counters)efx_counter_t *countersp,
__out_opt   uint32_t *gen_countp);
 
+/*
+ * Allocates MAE counter(s) of the specified type. Other
+ * than that, behaves like efx_mae_counters_alloc().
+ */
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_counters_alloc_type(
+   __inefx_nic_t *enp,
+   __inefx_counter_type_t type,
+   __inuint32_t n_counters,
+   __out   uint32_t *n_allocatedp,
+   __out_ecount(n_counters)efx_counter_t *countersp,
+   __out_opt   uint32_t *gen_countp);
+
 LIBEFX_API
 extern __checkReturn   efx_rc_t
 efx_mae_counters_free(
@@ -4841,6 +4868,16 @@ efx_mae_counters_free(
__in_ecount(n_counters) const efx_counter_t *countersp,
__out_opt   uint32_t *gen_countp);
 
+LIBEFX_API
+extern __checkReturn   efx_rc_t
+efx_mae_counters_free_type(
+   __inefx_nic_t *enp,
+   __inefx_counter_type_t type,
+   __inuint32_t n_counters,
+   __out   uint32_t *n_freedp,
+   __in_ecount(n_counters) const efx_counter_t *countersp,
+   __out_opt   uint32_t *gen_countp);
+
 /* When set, include counters with a value of zero */
 #defineEFX_MAE_COUNTERS_STREAM_IN_ZERO_SQUASH_DISABLE  (1U << 0)
 
diff --git a/drivers/common/sfc_efx/base/efx_impl.h 
b/drivers/common/sfc_efx/base/efx_impl.h
index e978ad0de8..f6b472c160 100644
--- a/drivers/common/sfc_efx/base/efx_impl.h
+++ b/drivers/common/sfc_efx/base/efx_impl.h
@@ -841,7 +841,7 @@ typedef struct efx_mae_s {
/** Outer rule match field capabilities. */
efx_mae_field_cap_t *em_outer_rule_field_caps;
size_t  em_outer_rule_field_caps_size;
-   uint32_tem_max_ncounters;
+   uint32_tem_max_n_action_counters;
 } efx_mae_t;
 
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index aaea38c933..4078146741 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -67,8 +67,8 @@ efx_mae_get_capabilities(
maep->em_max_nfields =
MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_MATCH_FIELD_COUNT);
 
-   maep->em_max_ncounters =
-   MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_COUNTERS);
+   maep->em_max_n_action_counters =
+   MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_AR_COUNT

[PATCH v3 32/34] net/sfc: indicate MAE counter type in use for transfer flows

2023-06-04 Thread Ivan Malov
Doing so assists adding support for additional counter types.

Current implementation is only aware of action rule counters
that are supported by the match-action engine (MAE) on EF100
NICs, but MAE may also support conntrack assistance counters.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_mae.c | 56 ++
 drivers/net/sfc/sfc_mae.h |  7 ++-
 drivers/net/sfc/sfc_mae_counter.c | 77 +++
 drivers/net/sfc/sfc_mae_counter.h |  2 +-
 4 files changed, 100 insertions(+), 42 deletions(-)

diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index e79df3b56a..7353d04af8 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -65,15 +65,24 @@ sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
 
 static int
 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
- uint32_t nb_counters_max)
+ uint32_t nb_action_counters_max)
 {
-   return sfc_mae_counters_init(®istry->counters, nb_counters_max);
+   int ret;
+
+   ret = sfc_mae_counters_init(®istry->action_counters,
+   nb_action_counters_max);
+   if (ret != 0)
+   return ret;
+
+   registry->action_counters.type = EFX_COUNTER_TYPE_ACTION;
+
+   return 0;
 }
 
 static void
 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
 {
-   sfc_mae_counters_fini(®istry->counters);
+   sfc_mae_counters_fini(®istry->action_counters);
 }
 
 struct rte_flow *
@@ -153,10 +162,10 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
sfc_log_init(sa, "init MAE counter record registry");
rc = sfc_mae_counter_registry_init(&mae->counter_registry,
-  limits.eml_max_n_counters);
+   limits.eml_max_n_action_counters);
if (rc != 0) {
-   sfc_err(sa, "failed to init MAE counters registry for 
%u entries: %s",
-   limits.eml_max_n_counters, rte_strerror(rc));
+   sfc_err(sa, "failed to init record registry for %u AR 
counters: %s",
+   limits.eml_max_n_action_counters, 
rte_strerror(rc));
goto fail_counter_registry_init;
}
}
@@ -833,6 +842,9 @@ sfc_mae_counter_add(struct sfc_adapter *sa,
if (counter_tmp != NULL) {
counter->rte_id_valid = counter_tmp->rte_id_valid;
counter->rte_id = counter_tmp->rte_id;
+   counter->type = counter_tmp->type;
+   } else {
+   counter->type = EFX_COUNTER_TYPE_ACTION;
}
 
counter->fw_rsrc.counter_id.id = EFX_MAE_RSRC_ID_INVALID;
@@ -864,8 +876,8 @@ sfc_mae_counter_del(struct sfc_adapter *sa, struct 
sfc_mae_counter *counter)
 
if (counter->fw_rsrc.counter_id.id != EFX_MAE_RSRC_ID_INVALID ||
counter->fw_rsrc.refcnt != 0) {
-   sfc_err(sa, "deleting counter=%p abandons its FW resource: 
COUNTER_ID=0x%08x, refcnt=%u",
-   counter, counter->fw_rsrc.counter_id.id,
+   sfc_err(sa, "deleting counter=%p abandons its FW resource: 
COUNTER_ID=0x%x-#%u, refcnt=%u",
+   counter, counter->type, counter->fw_rsrc.counter_id.id,
counter->fw_rsrc.refcnt);
}
 
@@ -916,8 +928,8 @@ sfc_mae_counter_enable(struct sfc_adapter *sa, struct 
sfc_mae_counter *counter,
}
 
if (fw_rsrc->refcnt == 0) {
-   sfc_dbg(sa, "enabled counter=%p: COUNTER_ID=0x%08x",
-   counter, fw_rsrc->counter_id.id);
+   sfc_dbg(sa, "enabled counter=%p: COUNTER_ID=0x%x-#%u",
+   counter, counter->type, fw_rsrc->counter_id.id);
}
 
++(fw_rsrc->refcnt);
@@ -940,8 +952,8 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct 
sfc_mae_counter *counter)
 
if (fw_rsrc->counter_id.id == EFX_MAE_RSRC_ID_INVALID ||
fw_rsrc->refcnt == 0) {
-   sfc_err(sa, "failed to disable counter=%p: already disabled; 
COUNTER_ID=0x%08x, refcnt=%u",
-   counter, fw_rsrc->counter_id.id, fw_rsrc->refcnt);
+   sfc_err(sa, "failed to disable counter=%p: already disabled; 
COUNTER_ID=0x%x-#%u, refcnt=%u",
+   counter, counter->type, fw_rsrc->counter_id.id, 
fw_rsrc->refcnt);
return;
}
 
@@ -950,11 +962,11 @@ sfc_mae_counter_disable(struct sfc_adapter *sa, struct 
sfc_mae_counter *counter)
 
rc = sfc_mae_counter_fw_rsrc_disable(sa, counter);
if (rc == 0) {
-   sfc_dbg(sa, "disabled counter=%p with 
COUNTER_ID=0x%08x",
-   counter, counter_id);
+   sfc_dbg(sa, "disabled counter=%p with 
COUN

[PATCH v3 34/34] net/sfc: use conntrack assistance counters in transfer flows

2023-06-04 Thread Ivan Malov
These are 1-bit saturating counters which can only be useful
to tell whether a given flow rule has offloaded some packets
since the last query. Byte count is never provided for these.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/net/sfc/sfc_flow.h|   2 +
 drivers/net/sfc/sfc_mae.c | 119 ++
 drivers/net/sfc/sfc_mae.h |   2 +
 drivers/net/sfc/sfc_mae_counter.c |  30 
 4 files changed, 124 insertions(+), 29 deletions(-)

diff --git a/drivers/net/sfc/sfc_flow.h b/drivers/net/sfc/sfc_flow.h
index af94d0654a..601f93e540 100644
--- a/drivers/net/sfc/sfc_flow.h
+++ b/drivers/net/sfc/sfc_flow.h
@@ -86,6 +86,8 @@ struct sfc_flow_spec_mae {
/* Conntrack (CT) assistance table entry key and response */
sfc_mae_conntrack_response_tct_resp;
sfc_mae_conntrack_key_t ct_key;
+   /* Conntrack (CT) assistance counter */
+   struct sfc_mae_counter  *ct_counter;
 };
 
 /* PMD-specific definition of the opaque type from rte_flow.h */
diff --git a/drivers/net/sfc/sfc_mae.c b/drivers/net/sfc/sfc_mae.c
index 7353d04af8..ab315853d5 100644
--- a/drivers/net/sfc/sfc_mae.c
+++ b/drivers/net/sfc/sfc_mae.c
@@ -65,7 +65,8 @@ sfc_mae_assign_entity_mport(struct sfc_adapter *sa,
 
 static int
 sfc_mae_counter_registry_init(struct sfc_mae_counter_registry *registry,
- uint32_t nb_action_counters_max)
+ uint32_t nb_action_counters_max,
+ uint32_t nb_conntrack_counters_max)
 {
int ret;
 
@@ -76,12 +77,20 @@ sfc_mae_counter_registry_init(struct 
sfc_mae_counter_registry *registry,
 
registry->action_counters.type = EFX_COUNTER_TYPE_ACTION;
 
+   ret = sfc_mae_counters_init(®istry->conntrack_counters,
+   nb_conntrack_counters_max);
+   if (ret != 0)
+   return ret;
+
+   registry->conntrack_counters.type = EFX_COUNTER_TYPE_CONNTRACK;
+
return 0;
 }
 
 static void
 sfc_mae_counter_registry_fini(struct sfc_mae_counter_registry *registry)
 {
+   sfc_mae_counters_fini(®istry->conntrack_counters);
sfc_mae_counters_fini(®istry->action_counters);
 }
 
@@ -162,10 +171,13 @@ sfc_mae_attach(struct sfc_adapter *sa)
 
sfc_log_init(sa, "init MAE counter record registry");
rc = sfc_mae_counter_registry_init(&mae->counter_registry,
-   limits.eml_max_n_action_counters);
+   limits.eml_max_n_action_counters,
+   limits.eml_max_n_conntrack_counters);
if (rc != 0) {
-   sfc_err(sa, "failed to init record registry for %u AR 
counters: %s",
-   limits.eml_max_n_action_counters, 
rte_strerror(rc));
+   sfc_err(sa, "failed to init record registry for %u AR 
and %u CT counters: %s",
+   limits.eml_max_n_action_counters,
+   limits.eml_max_n_conntrack_counters,
+   rte_strerror(rc));
goto fail_counter_registry_init;
}
}
@@ -1471,6 +1483,8 @@ sfc_mae_flow_cleanup(struct sfc_adapter *sa,
}
 
sfc_mae_action_rule_del(sa, spec_mae->action_rule);
+
+   sfc_mae_counter_del(sa, spec_mae->ct_counter);
 }
 
 static int
@@ -4223,7 +4237,7 @@ static const char * const action_names[] = {
 static int
 sfc_mae_rule_parse_action(struct sfc_adapter *sa,
  const struct rte_flow_action *action,
- struct rte_flow *flow,
+ struct rte_flow *flow, bool ct,
  struct sfc_mae_actions_bundle *bundle,
  struct sfc_mae_aset_ctx *ctx,
  struct rte_flow_error *error)
@@ -4239,6 +4253,12 @@ sfc_mae_rule_parse_action(struct sfc_adapter *sa,
bool custom_error = B_FALSE;
int rc = 0;
 
+   if (ct) {
+   mae_counter_type = EFX_COUNTER_TYPE_CONNTRACK;
+   counterp = &spec_mae->ct_counter;
+   spec_ptr = NULL;
+   }
+
switch (action->type) {
case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
@@ -4526,7 +4546,7 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
if (rc != 0)
goto fail_rule_parse_action;
 
-   rc = sfc_mae_rule_parse_action(sa, action, flow,
+   rc = sfc_mae_rule_parse_action(sa, action, flow, ct,
   &bundle, &ctx, error);
if (rc != 0)
goto fail_rule_parse_action;
@@ -4561,8 +4581,15 @@ sfc_mae_rule_parse_actions(struct sfc_adapter *sa,
 */
efx_mae_action_set_popul

[PATCH v3 33/34] common/sfc_efx/base: support conntrack assistance counters

2023-06-04 Thread Ivan Malov
Counters that can be referenced by HW conntrack assistance
table work similar to those of the action rules. However,
their IDs belong to a separate (CT-specific) namespace.

These are 1-bit saturating counters with no byte count.

Signed-off-by: Ivan Malov 
Reviewed-by: Andy Moreton 
---
 drivers/common/sfc_efx/base/efx.h  |  2 ++
 drivers/common/sfc_efx/base/efx_impl.h |  1 +
 drivers/common/sfc_efx/base/efx_mae.c  | 35 +++---
 3 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/drivers/common/sfc_efx/base/efx.h 
b/drivers/common/sfc_efx/base/efx.h
index 5b992ec723..e3830cb494 100644
--- a/drivers/common/sfc_efx/base/efx.h
+++ b/drivers/common/sfc_efx/base/efx.h
@@ -4195,6 +4195,7 @@ typedef struct efx_mae_limits_s {
uint32_teml_max_n_counters;
uint32_teml_max_n_action_counters;
};
+   uint32_teml_max_n_conntrack_counters;
 } efx_mae_limits_t;
 
 LIBEFX_API
@@ -4789,6 +4790,7 @@ efx_mae_action_set_fill_in_eh_id(
  */
 typedef enum efx_counter_type_e {
EFX_COUNTER_TYPE_ACTION = 0,
+   EFX_COUNTER_TYPE_CONNTRACK,
 } efx_counter_type_t;
 
 typedef struct efx_counter_s {
diff --git a/drivers/common/sfc_efx/base/efx_impl.h 
b/drivers/common/sfc_efx/base/efx_impl.h
index f6b472c160..a0dde1b1b4 100644
--- a/drivers/common/sfc_efx/base/efx_impl.h
+++ b/drivers/common/sfc_efx/base/efx_impl.h
@@ -842,6 +842,7 @@ typedef struct efx_mae_s {
efx_mae_field_cap_t *em_outer_rule_field_caps;
size_t  em_outer_rule_field_caps_size;
uint32_tem_max_n_action_counters;
+   uint32_tem_max_n_conntrack_counters;
 } efx_mae_t;
 
 #endif /* EFSYS_OPT_MAE */
diff --git a/drivers/common/sfc_efx/base/efx_mae.c 
b/drivers/common/sfc_efx/base/efx_mae.c
index 4078146741..6457f39ccf 100644
--- a/drivers/common/sfc_efx/base/efx_mae.c
+++ b/drivers/common/sfc_efx/base/efx_mae.c
@@ -16,7 +16,7 @@ efx_mae_get_capabilities(
efx_mcdi_req_t req;
EFX_MCDI_DECLARE_BUF(payload,
MC_CMD_MAE_GET_CAPS_IN_LEN,
-   MC_CMD_MAE_GET_CAPS_OUT_LEN);
+   MC_CMD_MAE_GET_CAPS_V2_OUT_LEN);
struct efx_mae_s *maep = enp->en_maep;
efx_rc_t rc;
 
@@ -24,7 +24,7 @@ efx_mae_get_capabilities(
req.emr_in_buf = payload;
req.emr_in_length = MC_CMD_MAE_GET_CAPS_IN_LEN;
req.emr_out_buf = payload;
-   req.emr_out_length = MC_CMD_MAE_GET_CAPS_OUT_LEN;
+   req.emr_out_length = MC_CMD_MAE_GET_CAPS_V2_OUT_LEN;
 
efx_mcdi_execute(enp, &req);
 
@@ -70,6 +70,13 @@ efx_mae_get_capabilities(
maep->em_max_n_action_counters =
MCDI_OUT_DWORD(req, MAE_GET_CAPS_OUT_AR_COUNTERS);
 
+   if (req.emr_out_length_used >= MC_CMD_MAE_GET_CAPS_V2_OUT_LEN) {
+   maep->em_max_n_conntrack_counters =
+   MCDI_OUT_DWORD(req, MAE_GET_CAPS_V2_OUT_CT_COUNTERS);
+   } else {
+   maep->em_max_n_conntrack_counters = 0;
+   }
+
return (0);
 
 fail2:
@@ -375,6 +382,7 @@ efx_mae_get_limits(
emlp->eml_encap_header_size_limit =
MC_CMD_MAE_ENCAP_HEADER_ALLOC_IN_HDR_DATA_MAXNUM_MCDI2;
emlp->eml_max_n_action_counters = maep->em_max_n_action_counters;
+   emlp->eml_max_n_conntrack_counters = maep->em_max_n_conntrack_counters;
 
return (0);
 
@@ -3275,11 +3283,15 @@ efx_mae_counters_alloc_type(
efx_rc_t rc;
 
EFX_STATIC_ASSERT(EFX_COUNTER_TYPE_ACTION == MAE_COUNTER_TYPE_AR);
+   EFX_STATIC_ASSERT(EFX_COUNTER_TYPE_CONNTRACK == MAE_COUNTER_TYPE_CT);
 
switch (type) {
case EFX_COUNTER_TYPE_ACTION:
max_n_counters = maep->em_max_n_action_counters;
break;
+   case EFX_COUNTER_TYPE_CONNTRACK:
+   max_n_counters = maep->em_max_n_conntrack_counters;
+   break;
default:
rc = EINVAL;
goto fail1;
@@ -3396,6 +3408,9 @@ efx_mae_counters_free_type(
case EFX_COUNTER_TYPE_ACTION:
max_n_counters = maep->em_max_n_action_counters;
break;
+   case EFX_COUNTER_TYPE_CONNTRACK:
+   max_n_counters = maep->em_max_n_conntrack_counters;
+   break;
default:
rc = EINVAL;
goto fail1;
@@ -3498,8 +3513,11 @@ efx_mae_counters_stream_start(
__out   uint32_t *flags_out)
 {
efx_mcdi_req_t req;
-   EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAE_COUNTERS_STREAM_START_IN_LEN,
+   EFX_MCDI_DECLARE_BUF(payload,
+MC_CMD_MAE_COUNTERS_STREAM_START_V2_IN_LEN,
 MC_CMD_MAE_COUNTERS_STREAM_START_OUT_LEN);
+   struct efx_mae_s *maep = enp->en_maep;
+   uint32_t counter_types;
efx_rc_t rc;
 
EFX_STATIC_ASSERT(EFX_MAE_COUNTERS_STREAM_IN_ZERO

Re: [PATCH v4 1/2] ethdev: add API to check if queue is valid

2023-06-04 Thread huangdengdui
On 2023/6/2 20:47, Ferruh Yigit wrote:
> On 6/2/2023 8:52 AM, Dengdui Huang wrote:
>> The API rte_eth_dev_is_valid_rxq/txq which
>> is used to check if Rx/Tx queue is valid.
>> If the queue has been setup, it is considered valid.
>>
>> Signed-off-by: Dengdui Huang 
>> ---
>>  doc/guides/rel_notes/release_23_07.rst |  6 
>>  lib/ethdev/rte_ethdev.c| 22 +++
>>  lib/ethdev/rte_ethdev.h| 38 ++
>>  lib/ethdev/version.map |  2 ++
>>  4 files changed, 68 insertions(+)
>>
>> diff --git a/doc/guides/rel_notes/release_23_07.rst 
>> b/doc/guides/rel_notes/release_23_07.rst
>> index 0d3cada5d0..1332fa3a5a 100644
>> --- a/doc/guides/rel_notes/release_23_07.rst
>> +++ b/doc/guides/rel_notes/release_23_07.rst
>> @@ -83,6 +83,12 @@ New Features
>>for new capability registers, large passthrough BAR and some
>>performance enhancements for UPT.
>>  
>> +* **Added ethdev Rx/Tx queue ID check API.**
>> +
>> +  Added ethdev Rx/Tx queue ID check API which provides functions
>> +  for check if Rx/Tx queue is valid. If the queue has been setup,
>> +  it is considered valid.
>> +
>>  
> 
> Can you please move the release note update to up, before rte_flow
> updates, the expected order is documented in section comment.
> 
>>  Removed Items
>>  -
>> diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
>> index d46e74504e..a134928c8a 100644
>> --- a/lib/ethdev/rte_ethdev.c
>> +++ b/lib/ethdev/rte_ethdev.c
>> @@ -771,6 +771,28 @@ eth_dev_validate_tx_queue(const struct rte_eth_dev 
>> *dev, uint16_t tx_queue_id)
>>  return 0;
>>  }
>>  
>> +int
>> +rte_eth_dev_is_valid_rxq(uint16_t port_id, uint16_t queue_id)
>> +{
>> +struct rte_eth_dev *dev;
>> +
>> +RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
>> +dev = &rte_eth_devices[port_id];
>> +
>> +return eth_dev_validate_rx_queue(dev, queue_id);
>> +}
>> +
>> +int
>> +rte_eth_dev_is_valid_txq(uint16_t port_id, uint16_t queue_id)
>> +{
>> +struct rte_eth_dev *dev;
>> +
>> +RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
>> +dev = &rte_eth_devices[port_id];
>> +
>> +return eth_dev_validate_tx_queue(dev, queue_id);
>> +}
>> +
>>  int
>>  rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
>>  {
>> diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
>> index 9932413c33..4ef803c244 100644
>> --- a/lib/ethdev/rte_ethdev.h
>> +++ b/lib/ethdev/rte_ethdev.h
>> @@ -2666,6 +2666,44 @@ int rte_eth_dev_socket_id(uint16_t port_id);
>>   */
>>  int rte_eth_dev_is_valid_port(uint16_t port_id);
>>  
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
>> + *
>> + * Check if Rx queue is valid. If the queue has been setup,
>> + * it is considered valid.
>> + *
>> + * @param port_id
>> + *   The port identifier of the Ethernet device.
>> + * @param queue_id
>> + *  The index of the receive queue.
>> + * @return
>> + *   - -ENODEV: if *port_id* is valid.
> 
> s/valid/invalid ?
> 
>> + *   - -EINVAL: if queue is out of range or not been setup.
> 
> "if queue_id is out of range or queue is not been setup" ?
> 
>> + *   - 0 if Rx queue is valid.
>> + */
>> +__rte_experimental
>> +int rte_eth_dev_is_valid_rxq(uint16_t port_id, uint16_t queue_id);
>> +
>> +/**
>> + * @warning
>> + * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
>> + *
>> + * Check if Tx queue is valid. If the queue has been setup,
>> + * it is considered valid.
>> + *
>> + * @param port_id
>> + *   The port identifier of the Ethernet device.
>> + * @param queue_id
>> + *  The index of the transmit queue.
>> + * @return
>> + *   - -ENODEV: if *port_id* is valid.
> 
> s/valid/invalid ?
> 
>> + *   - -EINVAL: if queue is out of range or not been setup.
> 
> "if queue_id is out of range or queue is not been setup" ?
> 
>> + *   - 0 if Tx queue is valid.
>> + */
>> +__rte_experimental
>> +int rte_eth_dev_is_valid_txq(uint16_t port_id, uint16_t queue_id);
>> +
>>  /**
>>   * Start specified Rx queue of a port. It is used when rx_deferred_start
>>   * flag of the specified queue is true.
>> diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
>> index 9d418091ef..3aa6bce156 100644
>> --- a/lib/ethdev/version.map
>> +++ b/lib/ethdev/version.map
>> @@ -309,6 +309,8 @@ EXPERIMENTAL {
>>  rte_flow_async_action_list_handle_destroy;
>>  rte_flow_async_action_list_handle_query_update;
>>  rte_flow_async_actions_update;
>> +rte_eth_dev_is_valid_rxq;
>> +rte_eth_dev_is_valid_txq;
> 
> Can you please sort '23.07' block alphabetically?
> 
>>  };
>>  
>>  INTERNAL {
> 
Hi Ferruh,
Thanks for your review, I will do in v5.


[PATCH v5 2/2] app/testpmd: fix segment fault with invalid queue ID

2023-06-04 Thread Dengdui Huang
When input queue ID is invalid, it will lead to
Segmentation fault, like:

dpdk-testpmd -a :01:00.0 -- -i
testpmd> show port 0 txq/rxq 99 desc 0 status
Segmentation fault

dpdk-testpmd -a :01:00.0 -- -i
testpmd> show port 0 rxq 99 desc used count
Segmentation fault

This patch fixes it.

Fixes: fae9aa717d6c ("app/testpmd: support checking descriptor status")
Fixes: 3f9acb5c83bb ("ethdev: avoid non-dataplane checks in Rx queue count")
Cc: sta...@dpdk.org

Signed-off-by: Dengdui Huang 
Acked-by: Ferruh Yigit 
---
 app/test-pmd/cmdline.c | 23 ---
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 5333ba72c3..a15a442a06 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -12282,12 +12282,13 @@ cmd_show_rx_tx_desc_status_parsed(void *parsed_result,
struct cmd_show_rx_tx_desc_status_result *res = parsed_result;
int rc;
 
-   if (!rte_eth_dev_is_valid_port(res->cmd_pid)) {
-   fprintf(stderr, "invalid port id %u\n", res->cmd_pid);
-   return;
-   }
-
if (!strcmp(res->cmd_keyword, "rxq")) {
+   if (rte_eth_dev_is_valid_rxq(res->cmd_pid, res->cmd_qid) != 0) {
+   fprintf(stderr,
+   "Invalid input: port id = %d, queue id = %d\n",
+   res->cmd_pid, res->cmd_qid);
+   return;
+   }
rc = rte_eth_rx_descriptor_status(res->cmd_pid, res->cmd_qid,
 res->cmd_did);
if (rc < 0) {
@@ -12303,6 +12304,12 @@ cmd_show_rx_tx_desc_status_parsed(void *parsed_result,
else
printf("Desc status = UNAVAILABLE\n");
} else if (!strcmp(res->cmd_keyword, "txq")) {
+   if (rte_eth_dev_is_valid_txq(res->cmd_pid, res->cmd_qid) != 0) {
+   fprintf(stderr,
+   "Invalid input: port id = %d, queue id = %d\n",
+   res->cmd_pid, res->cmd_qid);
+   return;
+   }
rc = rte_eth_tx_descriptor_status(res->cmd_pid, res->cmd_qid,
 res->cmd_did);
if (rc < 0) {
@@ -12382,8 +12389,10 @@ cmd_show_rx_queue_desc_used_count_parsed(void 
*parsed_result,
struct cmd_show_rx_queue_desc_used_count_result *res = parsed_result;
int rc;
 
-   if (!rte_eth_dev_is_valid_port(res->cmd_pid)) {
-   fprintf(stderr, "invalid port id %u\n", res->cmd_pid);
+   if (rte_eth_dev_is_valid_rxq(res->cmd_pid, res->cmd_qid) != 0) {
+   fprintf(stderr,
+   "Invalid input: port id = %d, queue id = %d\n",
+   res->cmd_pid, res->cmd_qid);
return;
}
 
-- 
2.33.0



[PATCH v5 0/2] add Rx/Tx queue ID check API and use it to fix a bug

2023-06-04 Thread Dengdui Huang
This series adds a common API to check queue IDs
and uses it to fix a bug.

v4->v5
update document order and correcting typos

v3->v4
update API name and update description in the API documentation

v2->v3
update API name and use the internal function
eth_dev_validate_tx_queue() to check queue id

v1->v2
add a commom API to check queue id

Dengdui Huang (2):
  ethdev: add API to check if queue is valid
  app/testpmd: fix segment fault with invalid queue ID

 app/test-pmd/cmdline.c | 23 +++-
 doc/guides/rel_notes/release_23_07.rst |  6 
 lib/ethdev/rte_ethdev.c| 22 +++
 lib/ethdev/rte_ethdev.h| 38 ++
 lib/ethdev/version.map |  2 ++
 5 files changed, 84 insertions(+), 7 deletions(-)

-- 
2.33.0



[PATCH v5 1/2] ethdev: add API to check if queue is valid

2023-06-04 Thread Dengdui Huang
Add the APIs rte_eth_dev_is_valid_rxq/txq,
which are used to check if a Rx/Tx queue is valid.
If the queue has been set up, it is considered valid.

Signed-off-by: Dengdui Huang 
---
 doc/guides/rel_notes/release_23_07.rst |  6 
 lib/ethdev/rte_ethdev.c| 22 +++
 lib/ethdev/rte_ethdev.h| 38 ++
 lib/ethdev/version.map |  2 ++
 4 files changed, 68 insertions(+)

diff --git a/doc/guides/rel_notes/release_23_07.rst 
b/doc/guides/rel_notes/release_23_07.rst
index 0b5561a6c6..e351be59e4 100644
--- a/doc/guides/rel_notes/release_23_07.rst
+++ b/doc/guides/rel_notes/release_23_07.rst
@@ -65,6 +65,12 @@ New Features
 
   Added RTE_ETH_FEC_LLRS to rte_eth_fec_mode.
 
+* **Added ethdev Rx/Tx queue ID check API.**
+
+  Added ethdev Rx/Tx queue ID check API which provides functions
+  for check if Rx/Tx queue is valid. If the queue has been setup,
+  it is considered valid.
+
 * **Added flow matching of tx queue.**
 
   Added ``RTE_FLOW_ITEM_TYPE_TX_QUEUE`` rte_flow pattern to match tx queue of
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index d46e74504e..a134928c8a 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -771,6 +771,28 @@ eth_dev_validate_tx_queue(const struct rte_eth_dev *dev, 
uint16_t tx_queue_id)
return 0;
 }
 
+int
+rte_eth_dev_is_valid_rxq(uint16_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_dev *dev;
+
+   RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+   dev = &rte_eth_devices[port_id];
+
+   return eth_dev_validate_rx_queue(dev, queue_id);
+}
+
+int
+rte_eth_dev_is_valid_txq(uint16_t port_id, uint16_t queue_id)
+{
+   struct rte_eth_dev *dev;
+
+   RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+   dev = &rte_eth_devices[port_id];
+
+   return eth_dev_validate_tx_queue(dev, queue_id);
+}
+
 int
 rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
 {
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 9932413c33..a37c0bdf76 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -2666,6 +2666,44 @@ int rte_eth_dev_socket_id(uint16_t port_id);
  */
 int rte_eth_dev_is_valid_port(uint16_t port_id);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Check if Rx queue is valid. If the queue has been setup,
+ * it is considered valid.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue.
+ * @return
+ *   - -ENODEV: if *port_id* is invalid.
+ *   - -EINVAL: if queue_id is out of range or queue is not been setup.
+ *   - 0 if Rx queue is valid.
+ */
+__rte_experimental
+int rte_eth_dev_is_valid_rxq(uint16_t port_id, uint16_t queue_id);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice
+ *
+ * Check if Tx queue is valid. If the queue has been setup,
+ * it is considered valid.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the transmit queue.
+ * @return
+ *   - -ENODEV: if *port_id* is invalid.
+ *   - -EINVAL: if queue_id is out of range or queue is not been setup.
+ *   - 0 if Tx queue is valid.
+ */
+__rte_experimental
+int rte_eth_dev_is_valid_txq(uint16_t port_id, uint16_t queue_id);
+
 /**
  * Start specified Rx queue of a port. It is used when rx_deferred_start
  * flag of the specified queue is true.
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index 9d418091ef..1a33d72668 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -301,6 +301,8 @@ EXPERIMENTAL {
rte_flow_async_create_by_index;
 
# added in 23.07
+   rte_eth_dev_is_valid_rxq;
+   rte_eth_dev_is_valid_txq;
rte_flow_action_list_handle_create;
rte_flow_action_list_handle_destroy;
rte_flow_action_list_handle_query_update;
-- 
2.33.0



[PATCH 0/2] report the outer L3 and L4 packet type

2023-06-04 Thread Chaoyong He
This patch series adds support for:
1. Report the outer L3 packet type.
2. Report the L4 packet type for VXLAN and GENEVE.

Qin Ke (2):
  net/nfp: report outer L3 packet type by Rx descriptor
  net/nfp: add default process to report outer L4 packet type

 drivers/net/nfp/nfp_rxtx.c | 21 +++--
 drivers/net/nfp/nfp_rxtx.h | 21 +
 2 files changed, 36 insertions(+), 6 deletions(-)

-- 
2.39.1



[PATCH 1/2] net/nfp: report outer L3 packet type by Rx descriptor

2023-06-04 Thread Chaoyong He
From: Qin Ke 

Parse outer layer 3 packet type from Rx descriptor and report it.

Signed-off-by: Qin Ke 
Reviewed-by: Niklas Söderlund 
Reviewed-by: Chaoyong He 
---
 drivers/net/nfp/nfp_rxtx.c | 17 +
 drivers/net/nfp/nfp_rxtx.h | 21 +
 2 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 38e084171b..a36efd3aa9 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -321,6 +321,21 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed 
*nfp_ptype, struct rte_mbuf *mb)
if (nfp_tunnel_ptype != NFP_NET_PTYPE_TUNNEL_NONE)
mbuf_ptype |= RTE_PTYPE_INNER_L2_ETHER;
 
+   switch (nfp_ptype->outer_l3_ptype) {
+   case NFP_NET_PTYPE_OUTER_L3_NONE:
+   break;
+   case NFP_NET_PTYPE_OUTER_L3_IPV4:
+   mbuf_ptype |= RTE_PTYPE_L3_IPV4;
+   break;
+   case NFP_NET_PTYPE_OUTER_L3_IPV6:
+   mbuf_ptype |= RTE_PTYPE_L3_IPV6;
+   break;
+   default:
+   PMD_RX_LOG(DEBUG, "Unrecognized nfp outer layer 3 packet type: 
%u",
+   nfp_ptype->outer_l3_ptype);
+   break;
+   }
+
switch (nfp_tunnel_ptype) {
case NFP_NET_PTYPE_TUNNEL_NONE:
break;
@@ -432,6 +447,8 @@ nfp_net_parse_ptype(struct nfp_net_rx_desc *rxds,
NFP_NET_PTYPE_L3_OFFSET;
nfp_ptype.tunnel_ptype = (rxd_ptype & NFP_NET_PTYPE_TUNNEL_MASK) >>
NFP_NET_PTYPE_TUNNEL_OFFSET;
+   nfp_ptype.outer_l3_ptype = (rxd_ptype & NFP_NET_PTYPE_OUTER_L3_MASK) >>
+   NFP_NET_PTYPE_OUTER_L3_OFFSET;
 
nfp_net_set_ptype(&nfp_ptype, mb);
 }
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index eebe9b3ee2..cf713b0cd5 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -178,7 +178,7 @@ struct nfp_net_txq {
  *1   0
  *  5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |   |tunnel |  l3 |  l4 |
+ * |   |ol3|tunnel |  l3 |  l4 |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
  * Bit map about nfp packet type refers to the following:
@@ -210,6 +210,12 @@ struct nfp_net_txq {
  * 0101: NFP_NET_PTYPE_TUNNEL_GENEVE
  * 0010, 0011, 0110~: reserved
  *
+ * Outer L3: bit 10~11, used for outer layer 3.
+ * 00: NFP_NET_PTYPE_OUTER_L3_NONE
+ * 01: NFP_NET_PTYPE_OUTER_L3_IPV6
+ * 10: NFP_NET_PTYPE_OUTER_L3_IPV4
+ * 11: reserved
+ *
  * Reserved: bit 10~15, used for extension.
  */
 
@@ -217,10 +223,12 @@ struct nfp_net_txq {
 #define NFP_NET_PTYPE_L4_MASK  0x0007
 #define NFP_NET_PTYPE_L3_MASK  0x0038
 #define NFP_NET_PTYPE_TUNNEL_MASK  0x03c0
+#define NFP_NET_PTYPE_OUTER_L3_MASK0x0c00
 
 #define NFP_NET_PTYPE_L4_OFFSET0
 #define NFP_NET_PTYPE_L3_OFFSET3
 #define NFP_NET_PTYPE_TUNNEL_OFFSET6
+#define NFP_NET_PTYPE_OUTER_L3_OFFSET  10
 
 /* Case about nfp packet type based on the bit map above. */
 #define NFP_NET_PTYPE_L4_NONE  0
@@ -244,13 +252,18 @@ struct nfp_net_txq {
 #define NFP_NET_PTYPE_TUNNEL_NVGRE 4
 #define NFP_NET_PTYPE_TUNNEL_GENEVE5
 
+#define NFP_NET_PTYPE_OUTER_L3_NONE0
+#define NFP_NET_PTYPE_OUTER_L3_IPV61
+#define NFP_NET_PTYPE_OUTER_L3_IPV42
+
 #define NFP_PTYPE2RTE(tunnel, type) ((tunnel) ? RTE_PTYPE_INNER_##type : 
RTE_PTYPE_##type)
 
 /* Record NFP packet type parsed from rxd.offload_info. */
 struct nfp_ptype_parsed {
-   uint8_t l4_ptype; /**< Packet type of layer 4, or inner layer 4. */
-   uint8_t l3_ptype; /**< Packet type of layer 3, or inner layer 3. */
-   uint8_t tunnel_ptype; /**< Packet type of tunnel. */
+   uint8_t l4_ptype;   /**< Packet type of layer 4, or inner layer 4. 
*/
+   uint8_t l3_ptype;   /**< Packet type of layer 3, or inner layer 3. 
*/
+   uint8_t tunnel_ptype;   /**< Packet type of tunnel. */
+   uint8_t outer_l3_ptype; /**< Packet type of outer layer 3. */
 };
 
 struct nfp_net_rx_desc {
-- 
2.39.1



[PATCH 2/2] net/nfp: add default process to report outer L4 packet type

2023-06-04 Thread Chaoyong He
From: Qin Ke 

Parsing the outer layer 4 packet type from the Rx descriptor is not
supported; add a default process to report the outer layer 4 packet
type for VXLAN and GENEVE packets.

Signed-off-by: Qin Ke 
Reviewed-by: Niklas Söderlund 
Reviewed-by: Chaoyong He 
---
 drivers/net/nfp/nfp_rxtx.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index a36efd3aa9..0ac9d6db03 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -340,13 +340,13 @@ nfp_net_set_ptype(const struct nfp_ptype_parsed 
*nfp_ptype, struct rte_mbuf *mb)
case NFP_NET_PTYPE_TUNNEL_NONE:
break;
case NFP_NET_PTYPE_TUNNEL_VXLAN:
-   mbuf_ptype |= RTE_PTYPE_TUNNEL_VXLAN;
+   mbuf_ptype |= RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
break;
case NFP_NET_PTYPE_TUNNEL_NVGRE:
mbuf_ptype |= RTE_PTYPE_TUNNEL_NVGRE;
break;
case NFP_NET_PTYPE_TUNNEL_GENEVE:
-   mbuf_ptype |= RTE_PTYPE_TUNNEL_GENEVE;
+   mbuf_ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
break;
default:
PMD_RX_LOG(DEBUG, "Unrecognized nfp tunnel packet type: %u",
-- 
2.39.1



Re: [PATCH v4] net/bnx2x: support 2.5Gbps

2023-06-04 Thread Jerin Jacob
On Fri, Jun 2, 2023 at 1:27 PM Julien Aube  wrote:
>
> - add support for 2500baseX_Full in addition to 1000baseT_Full.
>   For 2.5Gbps speed, HSGMII mode shall be enabled in serdes.
> - add the possibility to support SC connectors on SFP (GPON are mostly SC)
> - change the initialisation time from 60ms to 1800ms (for xPON-Based SFP)
>
> This has been tested with GPON's ONU SFP but requires a specific
> firmware configuration described in the documentation.
>
> Signed-off-by: Julien Aube 

Applied to dpdk-next-net-mrvl/for-next-net. Thanks


> ---
>  .mailmap  |  1 +
>  doc/guides/nics/bnx2x.rst |  4 
>  drivers/net/bnx2x/elink.c | 19 +--
>  3 files changed, 22 insertions(+), 2 deletions(-)
>
> diff --git a/.mailmap b/.mailmap
> index db85cc66c6..cb689ef751 100644
> --- a/.mailmap
> +++ b/.mailmap
> @@ -666,6 +666,7 @@ JP Lee 
>  Juan Antonio Montesinos 
>  Juhamatti Kuusisaari 
>  Juho Snellman 
> +Julien Aube 
>  Julien Castets 
>  Julien Courtat 
>  Julien Cretin 
> diff --git a/doc/guides/nics/bnx2x.rst b/doc/guides/nics/bnx2x.rst
> index 788a6dac08..f19784db2c 100644
> --- a/doc/guides/nics/bnx2x.rst
> +++ b/doc/guides/nics/bnx2x.rst
> @@ -22,6 +22,7 @@ BNX2X PMD has support for:
>  - Promiscuous mode
>  - Port hardware statistics
>  - SR-IOV VF
> +- Experimental 2.5Gbps support
>
>  Non-supported Features
>  --
> @@ -72,6 +73,9 @@ Prerequisites
>`linux-firmware git repository 
> `_
>to get the required firmware.
>
> +- 2.5Gbps speed currently require that the firmware's nvm configuration 
> number 7 / 35 (first port) and 36 (second port) are set to 0x70
> +  for 1G/2.5G/10G support . This can be done for EFI or DOS using EDIAG tool 
> from Broadcom.
> +
>  Pre-Installation Configuration
>  --
>
> diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c
> index 43fbf04ece..2c81f85b96 100644
> --- a/drivers/net/bnx2x/elink.c
> +++ b/drivers/net/bnx2x/elink.c
> @@ -867,6 +867,7 @@ typedef elink_status_t 
> (*read_sfp_module_eeprom_func_p)(struct elink_phy *phy,
>
>  #define ELINK_SFP_EEPROM_CON_TYPE_ADDR 0x2
> #define ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN   0x0
> +   #define ELINK_SFP_EEPROM_CON_TYPE_VAL_SC0x1
> #define ELINK_SFP_EEPROM_CON_TYPE_VAL_LC0x7
> #define ELINK_SFP_EEPROM_CON_TYPE_VAL_COPPER0x21
> #define ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45  0x22
> @@ -5069,6 +5070,15 @@ static void elink_warpcore_set_sgmii_speed(struct 
> elink_phy *phy,
>  0x1000);
> ELINK_DEBUG_P0(sc, "set SGMII AUTONEG");
> } else {
> +   /* Note that 2.5G works only when used with 1G advertisement */
> +   if (fiber_mode && phy->req_line_speed == SPEED_2500 &&
> +  (phy->speed_cap_mask &
> +  (PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
> +   PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))) {
> +   elink_cl45_write(sc, phy, MDIO_WC_DEVAD,
> +   MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6010);
> +   }
> +
> elink_cl45_read(sc, phy, MDIO_WC_DEVAD,
> MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
> val16 &= 0xcebf;
> @@ -5079,6 +5089,7 @@ static void elink_warpcore_set_sgmii_speed(struct 
> elink_phy *phy,
> val16 |= 0x2000;
> break;
> case ELINK_SPEED_1000:
> +   case ELINK_SPEED_2500:
> val16 |= 0x0040;
> break;
> default:
> @@ -9138,6 +9149,7 @@ static elink_status_t elink_get_edc_mode(struct 
> elink_phy *phy,
> break;
> }
> case ELINK_SFP_EEPROM_CON_TYPE_VAL_UNKNOWN:
> +   case ELINK_SFP_EEPROM_CON_TYPE_VAL_SC:
> case ELINK_SFP_EEPROM_CON_TYPE_VAL_LC:
> case ELINK_SFP_EEPROM_CON_TYPE_VAL_RJ45:
> check_limiting_mode = 1;
> @@ -9151,7 +9163,8 @@ static elink_status_t elink_get_edc_mode(struct 
> elink_phy *phy,
> (val[ELINK_SFP_EEPROM_1G_COMP_CODE_ADDR] != 0)) {
> ELINK_DEBUG_P0(sc, "1G SFP module detected");
> phy->media_type = ELINK_ETH_PHY_SFP_1G_FIBER;
> -   if (phy->req_line_speed != ELINK_SPEED_1000) {
> +   if (phy->req_line_speed != ELINK_SPEED_1000 &&
> +   phy->req_line_speed != ELINK_SPEED_2500) {
> uint8_t gport = params->port;
> phy->req_line_speed = ELINK_SPEED_1000;
> if (!CHIP_IS_E1x(sc)) {
> @@ -9324,7 +9337,7 @@ static elink_status_t 
> elink_wait_for_sfp_module_initialized(
>  * som

Re: [PATCH] doc: build manpages as well as html output

2023-06-04 Thread Jerin Jacob
On Thu, Jun 1, 2023 at 9:08 PM Bruce Richardson
 wrote:
>
> Doxygen can produce manpage output as well as html output for the DPDK
> APIs. However, we need to do this as a separate task as the manpage
> output needs to be placed in a different location post-install to the
> html output (/usr/local/share/man vs /usr/local/share/doc/).
>
> Changes required are:
> * Add configurable options for manpage output and html output to the
>   doxygen config template. (Remove option for html output path as it's
>   always "html")
> * Modify API meson.build file to configure two separate doxygen config
>   files, for HTML and manpages respectively.
> * Change doxygen wrapper script to have separate output log files for
>   the manpage and HTML jobs, to avoid conflicts
> * Add "custom_targets" to meson.build file to build the HTML pages and
>   the manpages, with individual install locations for each.
> * Where supported by meson version, call "mandb" post-install to update
>   the man database to ensure the new manpages can be found.
>
> Signed-off-by: Bruce Richardson 

> +
> +mandb = find_program('mandb', required: false)
> +if mandb.found() and get_option('enable_docs') and 
> meson.version().version_compare('>=0.55.0')
> +meson.add_install_script(mandb)

It does not look like just executing mandb it is adding these man
pages to database

log:
Running custom install script '/usr/bin/mandb'
Purging old database entries in /home/jerin/.local/man...
Processing manual pages under /home/jerin/.local/man...
Checking for stray cats under /home/jerin/.local/man...
Processing manual pages under /home/jerin/.local/man/cat1...
Purging old database entries in /home/jerin/.local/share/man...
Processing manual pages under /home/jerin/.local/share/man...
Checking for stray cats under /home/jerin/.local/share/man...
Processing manual pages under /home/jerin/.local/share/man/cat1...
0 man subdirectories contained newer manual pages.
0 manual pages were added.
0 stray cats were added.
0 old database entries were purged.

[main][dpdk.org] $ man  rte_flow_create
No manual entry for rte_flow_create

# Following works by providing the path i.e man pages created properly
only db update is missing
man --manpath=/tmp/i/usr/local/share/man/ rte_flow_create


> +endif
> --
> 2.39.2
>


[PATCH] net/nfp: remove unused struct fields

2023-06-04 Thread Chaoyong He
Remove the data fields of the nfp structure which are not used by anyone,
and modify the corresponding logic.

Signed-off-by: Chaoyong He 
Reviewed-by: Niklas Söderlund 
---
 drivers/net/nfp/flower/nfp_flower.c |  4 +---
 drivers/net/nfp/nfp_common.h| 34 +
 drivers/net/nfp/nfp_ethdev_vf.c |  4 +---
 3 files changed, 3 insertions(+), 39 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower.c 
b/drivers/net/nfp/flower/nfp_flower.c
index f7e0ba3b76..c5cc537790 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -438,9 +438,7 @@ nfp_flower_pf_recv_pkts(void *rx_queue,
rte_ring_enqueue(repr->ring, (void *)mb);
avail_multiplexed++;
} else if (repr != NULL) {
-   PMD_RX_LOG(ERR, "[%u] No ring available for repr_port 
%s\n",
-   hw->idx, repr->name);
-   PMD_RX_LOG(DEBUG, "Adding the mbuf to the mbuf array 
passed by the app");
+   PMD_RX_LOG(ERR, "No ring available for repr_port %s", 
repr->name);
rx_pkts[avail++] = mb;
} else {
PMD_RX_LOG(DEBUG, "Adding the mbuf to the mbuf array 
passed by the app");
diff --git a/drivers/net/nfp/nfp_common.h b/drivers/net/nfp/nfp_common.h
index 56ea203d7e..2281445861 100644
--- a/drivers/net/nfp/nfp_common.h
+++ b/drivers/net/nfp/nfp_common.h
@@ -119,36 +119,14 @@ struct nfp_pf_dev {
/* The eth table reported by firmware */
struct nfp_eth_table *nfp_eth_table;
 
-   /* Current values for control */
-   uint32_t ctrl;
-
uint8_t *ctrl_bar;
-   uint8_t *tx_bar;
-   uint8_t *rx_bar;
-
-   uint8_t *qcp_cfg;
-   rte_spinlock_t reconfig_lock;
-
-   uint16_t flbufsz;
-   uint16_t device_id;
-   uint16_t vendor_id;
-   uint16_t subsystem_device_id;
-   uint16_t subsystem_vendor_id;
-#if defined(DSTQ_SELECTION)
-#if DSTQ_SELECTION
-   uint16_t device_function;
-#endif
-#endif
 
struct nfp_cpp *cpp;
struct nfp_cpp_area *ctrl_area;
struct nfp_cpp_area *hwqueues_area;
-   struct nfp_cpp_area *msix_area;
 
uint8_t *hw_queues;
 
-   union eth_table_entry *eth_table;
-
struct nfp_hwinfo *hwinfo;
struct nfp_rtsym_table *sym_tbl;
 
@@ -208,11 +186,6 @@ struct nfp_net_hw {
uint16_t vendor_id;
uint16_t subsystem_device_id;
uint16_t subsystem_vendor_id;
-#if defined(DSTQ_SELECTION)
-#if DSTQ_SELECTION
-   uint16_t device_function;
-#endif
-#endif
 
struct rte_ether_addr mac_addr;
 
@@ -222,19 +195,14 @@ struct nfp_net_hw {
 
struct nfp_cpp *cpp;
struct nfp_cpp_area *ctrl_area;
-   struct nfp_cpp_area *hwqueues_area;
-   struct nfp_cpp_area *msix_area;
struct nfp_cpp_area *mac_stats_area;
uint8_t *mac_stats_bar;
uint8_t *mac_stats;
 
-   uint8_t *hw_queues;
-   /* Sequential physical port number */
+   /* Sequential physical port number, only valid for CoreNIC firmware */
uint8_t idx;
/* Internal port number as seen from NFP */
uint8_t nfp_idx;
-
-   union eth_table_entry *eth_table;
 };
 
 struct nfp_net_adapter {
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index d4357ad115..71f5020ecd 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -402,7 +402,7 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR, "Failed to space for MAC address");
err = -ENOMEM;
-   goto dev_err_queues_map;
+   goto dev_err_ctrl_map;
}
 
nfp_netvf_read_mac(hw);
@@ -443,8 +443,6 @@ nfp_netvf_init(struct rte_eth_dev *eth_dev)
 
return 0;
 
-dev_err_queues_map:
-   nfp_cpp_area_free(hw->hwqueues_area);
 dev_err_ctrl_map:
nfp_cpp_area_free(hw->ctrl_area);
 
-- 
2.39.1



[PATCH] net/nfp: use common helper to trigger stats metering

2023-06-04 Thread Chaoyong He
From: Jin Liu 

Use the rte_eal_alarm_set() function instead of clock cycles to
implement the meter stats timer, to make the code more standard.

Signed-off-by: Jin Liu 
Reviewed-by: Chaoyong He 
Reviewed-by: Niklas Söderlund 
---
 drivers/net/nfp/flower/nfp_flower_ctrl.c | 20 --
 drivers/net/nfp/nfp_mtr.c| 27 +++-
 drivers/net/nfp/nfp_mtr.h|  2 --
 3 files changed, 26 insertions(+), 23 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c 
b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index 3e083d948e..9f069abe06 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -321,29 +321,15 @@ nfp_flower_cmsg_rx(struct nfp_app_fw_flower 
*app_fw_flower,
}
 }
 
-static void
-nfp_mtr_stats_request(struct nfp_app_fw_flower *app_fw_flower)
-{
-   struct nfp_mtr *mtr;
-
-   LIST_FOREACH(mtr, &app_fw_flower->mtr_priv->mtrs, next)
-   (void)nfp_flower_cmsg_qos_stats(app_fw_flower, 
&mtr->mtr_profile->conf.head);
-}
-
 void
 nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower *app_fw_flower)
 {
uint16_t count;
-   uint64_t cur_tsc;
-   uint64_t drain_tsc;
-   uint64_t pre_tsc = 0;
struct nfp_net_rxq *rxq;
struct nfp_net_hw *ctrl_hw;
struct rte_eth_dev *ctrl_eth_dev;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 
-   drain_tsc = app_fw_flower->mtr_priv->drain_tsc;
-
ctrl_hw = app_fw_flower->ctrl_hw;
ctrl_eth_dev = ctrl_hw->eth_dev;
 
@@ -357,11 +343,5 @@ nfp_flower_ctrl_vnic_poll(struct nfp_app_fw_flower 
*app_fw_flower)
/* Process cmsgs here */
nfp_flower_cmsg_rx(app_fw_flower, pkts_burst, count);
}
-
-   cur_tsc = rte_rdtsc();
-   if (unlikely(cur_tsc - pre_tsc > drain_tsc)) {
-   nfp_mtr_stats_request(app_fw_flower);
-   pre_tsc = cur_tsc;
-   }
}
 }
diff --git a/drivers/net/nfp/nfp_mtr.c b/drivers/net/nfp/nfp_mtr.c
index 5f85106f9d..afc4de4cc7 100644
--- a/drivers/net/nfp/nfp_mtr.c
+++ b/drivers/net/nfp/nfp_mtr.c
@@ -6,6 +6,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include "nfp_common.h"
 #include "nfp_mtr.h"
@@ -18,6 +19,9 @@
 #define NFP_FL_QOS_METERRTE_BIT32(10)
 #define NFP_FL_QOS_RFC2697  RTE_BIT32(0)
 
+/* Alarm timeout value in microseconds */
+#define NFP_METER_STATS_INTERVAL 100  /* 1 second */
+
 /**
  * Callback to get MTR capabilities.
  *
@@ -1072,9 +1076,22 @@ nfp_net_mtr_ops_get(struct rte_eth_dev *dev, void *arg)
return 0;
 }
 
+static void
+nfp_mtr_stats_request(void *arg)
+{
+   struct nfp_mtr *mtr;
+   struct nfp_app_fw_flower *app_fw_flower = arg;
+
+   LIST_FOREACH(mtr, &app_fw_flower->mtr_priv->mtrs, next)
+   nfp_flower_cmsg_qos_stats(app_fw_flower, 
&mtr->mtr_profile->conf.head);
+
+   rte_eal_alarm_set(NFP_METER_STATS_INTERVAL, nfp_mtr_stats_request, arg);
+}
+
 int
 nfp_mtr_priv_init(struct nfp_pf_dev *pf_dev)
 {
+   int ret;
struct nfp_mtr_priv *priv;
struct nfp_app_fw_flower *app_fw_flower;
 
@@ -1087,7 +1104,13 @@ nfp_mtr_priv_init(struct nfp_pf_dev *pf_dev)
app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
app_fw_flower->mtr_priv = priv;
 
-   priv->drain_tsc = rte_get_tsc_hz();
+   ret = rte_eal_alarm_set(NFP_METER_STATS_INTERVAL, nfp_mtr_stats_request,
+   (void *)app_fw_flower);
+   if (ret < 0) {
+   PMD_INIT_LOG(ERR, "nfp mtr timer init failed.");
+   rte_free(priv);
+   return ret;
+   }
 
LIST_INIT(&priv->mtrs);
LIST_INIT(&priv->profiles);
@@ -1110,6 +1133,8 @@ nfp_mtr_priv_uninit(struct nfp_pf_dev *pf_dev)
app_fw_flower = NFP_PRIV_TO_APP_FW_FLOWER(pf_dev->app_fw_priv);
priv = app_fw_flower->mtr_priv;
 
+   rte_eal_alarm_cancel(nfp_mtr_stats_request, (void *)app_fw_flower);
+
LIST_FOREACH(mtr, &priv->mtrs, next) {
LIST_REMOVE(mtr, next);
rte_free(mtr);
diff --git a/drivers/net/nfp/nfp_mtr.h b/drivers/net/nfp/nfp_mtr.h
index 41c472f139..f5406381ab 100644
--- a/drivers/net/nfp/nfp_mtr.h
+++ b/drivers/net/nfp/nfp_mtr.h
@@ -159,14 +159,12 @@ struct nfp_mtr {
  * @policies:the head node of policy list
  * @mtrs:the head node of mtrs list
  * @mtr_stats_lock:  spinlock for meter stats
- * @drain_tsc:   clock period
  */
 struct nfp_mtr_priv {
LIST_HEAD(, nfp_mtr_profile) profiles;
LIST_HEAD(, nfp_mtr_policy) policies;
LIST_HEAD(, nfp_mtr) mtrs;
rte_spinlock_t mtr_stats_lock;
-   uint64_t drain_tsc;
 };
 
 int nfp_net_mtr_ops_get(struct rte_eth_dev *dev, void *arg);
-- 
2.39.1



Re: [PATCH 00/10] support telemetry query ethdev info

2023-06-04 Thread Jie Hai

On 2023/6/1 22:36, Ferruh Yigit wrote:

On 5/30/2023 10:05 AM, Jie Hai wrote:

This patchset supports querying information about ethdev.
The information includes MAC addresses, RxTx offload, flow ctrl,
Rx|Tx queue, firmware version, DCB, RSS, FEC, VLAN, etc.


Dengdui Huang (1):
   ethdev: support telemetry query MAC addresses

Jie Hai (9):
   ethdev: support RxTx offload display
   ethdev: support telemetry query flow ctrl info
   ethdev: support telemetry query Rx queue info
   ethdev: support telemetry query Tx queue info
   ethdev: add firmware version in telemetry info command
   ethdev: support telemetry query DCB info
   ethdev: support telemetry query RSS info
   ethdev: support telemetry query FEC info
   ethdev: support telemetry query VLAN info



Hi Jie,

Overall it is good to add more telemetry support, but it is making
'rte_ethdev.c' bigger, specially naming of the static functions that
telemetry handlers use making file confusing.
Can you please create a specific file for telemetry functions?

First you can move the existing ones and later add your patches.

Also there is a common part that reads and verifies port_id, I think
that part can be extracted to a common function, I will comment on it in
one of the patches.

Thanks,
ferruh
.

Hi ferruh,

Thanks for your review, I will fix them in the next version.

Thanks,
Jie Hai


[PATCH v8 00/14] net/cpfl: add hairpin queue support

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patchset adds hairpin queue support.

v2 changes:
 - change hairpin rx queues configuration sequence.
 - code refine.

v3 changes:
 - Refine the patchset based on the latest code.

v4 change:
 - Remove hairpin rx buffer queue's sw_ring.
 - Change hairpin rx queues configuration sequence in cpfl_hairpin_bind function.
 - Refine hairpin queue setup and release.

v5 change:
 - Fix memory leak during queue setup.
 - Refine hairpin Rxq/Txq start/stop.

v6 change:
 - Add sign-off.

v7 change:
 - Update cpfl.rst

v8 change:
 - Fix Intel-compilation failure.

Beilei Xing (14):
  net/cpfl: refine structures
  common/idpf: support queue groups add/delete
  net/cpfl: add hairpin queue group during vport init
  net/cpfl: support hairpin queue capability get
  net/cpfl: support hairpin queue setup and release
  common/idpf: add queue config API
  net/cpfl: support hairpin queue configuration
  common/idpf: add switch queue API
  net/cpfl: support hairpin queue start/stop
  common/idpf: add irq map config API
  net/cpfl: enable write back based on ITR expire
  net/cpfl: support peer ports get
  net/cpfl: support hairpin bind/unbind
  doc: update the doc of CPFL PMD

 doc/guides/nics/cpfl.rst   |   7 +
 drivers/common/idpf/idpf_common_device.c   |  75 ++
 drivers/common/idpf/idpf_common_device.h   |   4 +
 drivers/common/idpf/idpf_common_virtchnl.c | 138 +++-
 drivers/common/idpf/idpf_common_virtchnl.h |  18 +
 drivers/common/idpf/version.map|   6 +
 drivers/net/cpfl/cpfl_ethdev.c | 611 ++--
 drivers/net/cpfl/cpfl_ethdev.h |  35 +-
 drivers/net/cpfl/cpfl_rxtx.c   | 781 +++--
 drivers/net/cpfl/cpfl_rxtx.h   |  76 ++
 drivers/net/cpfl/cpfl_rxtx_vec_common.h|  21 +-
 11 files changed, 1653 insertions(+), 119 deletions(-)

-- 
2.26.2



[PATCH v8 01/14] net/cpfl: refine structures

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch refines some structures to support hairpin queue,
cpfl_rx_queue/cpfl_tx_queue/cpfl_vport.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  |  85 +++-
 drivers/net/cpfl/cpfl_ethdev.h  |   6 +-
 drivers/net/cpfl/cpfl_rxtx.c| 175 +---
 drivers/net/cpfl/cpfl_rxtx.h|   8 ++
 drivers/net/cpfl/cpfl_rxtx_vec_common.h |  17 +--
 5 files changed, 196 insertions(+), 95 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 7528a14d05..e587155db6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -124,7 +124,8 @@ static int
 cpfl_dev_link_update(struct rte_eth_dev *dev,
 __rte_unused int wait_to_complete)
 {
-   struct idpf_vport *vport = dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct rte_eth_link new_link;
unsigned int i;
 
@@ -156,7 +157,8 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
 static int
 cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-   struct idpf_vport *vport = dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
 
dev_info->max_rx_queues = base->caps.max_rx_q;
@@ -216,7 +218,8 @@ cpfl_dev_info_get(struct rte_eth_dev *dev, struct 
rte_eth_dev_info *dev_info)
 static int
 cpfl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-   struct idpf_vport *vport = dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
 
/* mtu setting is forbidden if port is start */
if (dev->data->dev_started) {
@@ -256,12 +259,12 @@ static uint64_t
 cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
uint64_t mbuf_alloc_failed = 0;
-   struct idpf_rx_queue *rxq;
+   struct cpfl_rx_queue *cpfl_rxq;
int i = 0;
 
for (i = 0; i < dev->data->nb_rx_queues; i++) {
-   rxq = dev->data->rx_queues[i];
-   mbuf_alloc_failed += 
__atomic_load_n(&rxq->rx_stats.mbuf_alloc_failed,
+   cpfl_rxq = dev->data->rx_queues[i];
+   mbuf_alloc_failed += 
__atomic_load_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed,
 __ATOMIC_RELAXED);
}
 
@@ -271,8 +274,8 @@ cpfl_get_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 static int
 cpfl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
-   struct idpf_vport *vport =
-   (struct idpf_vport *)dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
 
@@ -305,20 +308,20 @@ cpfl_dev_stats_get(struct rte_eth_dev *dev, struct 
rte_eth_stats *stats)
 static void
 cpfl_reset_mbuf_alloc_failed_stats(struct rte_eth_dev *dev)
 {
-   struct idpf_rx_queue *rxq;
+   struct cpfl_rx_queue *cpfl_rxq;
int i;
 
for (i = 0; i < dev->data->nb_rx_queues; i++) {
-   rxq = dev->data->rx_queues[i];
-   __atomic_store_n(&rxq->rx_stats.mbuf_alloc_failed, 0, 
__ATOMIC_RELAXED);
+   cpfl_rxq = dev->data->rx_queues[i];
+   __atomic_store_n(&cpfl_rxq->base.rx_stats.mbuf_alloc_failed, 0, 
__ATOMIC_RELAXED);
}
 }
 
 static int
 cpfl_dev_stats_reset(struct rte_eth_dev *dev)
 {
-   struct idpf_vport *vport =
-   (struct idpf_vport *)dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
int ret;
 
@@ -343,8 +346,8 @@ static int cpfl_dev_xstats_reset(struct rte_eth_dev *dev)
 static int cpfl_dev_xstats_get(struct rte_eth_dev *dev,
   struct rte_eth_xstat *xstats, unsigned int n)
 {
-   struct idpf_vport *vport =
-   (struct idpf_vport *)dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct virtchnl2_vport_stats *pstats = NULL;
unsigned int i;
int ret;
@@ -459,7 +462,8 @@ cpfl_rss_reta_update(struct rte_eth_dev *dev,
 struct rte_eth_rss_reta_entry64 *reta_conf,
 uint16_t reta_size)
 {
-   struct idpf_vport *vport = dev->data->dev_private;
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct idpf_adapter *base = vport->adapter;
uin

[PATCH v8 02/14] common/idpf: support queue groups add/delete

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch adds queue group add/delete virtual channel support.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_virtchnl.c | 66 ++
 drivers/common/idpf/idpf_common_virtchnl.h |  9 +++
 drivers/common/idpf/version.map|  2 +
 3 files changed, 77 insertions(+)

diff --git a/drivers/common/idpf/idpf_common_virtchnl.c 
b/drivers/common/idpf/idpf_common_virtchnl.c
index b713678634..a3fe55c897 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -359,6 +359,72 @@ idpf_vc_vport_destroy(struct idpf_vport *vport)
return err;
 }
 
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+  struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+  uint8_t *p2p_queue_grps_out)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct idpf_cmd_info args;
+   int size, qg_info_size;
+   int err = -1;
+
+   size = sizeof(*p2p_queue_grps_info) +
+  (p2p_queue_grps_info->qg_info.num_queue_groups - 1) *
+  sizeof(struct virtchnl2_queue_group_info);
+
+   memset(&args, 0, sizeof(args));
+   args.ops = VIRTCHNL2_OP_ADD_QUEUE_GROUPS;
+   args.in_args = (uint8_t *)p2p_queue_grps_info;
+   args.in_args_size = size;
+   args.out_buffer = adapter->mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(adapter, &args);
+   if (err != 0) {
+   DRV_LOG(ERR,
+   "Failed to execute command of 
VIRTCHNL2_OP_ADD_QUEUE_GROUPS");
+   return err;
+   }
+
+   rte_memcpy(p2p_queue_grps_out, args.out_buffer, IDPF_DFLT_MBX_BUF_SIZE);
+   return 0;
+}
+
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+ uint16_t num_q_grps,
+ struct virtchnl2_queue_group_id *qg_ids)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct virtchnl2_delete_queue_groups *vc_del_q_grps;
+   struct idpf_cmd_info args;
+   int size;
+   int err;
+
+   size = sizeof(*vc_del_q_grps) +
+  (num_q_grps - 1) * sizeof(struct virtchnl2_queue_group_id);
+   vc_del_q_grps = rte_zmalloc("vc_del_q_grps", size, 0);
+
+   vc_del_q_grps->vport_id = vport->vport_id;
+   vc_del_q_grps->num_queue_groups = num_q_grps;
+   memcpy(vc_del_q_grps->qg_ids, qg_ids,
+  num_q_grps * sizeof(struct virtchnl2_queue_group_id));
+
+   memset(&args, 0, sizeof(args));
+   args.ops = VIRTCHNL2_OP_DEL_QUEUE_GROUPS;
+   args.in_args = (uint8_t *)vc_del_q_grps;
+   args.in_args_size = size;
+   args.out_buffer = adapter->mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(adapter, &args);
+   if (err != 0)
+   DRV_LOG(ERR, "Failed to execute command of 
VIRTCHNL2_OP_DEL_QUEUE_GROUPS");
+
+   rte_free(vc_del_q_grps);
+   return err;
+}
+
 int
 idpf_vc_rss_key_set(struct idpf_vport *vport)
 {
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h 
b/drivers/common/idpf/idpf_common_virtchnl.h
index c45295290e..58b16e1c5d 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -64,4 +64,13 @@ int idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 
*num_q_msg,
 __rte_internal
 int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
   u16 *buff_count, struct idpf_dma_mem **buffs);
+__rte_internal
+int idpf_vc_queue_grps_del(struct idpf_vport *vport,
+  uint16_t num_q_grps,
+  struct virtchnl2_queue_group_id *qg_ids);
+__rte_internal
+int
+idpf_vc_queue_grps_add(struct idpf_vport *vport,
+  struct virtchnl2_add_queue_groups *ptp_queue_grps_info,
+  uint8_t *ptp_queue_grps_out);
 #endif /* _IDPF_COMMON_VIRTCHNL_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 70334a1b03..01d18f3f3f 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -43,6 +43,8 @@ INTERNAL {
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
+   idpf_vc_queue_grps_add;
+   idpf_vc_queue_grps_del;
idpf_vc_queue_switch;
idpf_vc_queues_ena_dis;
idpf_vc_rss_hash_get;
-- 
2.26.2



[PATCH v8 03/14] net/cpfl: add hairpin queue group during vport init

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch adds hairpin queue group during vport init.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 133 +
 drivers/net/cpfl/cpfl_ethdev.h |  18 +
 drivers/net/cpfl/cpfl_rxtx.h   |   7 ++
 3 files changed, 158 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index e587155db6..c1273a7478 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -840,6 +840,20 @@ cpfl_dev_stop(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+cpfl_p2p_queue_grps_del(struct idpf_vport *vport)
+{
+   struct virtchnl2_queue_group_id qg_ids[CPFL_P2P_NB_QUEUE_GRPS] = {0};
+   int ret = 0;
+
+   qg_ids[0].queue_group_id = CPFL_P2P_QUEUE_GRP_ID;
+   qg_ids[0].queue_group_type = VIRTCHNL2_QUEUE_GROUP_P2P;
+   ret = idpf_vc_queue_grps_del(vport, CPFL_P2P_NB_QUEUE_GRPS, qg_ids);
+   if (ret)
+   PMD_DRV_LOG(ERR, "Failed to delete p2p queue groups");
+   return ret;
+}
+
 static int
 cpfl_dev_close(struct rte_eth_dev *dev)
 {
@@ -848,7 +862,12 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
 
cpfl_dev_stop(dev);
+
+   if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
+   cpfl_p2p_queue_grps_del(vport);
+
idpf_vport_deinit(vport);
+   rte_free(cpfl_vport->p2p_q_chunks_info);
 
adapter->cur_vports &= ~RTE_BIT32(vport->devarg_id);
adapter->cur_vport_nb--;
@@ -1284,6 +1303,96 @@ cpfl_vport_idx_alloc(struct cpfl_adapter_ext *adapter)
return vport_idx;
 }
 
+static int
+cpfl_p2p_q_grps_add(struct idpf_vport *vport,
+   struct virtchnl2_add_queue_groups *p2p_queue_grps_info,
+   uint8_t *p2p_q_vc_out_info)
+{
+   int ret;
+
+   p2p_queue_grps_info->vport_id = vport->vport_id;
+   p2p_queue_grps_info->qg_info.num_queue_groups = CPFL_P2P_NB_QUEUE_GRPS;
+   p2p_queue_grps_info->qg_info.groups[0].num_rx_q = 
CPFL_MAX_P2P_NB_QUEUES;
+   p2p_queue_grps_info->qg_info.groups[0].num_rx_bufq = 
CPFL_P2P_NB_RX_BUFQ;
+   p2p_queue_grps_info->qg_info.groups[0].num_tx_q = 
CPFL_MAX_P2P_NB_QUEUES;
+   p2p_queue_grps_info->qg_info.groups[0].num_tx_complq = 
CPFL_P2P_NB_TX_COMPLQ;
+   p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_id = 
CPFL_P2P_QUEUE_GRP_ID;
+   p2p_queue_grps_info->qg_info.groups[0].qg_id.queue_group_type = 
VIRTCHNL2_QUEUE_GROUP_P2P;
+   p2p_queue_grps_info->qg_info.groups[0].rx_q_grp_info.rss_lut_size = 0;
+   p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.tx_tc = 0;
+   p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.priority = 0;
+   p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.is_sp = 0;
+   p2p_queue_grps_info->qg_info.groups[0].tx_q_grp_info.pir_weight = 0;
+
+   ret = idpf_vc_queue_grps_add(vport, p2p_queue_grps_info, 
p2p_q_vc_out_info);
+   if (ret != 0) {
+   PMD_DRV_LOG(ERR, "Failed to add p2p queue groups.");
+   return ret;
+   }
+
+   return ret;
+}
+
+static int
+cpfl_p2p_queue_info_init(struct cpfl_vport *cpfl_vport,
+struct virtchnl2_add_queue_groups *p2p_q_vc_out_info)
+{
+   struct p2p_queue_chunks_info *p2p_q_chunks_info = 
cpfl_vport->p2p_q_chunks_info;
+   struct virtchnl2_queue_reg_chunks *vc_chunks_out;
+   int i, type;
+
+   if (p2p_q_vc_out_info->qg_info.groups[0].qg_id.queue_group_type !=
+   VIRTCHNL2_QUEUE_GROUP_P2P) {
+   PMD_DRV_LOG(ERR, "Add queue group response mismatch.");
+   return -EINVAL;
+   }
+
+   vc_chunks_out = &p2p_q_vc_out_info->qg_info.groups[0].chunks;
+
+   for (i = 0; i < vc_chunks_out->num_chunks; i++) {
+   type = vc_chunks_out->chunks[i].type;
+   switch (type) {
+   case VIRTCHNL2_QUEUE_TYPE_TX:
+   p2p_q_chunks_info->tx_start_qid =
+   vc_chunks_out->chunks[i].start_queue_id;
+   p2p_q_chunks_info->tx_qtail_start =
+   vc_chunks_out->chunks[i].qtail_reg_start;
+   p2p_q_chunks_info->tx_qtail_spacing =
+   vc_chunks_out->chunks[i].qtail_reg_spacing;
+   break;
+   case VIRTCHNL2_QUEUE_TYPE_RX:
+   p2p_q_chunks_info->rx_start_qid =
+   vc_chunks_out->chunks[i].start_queue_id;
+   p2p_q_chunks_info->rx_qtail_start =
+   vc_chunks_out->chunks[i].qtail_reg_start;
+   p2p_q_chunks_info->rx_qtail_spacing =
+   vc_chunks_out->chunks[i].qtail_reg_spacing;
+   break;
+   case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:

[PATCH v8 04/14] net/cpfl: support hairpin queue capability get

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch adds hairpin_cap_get ops support.

Signed-off-by: Xiao Wang 
Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 18 ++
 drivers/net/cpfl/cpfl_rxtx.h   |  3 +++
 2 files changed, 21 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index c1273a7478..40b4515539 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -154,6 +154,23 @@ cpfl_dev_link_update(struct rte_eth_dev *dev,
return rte_eth_linkstatus_set(dev, &new_link);
 }
 
+static int
+cpfl_hairpin_cap_get(struct rte_eth_dev *dev,
+struct rte_eth_hairpin_cap *cap)
+{
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+
+   if (cpfl_vport->p2p_q_chunks_info == NULL)
+   return -ENOTSUP;
+
+   cap->max_nb_queues = CPFL_MAX_P2P_NB_QUEUES;
+   cap->max_rx_2_tx = CPFL_MAX_HAIRPINQ_RX_2_TX;
+   cap->max_tx_2_rx = CPFL_MAX_HAIRPINQ_TX_2_RX;
+   cap->max_nb_desc = CPFL_MAX_HAIRPINQ_NB_DESC;
+
+   return 0;
+}
+
 static int
 cpfl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -904,6 +921,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get = cpfl_dev_xstats_get,
.xstats_get_names   = cpfl_dev_xstats_get_names,
.xstats_reset   = cpfl_dev_xstats_reset,
+   .hairpin_cap_get= cpfl_hairpin_cap_get,
 };
 
 static int
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 1fe65778f0..a4a164d462 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -14,6 +14,9 @@
 #define CPFL_MAX_RING_DESC 4096
 #define CPFL_DMA_MEM_ALIGN 4096
 
+#define CPFL_MAX_HAIRPINQ_RX_2_TX  1
+#define CPFL_MAX_HAIRPINQ_TX_2_RX  1
+#define CPFL_MAX_HAIRPINQ_NB_DESC  1024
 #define CPFL_MAX_P2P_NB_QUEUES 16
 #define CPFL_P2P_NB_RX_BUFQ1
 #define CPFL_P2P_NB_TX_COMPLQ  1
-- 
2.26.2



[PATCH v8 05/14] net/cpfl: support hairpin queue setup and release

2023-06-04 Thread beilei . xing
From: Beilei Xing 

Support hairpin Rx/Tx queue setup and release.

Signed-off-by: Xiao Wang 
Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c  |   6 +
 drivers/net/cpfl/cpfl_ethdev.h  |  11 +
 drivers/net/cpfl/cpfl_rxtx.c| 364 +++-
 drivers/net/cpfl/cpfl_rxtx.h|  36 +++
 drivers/net/cpfl/cpfl_rxtx_vec_common.h |   4 +
 5 files changed, 420 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 40b4515539..b17c538ec2 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport->adapter);
 
cpfl_dev_stop(dev);
+   if (cpfl_vport->p2p_mp) {
+   rte_mempool_free(cpfl_vport->p2p_mp);
+   cpfl_vport->p2p_mp = NULL;
+   }
 
if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
cpfl_p2p_queue_grps_del(vport);
@@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.xstats_get_names   = cpfl_dev_xstats_get_names,
.xstats_reset   = cpfl_dev_xstats_reset,
.hairpin_cap_get= cpfl_hairpin_cap_get,
+   .rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
+   .tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
 };
 
 static int
diff --git a/drivers/net/cpfl/cpfl_ethdev.h b/drivers/net/cpfl/cpfl_ethdev.h
index 666d46a44a..2e42354f70 100644
--- a/drivers/net/cpfl/cpfl_ethdev.h
+++ b/drivers/net/cpfl/cpfl_ethdev.h
@@ -89,6 +89,17 @@ struct p2p_queue_chunks_info {
 struct cpfl_vport {
struct idpf_vport base;
struct p2p_queue_chunks_info *p2p_q_chunks_info;
+
+   struct rte_mempool *p2p_mp;
+
+   uint16_t nb_data_rxq;
+   uint16_t nb_data_txq;
+   uint16_t nb_p2p_rxq;
+   uint16_t nb_p2p_txq;
+
+   struct idpf_rx_queue *p2p_rx_bufq;
+   struct idpf_tx_queue *p2p_tx_complq;
+   bool p2p_manual_bind;
 };
 
 struct cpfl_adapter_ext {
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 04a51b8d15..90b408d1f4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -10,6 +10,67 @@
 #include "cpfl_rxtx.h"
 #include "cpfl_rxtx_vec_common.h"
 
+static inline void
+cpfl_tx_hairpin_descq_reset(struct idpf_tx_queue *txq)
+{
+   uint32_t i, size;
+
+   if (!txq) {
+   PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
+   return;
+   }
+
+   size = txq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+   for (i = 0; i < size; i++)
+   ((volatile char *)txq->desc_ring)[i] = 0;
+}
+
+static inline void
+cpfl_tx_hairpin_complq_reset(struct idpf_tx_queue *cq)
+{
+   uint32_t i, size;
+
+   if (!cq) {
+   PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
+   return;
+   }
+
+   size = cq->nb_tx_desc * CPFL_P2P_DESC_LEN;
+   for (i = 0; i < size; i++)
+   ((volatile char *)cq->compl_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_descq_reset(struct idpf_rx_queue *rxq)
+{
+   uint16_t len;
+   uint32_t i;
+
+   if (!rxq)
+   return;
+
+   len = rxq->nb_rx_desc;
+   for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+   ((volatile char *)rxq->rx_ring)[i] = 0;
+}
+
+static inline void
+cpfl_rx_hairpin_bufq_reset(struct idpf_rx_queue *rxbq)
+{
+   uint16_t len;
+   uint32_t i;
+
+   if (!rxbq)
+   return;
+
+   len = rxbq->nb_rx_desc;
+   for (i = 0; i < len * CPFL_P2P_DESC_LEN; i++)
+   ((volatile char *)rxbq->rx_ring)[i] = 0;
+
+   rxbq->bufq1 = NULL;
+   rxbq->bufq2 = NULL;
+}
+
 static uint64_t
 cpfl_rx_offload_convert(uint64_t offload)
 {
@@ -234,7 +295,10 @@ cpfl_rx_queue_release(void *rxq)
 
/* Split queue */
if (!q->adapter->is_rx_singleq) {
-   if (q->bufq2)
+   /* the mz is shared between Tx/Rx hairpin, let Rx_release
+* free the buf, q->bufq1->mz and q->mz.
+*/
+   if (!cpfl_rxq->hairpin_info.hairpin_q && q->bufq2)
cpfl_rx_split_bufq_release(q->bufq2);
 
if (q->bufq1)
@@ -385,6 +449,7 @@ cpfl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
}
}
 
+   cpfl_vport->nb_data_rxq++;
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = cpfl_rxq;
 
@@ -548,6 +613,7 @@ cpfl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t 
queue_idx,
txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
queue_idx * vport->chunks_info.tx_qtail_spacing);
txq->ops = &def_txq_ops;
+   cpfl_vport->nb_data_txq++;
txq->q_set = true;
dev->data->tx_q

[PATCH v8 06/14] common/idpf: add queue config API

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports Rx/Tx queue configuration APIs.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_virtchnl.c | 70 ++
 drivers/common/idpf/idpf_common_virtchnl.h |  6 ++
 drivers/common/idpf/version.map|  2 +
 3 files changed, 78 insertions(+)

diff --git a/drivers/common/idpf/idpf_common_virtchnl.c 
b/drivers/common/idpf/idpf_common_virtchnl.c
index a3fe55c897..211b44a88e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -1050,6 +1050,41 @@ idpf_vc_rxq_config(struct idpf_vport *vport, struct 
idpf_rx_queue *rxq)
return err;
 }
 
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct 
virtchnl2_rxq_info *rxq_info,
+  uint16_t num_qs)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct virtchnl2_config_rx_queues *vc_rxqs = NULL;
+   struct idpf_cmd_info args;
+   int size, err, i;
+
+   size = sizeof(*vc_rxqs) + (num_qs - 1) *
+   sizeof(struct virtchnl2_rxq_info);
+   vc_rxqs = rte_zmalloc("cfg_rxqs", size, 0);
+   if (vc_rxqs == NULL) {
+   DRV_LOG(ERR, "Failed to allocate virtchnl2_config_rx_queues");
+   err = -ENOMEM;
+   return err;
+   }
+   vc_rxqs->vport_id = vport->vport_id;
+   vc_rxqs->num_qinfo = num_qs;
+   memcpy(vc_rxqs->qinfo, rxq_info, num_qs * sizeof(struct 
virtchnl2_rxq_info));
+
+   memset(&args, 0, sizeof(args));
+   args.ops = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+   args.in_args = (uint8_t *)vc_rxqs;
+   args.in_args_size = size;
+   args.out_buffer = adapter->mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(adapter, &args);
+   rte_free(vc_rxqs);
+   if (err != 0)
+   DRV_LOG(ERR, "Failed to execute command of 
VIRTCHNL2_OP_CONFIG_RX_QUEUES");
+
+   return err;
+}
+
 int
 idpf_vc_txq_config(struct idpf_vport *vport, struct idpf_tx_queue *txq)
 {
@@ -1121,6 +1156,41 @@ idpf_vc_txq_config(struct idpf_vport *vport, struct 
idpf_tx_queue *txq)
return err;
 }
 
+int
+idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct virtchnl2_txq_info 
*txq_info,
+  uint16_t num_qs)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct virtchnl2_config_tx_queues *vc_txqs = NULL;
+   struct idpf_cmd_info args;
+   int size, err;
+
+   size = sizeof(*vc_txqs) + (num_qs - 1) * sizeof(struct 
virtchnl2_txq_info);
+   vc_txqs = rte_zmalloc("cfg_txqs", size, 0);
+   if (vc_txqs == NULL) {
+   DRV_LOG(ERR, "Failed to allocate virtchnl2_config_tx_queues");
+   err = -ENOMEM;
+   return err;
+   }
+   vc_txqs->vport_id = vport->vport_id;
+   vc_txqs->num_qinfo = num_qs;
+   memcpy(vc_txqs->qinfo, txq_info, num_qs * sizeof(struct 
virtchnl2_txq_info));
+
+   memset(&args, 0, sizeof(args));
+   args.ops = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+   args.in_args = (uint8_t *)vc_txqs;
+   args.in_args_size = size;
+   args.out_buffer = adapter->mbx_resp;
+   args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+
+   err = idpf_vc_cmd_execute(adapter, &args);
+   rte_free(vc_txqs);
+   if (err != 0)
+   DRV_LOG(ERR, "Failed to execute command of 
VIRTCHNL2_OP_CONFIG_TX_QUEUES");
+
+   return err;
+}
+
 int
 idpf_vc_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
  struct idpf_ctlq_msg *q_msg)
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h 
b/drivers/common/idpf/idpf_common_virtchnl.h
index 58b16e1c5d..db83761a5e 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -65,6 +65,12 @@ __rte_internal
 int idpf_vc_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
   u16 *buff_count, struct idpf_dma_mem **buffs);
 __rte_internal
+int idpf_vc_rxq_config_by_info(struct idpf_vport *vport, struct 
virtchnl2_rxq_info *rxq_info,
+  uint16_t num_qs);
+__rte_internal
+int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct 
virtchnl2_txq_info *txq_info,
+  uint16_t num_qs);
+__rte_internal
 int idpf_vc_queue_grps_del(struct idpf_vport *vport,
   uint16_t num_q_grps,
   struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 01d18f3f3f..17e77884ce 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -54,8 +54,10 @@ INTERNAL {
idpf_vc_rss_lut_get;
idpf_vc_rss_lut_set;
idpf_vc_rxq_config;
+   idpf_vc_rxq_config_by_info;
idpf_vc_stats_query;
idpf_vc_txq_config;
+   idpf_vc_txq_config_by_info;
idpf_vc_vectors

[PATCH v8 07/14] net/cpfl: support hairpin queue configuration

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports Rx/Tx hairpin queue configuration.

Signed-off-by: Xiao Wang 
Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 136 +++--
 drivers/net/cpfl/cpfl_rxtx.c   |  80 +++
 drivers/net/cpfl/cpfl_rxtx.h   |   7 ++
 3 files changed, 217 insertions(+), 6 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
return idpf_vport_irq_map_config(vport, nb_rx_queues);
 }
 
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+   struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+   struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+   struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+   struct cpfl_txq_hairpin_info *hairpin_info;
+   struct cpfl_tx_queue *cpfl_txq;
+   int i;
+
+   for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+   cpfl_txq = dev->data->tx_queues[i];
+   hairpin_info = &cpfl_txq->hairpin_info;
+   if (hairpin_info->peer_rxp != rx_port) {
+   PMD_DRV_LOG(ERR, "port %d is not the peer port", 
rx_port);
+   return -EINVAL;
+   }
+   hairpin_info->peer_rxq_id =
+   
cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+   hairpin_info->peer_rxq_id - 
cpfl_rx_vport->nb_data_rxq);
+   }
+
+   return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone 
*/
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+   struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_rx_vport->base;
+   struct idpf_adapter *adapter = vport->adapter;
+   struct idpf_hw *hw = &adapter->hw;
+   struct cpfl_rx_queue *cpfl_rxq;
+   struct cpfl_tx_queue *cpfl_txq;
+   struct rte_eth_dev *peer_dev;
+   const struct rte_memzone *mz;
+   uint16_t peer_tx_port;
+   uint16_t peer_tx_qid;
+   int i;
+
+   for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+   cpfl_rxq = dev->data->rx_queues[i];
+   peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+   peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+   peer_dev = &rte_eth_devices[peer_tx_port];
+   cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+   /* bind rx queue */
+   mz = cpfl_txq->base.mz;
+   cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+   cpfl_rxq->base.rx_ring = mz->addr;
+   cpfl_rxq->base.mz = mz;
+
+   /* bind rx buffer queue */
+   mz = cpfl_txq->base.complq->mz;
+   cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+   cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+   cpfl_rxq->base.bufq1->mz = mz;
+   cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+   
cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+   0, 
cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+   }
+}
+
 static int
 cpfl_start_queues(struct rte_eth_dev *dev)
 {
+   struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+   struct idpf_vport *vport = &cpfl_vport->base;
struct cpfl_rx_queue *cpfl_rxq;
struct cpfl_tx_queue *cpfl_txq;
+   int update_flag = 0;
int err = 0;
int i;
 
+   /* For normal data queues, configure, init and enale Txq.
+* For non-manual bind hairpin queues, configure Txq.
+*/
for (i = 0; i < dev->data->nb_tx_queues; i++) {
cpfl_txq = dev->data->tx_queues[i];
if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
continue;
-   err = cpfl_tx_queue_start(dev, i);
+   if (!cpfl_txq->hairpin_info.hairpin_q) {
+   err = cpfl_tx_queue_start(dev, i);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", 
i);
+   return err;
+   }
+   } else if (!cpfl_vport->p2p_manual_bind) {
+   if (update_flag == 0) {
+   err = cpfl_txq_hairpin_info_update(dev,
+  
cpfl_txq->hairpin_info.peer_rxp);
+   if (err != 0) {
+

[PATCH v8 08/14] common/idpf: add switch queue API

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch adds idpf_vc_ena_dis_one_queue API.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_virtchnl.c | 2 +-
 drivers/common/idpf/idpf_common_virtchnl.h | 3 +++
 drivers/common/idpf/version.map| 1 +
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/drivers/common/idpf/idpf_common_virtchnl.c 
b/drivers/common/idpf/idpf_common_virtchnl.c
index 211b44a88e..6455f640da 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.c
+++ b/drivers/common/idpf/idpf_common_virtchnl.c
@@ -733,7 +733,7 @@ idpf_vc_vectors_dealloc(struct idpf_vport *vport)
return err;
 }
 
-static int
+int
 idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
  uint32_t type, bool on)
 {
diff --git a/drivers/common/idpf/idpf_common_virtchnl.h 
b/drivers/common/idpf/idpf_common_virtchnl.h
index db83761a5e..9ff5c38c26 100644
--- a/drivers/common/idpf/idpf_common_virtchnl.h
+++ b/drivers/common/idpf/idpf_common_virtchnl.h
@@ -71,6 +71,9 @@ __rte_internal
 int idpf_vc_txq_config_by_info(struct idpf_vport *vport, struct 
virtchnl2_txq_info *txq_info,
   uint16_t num_qs);
 __rte_internal
+int idpf_vc_ena_dis_one_queue(struct idpf_vport *vport, uint16_t qid,
+ uint32_t type, bool on);
+__rte_internal
 int idpf_vc_queue_grps_del(struct idpf_vport *vport,
   uint16_t num_q_grps,
   struct virtchnl2_queue_group_id *qg_ids);
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 17e77884ce..25624732b0 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -40,6 +40,7 @@ INTERNAL {
idpf_vc_cmd_execute;
idpf_vc_ctlq_post_rx_buffs;
idpf_vc_ctlq_recv;
+   idpf_vc_ena_dis_one_queue;
idpf_vc_irq_map_unmap_config;
idpf_vc_one_msg_read;
idpf_vc_ptype_info_query;
-- 
2.26.2



[PATCH v8 09/14] net/cpfl: support hairpin queue start/stop

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports Rx/Tx hairpin queue start/stop.

Signed-off-by: Xiao Wang 
Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c |  46 +
 drivers/net/cpfl/cpfl_rxtx.c   | 164 +
 drivers/net/cpfl/cpfl_rxtx.h   |  15 +++
 3 files changed, 207 insertions(+), 18 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index a06def06d0..2b99e58341 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -896,6 +896,52 @@ cpfl_start_queues(struct rte_eth_dev *dev)
}
}
 
+   /* For non-manual bind hairpin queues, enable Tx queue and Rx queue,
+* then enable Tx completion queue and Rx buffer queue.
+*/
+   for (i = cpfl_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+   cpfl_txq = dev->data->tx_queues[i];
+   if (cpfl_txq->hairpin_info.hairpin_q && 
!cpfl_vport->p2p_manual_bind) {
+   err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+i - 
cpfl_vport->nb_data_txq,
+false, true);
+   if (err)
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin TX 
queue %u on",
+   i);
+   else
+   cpfl_txq->base.q_started = true;
+   }
+   }
+
+   for (i = cpfl_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+   cpfl_rxq = dev->data->rx_queues[i];
+   if (cpfl_rxq->hairpin_info.hairpin_q && 
!cpfl_vport->p2p_manual_bind) {
+   err = cpfl_switch_hairpin_rxtx_queue(cpfl_vport,
+i - 
cpfl_vport->nb_data_rxq,
+true, true);
+   if (err)
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin RX 
queue %u on",
+   i);
+   else
+   cpfl_rxq->base.q_started = true;
+   }
+   }
+
+   if (!cpfl_vport->p2p_manual_bind &&
+   cpfl_vport->p2p_tx_complq != NULL &&
+   cpfl_vport->p2p_rx_bufq != NULL) {
+   err = cpfl_switch_hairpin_complq(cpfl_vport, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+   return err;
+   }
+   err = cpfl_switch_hairpin_bufq(cpfl_vport, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx bufq");
+   return err;
+   }
+   }
+
return err;
 }
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 9408c6e1a4..8d1f8a560b 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -1002,6 +1002,89 @@ cpfl_hairpin_txq_config(struct idpf_vport *vport, struct 
cpfl_tx_queue *cpfl_txq
return idpf_vc_txq_config_by_info(vport, txq_info, 1);
 }
 
+int
+cpfl_switch_hairpin_complq(struct cpfl_vport *cpfl_vport, bool on)
+{
+   struct idpf_vport *vport = &cpfl_vport->base;
+   uint32_t type;
+   int err, queue_id;
+
+   type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+   queue_id = cpfl_vport->p2p_tx_complq->queue_id;
+   err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+   return err;
+}
+
+int
+cpfl_switch_hairpin_bufq(struct cpfl_vport *cpfl_vport, bool on)
+{
+   struct idpf_vport *vport = &cpfl_vport->base;
+   uint32_t type;
+   int err, queue_id;
+
+   type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+   queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+   err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+
+   return err;
+}
+
+int
+cpfl_switch_hairpin_rxtx_queue(struct cpfl_vport *cpfl_vport, uint16_t 
logic_qid,
+  bool rx, bool on)
+{
+   struct idpf_vport *vport = &cpfl_vport->base;
+   uint32_t type;
+   int err, queue_id;
+
+   type = rx ? VIRTCHNL2_QUEUE_TYPE_RX : VIRTCHNL2_QUEUE_TYPE_TX;
+
+   if (type == VIRTCHNL2_QUEUE_TYPE_RX)
+   queue_id = 
cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid, logic_qid);
+   else
+   queue_id = 
cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->tx_start_qid, logic_qid);
+   err = idpf_vc_ena_dis_one_queue(vport, queue_id, type, on);
+   if (err)
+   return err;
+
+   return err;
+}
+
+static int
+cpfl_alloc_split_p2p_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+   volatile struct virtchnl2_p2p_rx_buf_desc *rxd;
+   struct rte_mbuf *mbuf = NULL;
+   uint64_t dma_addr;
+   uin

[PATCH v8 10/14] common/idpf: add irq map config API

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports idpf_vport_irq_map_config_by_qids API.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/common/idpf/idpf_common_device.c | 75 
 drivers/common/idpf/idpf_common_device.h |  4 ++
 drivers/common/idpf/version.map  |  1 +
 3 files changed, 80 insertions(+)

diff --git a/drivers/common/idpf/idpf_common_device.c 
b/drivers/common/idpf/idpf_common_device.c
index dc47551b17..cc4207a46e 100644
--- a/drivers/common/idpf/idpf_common_device.c
+++ b/drivers/common/idpf/idpf_common_device.c
@@ -667,6 +667,81 @@ idpf_vport_irq_map_config(struct idpf_vport *vport, 
uint16_t nb_rx_queues)
return ret;
 }
 
+int
+idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport, uint32_t *qids, 
uint16_t nb_rx_queues)
+{
+   struct idpf_adapter *adapter = vport->adapter;
+   struct virtchnl2_queue_vector *qv_map;
+   struct idpf_hw *hw = &adapter->hw;
+   uint32_t dynctl_val, itrn_val;
+   uint32_t dynctl_reg_start;
+   uint32_t itrn_reg_start;
+   uint16_t i;
+   int ret;
+
+   qv_map = rte_zmalloc("qv_map",
+nb_rx_queues *
+sizeof(struct virtchnl2_queue_vector), 0);
+   if (qv_map == NULL) {
+   DRV_LOG(ERR, "Failed to allocate %d queue-vector map",
+   nb_rx_queues);
+   ret = -ENOMEM;
+   goto qv_map_alloc_err;
+   }
+
+   /* Rx interrupt disabled, Map interrupt only for writeback */
+
+   /* The capability flags adapter->caps.other_caps should be
+* compared with bit VIRTCHNL2_CAP_WB_ON_ITR here. The if
+* condition should be updated when the FW can return the
+* correct flag bits.
+*/
+   dynctl_reg_start =
+   vport->recv_vectors->vchunks.vchunks->dynctl_reg_start;
+   itrn_reg_start =
+   vport->recv_vectors->vchunks.vchunks->itrn_reg_start;
+   dynctl_val = IDPF_READ_REG(hw, dynctl_reg_start);
+   DRV_LOG(DEBUG, "Value of dynctl_reg_start is 0x%x", dynctl_val);
+   itrn_val = IDPF_READ_REG(hw, itrn_reg_start);
+   DRV_LOG(DEBUG, "Value of itrn_reg_start is 0x%x", itrn_val);
+   /* Force write-backs by setting WB_ON_ITR bit in DYN_CTL
+* register. WB_ON_ITR and INTENA are mutually exclusive
+* bits. Setting WB_ON_ITR bits means TX and RX Descs
+* are written back based on ITR expiration irrespective
+* of INTENA setting.
+*/
+   /* TBD: need to tune INTERVAL value for better performance. */
+   itrn_val = (itrn_val == 0) ? IDPF_DFLT_INTERVAL : itrn_val;
+   dynctl_val = VIRTCHNL2_ITR_IDX_0  <<
+PF_GLINT_DYN_CTL_ITR_INDX_S |
+PF_GLINT_DYN_CTL_WB_ON_ITR_M |
+itrn_val << PF_GLINT_DYN_CTL_INTERVAL_S;
+   IDPF_WRITE_REG(hw, dynctl_reg_start, dynctl_val);
+
+   for (i = 0; i < nb_rx_queues; i++) {
+   /* map all queues to the same vector */
+   qv_map[i].queue_id = qids[i];
+   qv_map[i].vector_id =
+   vport->recv_vectors->vchunks.vchunks->start_vector_id;
+   }
+   vport->qv_map = qv_map;
+
+   ret = idpf_vc_irq_map_unmap_config(vport, nb_rx_queues, true);
+   if (ret != 0) {
+   DRV_LOG(ERR, "config interrupt mapping failed");
+   goto config_irq_map_err;
+   }
+
+   return 0;
+
+config_irq_map_err:
+   rte_free(vport->qv_map);
+   vport->qv_map = NULL;
+
+qv_map_alloc_err:
+   return ret;
+}
+
 int
 idpf_vport_irq_unmap_config(struct idpf_vport *vport, uint16_t nb_rx_queues)
 {
diff --git a/drivers/common/idpf/idpf_common_device.h 
b/drivers/common/idpf/idpf_common_device.h
index 112367dae8..f767ea7cec 100644
--- a/drivers/common/idpf/idpf_common_device.h
+++ b/drivers/common/idpf/idpf_common_device.h
@@ -200,5 +200,9 @@ int idpf_vport_info_init(struct idpf_vport *vport,
 struct virtchnl2_create_vport *vport_info);
 __rte_internal
 void idpf_vport_stats_update(struct virtchnl2_vport_stats *oes, struct 
virtchnl2_vport_stats *nes);
+__rte_internal
+int idpf_vport_irq_map_config_by_qids(struct idpf_vport *vport,
+ uint32_t *qids,
+ uint16_t nb_rx_queues);
 
 #endif /* _IDPF_COMMON_DEVICE_H_ */
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 25624732b0..0729f6b912 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -69,6 +69,7 @@ INTERNAL {
idpf_vport_info_init;
idpf_vport_init;
idpf_vport_irq_map_config;
+   idpf_vport_irq_map_config_by_qids;
idpf_vport_irq_unmap_config;
idpf_vport_rss_config;
idpf_vport_stats_update;
-- 
2.26.2



[PATCH v8 11/14] net/cpfl: enable write back based on ITR expire

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch enables write back based on ITR expire
(WR_ON_ITR) for hairpin queues.

Signed-off-by: Mingxia Liu 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 13 -
 1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 2b99e58341..850f1c0bc6 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -735,11 +735,22 @@ cpfl_dev_configure(struct rte_eth_dev *dev)
 static int
 cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
 {
+   uint32_t qids[CPFL_MAX_P2P_NB_QUEUES + IDPF_DEFAULT_RXQ_NUM] = {0};
struct cpfl_vport *cpfl_vport = dev->data->dev_private;
struct idpf_vport *vport = &cpfl_vport->base;
uint16_t nb_rx_queues = dev->data->nb_rx_queues;
+   struct cpfl_rx_queue *cpfl_rxq;
+   int i;
 
-   return idpf_vport_irq_map_config(vport, nb_rx_queues);
+   for (i = 0; i < nb_rx_queues; i++) {
+   cpfl_rxq = dev->data->rx_queues[i];
+   if (cpfl_rxq->hairpin_info.hairpin_q)
+   qids[i] = 
cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info->rx_start_qid,
+ (i - 
cpfl_vport->nb_data_rxq));
+   else
+   qids[i] = 
cpfl_hw_qid_get(vport->chunks_info.rx_start_qid, i);
+   }
+   return idpf_vport_irq_map_config_by_qids(vport, qids, nb_rx_queues);
 }
 
 /* Update hairpin_info for dev's tx hairpin queue */
-- 
2.26.2



[PATCH v8 12/14] net/cpfl: support peer ports get

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports getting hairpin peer ports.

Signed-off-by: Xiao Wang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 41 ++
 1 file changed, 41 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 850f1c0bc6..1a1ca4bc77 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1080,6 +1080,46 @@ cpfl_dev_close(struct rte_eth_dev *dev)
return 0;
 }
 
+static int
+cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
+   size_t len, uint32_t tx)
+{
+   struct cpfl_vport *cpfl_vport =
+   (struct cpfl_vport *)dev->data->dev_private;
+   struct idpf_tx_queue *txq;
+   struct idpf_rx_queue *rxq;
+   struct cpfl_tx_queue *cpfl_txq;
+   struct cpfl_rx_queue *cpfl_rxq;
+   int i;
+   int j = 0;
+
+   if (len <= 0)
+   return -EINVAL;
+
+   if (cpfl_vport->p2p_q_chunks_info == NULL)
+   return -ENOTSUP;
+
+   if (tx > 0) {
+   for (i = cpfl_vport->nb_data_txq, j = 0; i < 
dev->data->nb_tx_queues; i++, j++) {
+   txq = dev->data->tx_queues[i];
+   if (txq == NULL)
+   return -EINVAL;
+   cpfl_txq = (struct cpfl_tx_queue *)txq;
+   peer_ports[j] = cpfl_txq->hairpin_info.peer_rxp;
+   }
+   } else if (tx == 0) {
+   for (i = cpfl_vport->nb_data_rxq, j = 0; i < 
dev->data->nb_rx_queues; i++, j++) {
+   rxq = dev->data->rx_queues[i];
+   if (rxq == NULL)
+   return -EINVAL;
+   cpfl_rxq = (struct cpfl_rx_queue *)rxq;
+   peer_ports[j] = cpfl_rxq->hairpin_info.peer_txp;
+   }
+   }
+
+   return j;
+}
+
 static const struct eth_dev_ops cpfl_eth_dev_ops = {
.dev_configure  = cpfl_dev_configure,
.dev_close  = cpfl_dev_close,
@@ -1109,6 +1149,7 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
.hairpin_cap_get= cpfl_hairpin_cap_get,
.rx_hairpin_queue_setup = cpfl_rx_hairpin_queue_setup,
.tx_hairpin_queue_setup = cpfl_tx_hairpin_queue_setup,
+   .hairpin_get_peer_ports = cpfl_hairpin_get_peer_ports,
 };
 
 static int
-- 
2.26.2



[PATCH v8 13/14] net/cpfl: support hairpin bind/unbind

2023-06-04 Thread beilei . xing
From: Beilei Xing 

This patch supports hairpin_bind/unbind ops.

Signed-off-by: Xiao Wang 
Signed-off-by: Beilei Xing 
---
 drivers/net/cpfl/cpfl_ethdev.c | 137 +
 1 file changed, 137 insertions(+)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index 1a1ca4bc77..0d127eae3e 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -1120,6 +1120,141 @@ cpfl_hairpin_get_peer_ports(struct rte_eth_dev *dev, 
uint16_t *peer_ports,
return j;
 }
 
+static int
+cpfl_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+   struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+   struct idpf_vport *tx_vport = &cpfl_tx_vport->base;
+   struct cpfl_vport *cpfl_rx_vport;
+   struct cpfl_tx_queue *cpfl_txq;
+   struct cpfl_rx_queue *cpfl_rxq;
+   struct rte_eth_dev *peer_dev;
+   struct idpf_vport *rx_vport;
+   int err = 0;
+   int i;
+
+   err = cpfl_txq_hairpin_info_update(dev, rx_port);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info.");
+   return err;
+   }
+
+   /* configure hairpin queues */
+   for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+   cpfl_txq = dev->data->tx_queues[i];
+   err = cpfl_hairpin_txq_config(tx_vport, cpfl_txq);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue 
%u", i);
+   return err;
+   }
+   }
+
+   err = cpfl_hairpin_tx_complq_config(cpfl_tx_vport);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
+   return err;
+   }
+
+   peer_dev = &rte_eth_devices[rx_port];
+   cpfl_rx_vport = (struct cpfl_vport *)peer_dev->data->dev_private;
+   rx_vport = &cpfl_rx_vport->base;
+   cpfl_rxq_hairpin_mz_bind(peer_dev);
+
+   err = cpfl_hairpin_rx_bufq_config(cpfl_rx_vport);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+   return err;
+   }
+
+   for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; 
i++) {
+   cpfl_rxq = peer_dev->data->rx_queues[i];
+   err = cpfl_hairpin_rxq_config(rx_vport, cpfl_rxq);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue 
%u", i);
+   return err;
+   }
+   err = cpfl_rx_queue_init(peer_dev, i);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+   return err;
+   }
+   }
+
+   /* enable hairpin queues */
+   for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+   cpfl_txq = dev->data->tx_queues[i];
+   err = cpfl_switch_hairpin_rxtx_queue(cpfl_tx_vport,
+i - 
cpfl_tx_vport->nb_data_txq,
+false, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin TX queue %u 
on",
+   i);
+   return err;
+   }
+   cpfl_txq->base.q_started = true;
+   }
+
+   err = cpfl_switch_hairpin_complq(cpfl_tx_vport, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin Tx complq");
+   return err;
+   }
+
+   for (i = cpfl_rx_vport->nb_data_rxq; i < peer_dev->data->nb_rx_queues; 
i++) {
+   cpfl_rxq = peer_dev->data->rx_queues[i];
+   err = cpfl_switch_hairpin_rxtx_queue(cpfl_rx_vport,
+i - 
cpfl_rx_vport->nb_data_rxq,
+true, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin RX queue %u 
on",
+   i);
+   }
+   cpfl_rxq->base.q_started = true;
+   }
+
+   err = cpfl_switch_hairpin_bufq(cpfl_rx_vport, true);
+   if (err != 0) {
+   PMD_DRV_LOG(ERR, "Failed to switch hairpin Rx buffer queue");
+   return err;
+   }
+
+   return 0;
+}
+
+static int
+cpfl_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+   struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+   struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+   struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+   struct cpfl_tx_queue *cpfl_txq;
+   struct cpfl_rx_queue *cpfl_rxq;
+   int i;
+
+   /* disable hairpin queues */
+   for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {

[PATCH v8 14/14] doc: update the doc of CPFL PMD

2023-06-04 Thread beilei . xing
From: Beilei Xing 

Update cpfl.rst to clarify hairpin support.

Signed-off-by: Beilei Xing 
---
 doc/guides/nics/cpfl.rst | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/doc/guides/nics/cpfl.rst b/doc/guides/nics/cpfl.rst
index d25db088eb..8d5c3082e4 100644
--- a/doc/guides/nics/cpfl.rst
+++ b/doc/guides/nics/cpfl.rst
@@ -106,3 +106,10 @@ The paths are chosen based on 2 conditions:
   A value "P" means the offload feature is not supported by vector path.
   If any not supported features are used, cpfl vector PMD is disabled
   and the scalar paths are chosen.
+
+Hairpin queue
+~
+
+ E2100 Series can loopback packets from RX port to TX port, this feature is
+ called port-to-port or hairpin.
+ Currently, the PMD only supports single port hairpin.
-- 
2.26.2



Re: [PATCH] doc/guides: fix typo for cnxk platform

2023-06-04 Thread Jerin Jacob
On Fri, Jun 2, 2023 at 9:52 PM Thierry Herbelot
 wrote:
>
> The Linux kernel option has an added underscore.
>
> Fixes: 14ad4f01845331a ('doc: add Marvell OCTEON TX2 platform guide')
> Signed-off-by: Thierry Herbelot 


fixed following issues while merging.

Applied to dpdk-next-net-mrvl/for-next-net. Thanks

Wrong tag:
Fixes: 14ad4f01845331a ('doc: add Marvell OCTEON TX2 platform guide')
Wrong 'Fixes' reference:
Fixes: 14ad4f01845331a ('doc: add Marvell OCTEON TX2 platform guide')
Is it candidate for Cc: sta...@dpdk.org backport?
doc/guides: fix typo for cnxk platform

Invalid patch(es) found - checked 1 patch
check-git-log failed


> ---
>  doc/guides/platform/cnxk.rst | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/doc/guides/platform/cnxk.rst b/doc/guides/platform/cnxk.rst
> index 4a1966c66bae..b3aa4de09d0a 100644
> --- a/doc/guides/platform/cnxk.rst
> +++ b/doc/guides/platform/cnxk.rst
> @@ -258,7 +258,7 @@ context or stats using debugfs.
>
>  Enable ``debugfs`` by:
>
> -1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUGFS=y``.
> +1. Compile kernel with debugfs enabled, i.e ``CONFIG_DEBUG_FS=y``.
>  2. Boot OCTEON CN9K/CN10K with debugfs supported kernel.
>  3. Verify ``debugfs`` mounted by default "mount | grep -i debugfs" or mount 
> it manually by using.
>
> --
> 2.39.2
>