Hi Akhil,
On 12/13/2017 7:26 PM, Akhil Goyal wrote:
Rx and Tx paths are rewritten with improved internal APIs
to improve performance.
Signed-off-by: Akhil Goyal <akhil.go...@nxp.com>
Signed-off-by: Nipun Gupta <nipun.gu...@nxp.com>
---
drivers/crypto/dpaa_sec/dpaa_sec.c | 260 ++++++++++++++++++++++---------------
drivers/crypto/dpaa_sec/dpaa_sec.h | 2 +-
2 files changed, 153 insertions(+), 109 deletions(-)
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index ea744e6..b650d5c 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -563,46 +563,67 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
return 0;
}
.. <snip>
-
+#define DPAA_MAX_DEQUEUE_NUM_FRAMES 32
It would be better if you define it in dpaa_sec.h.
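i.e. something like this (just sketching the move, keeping the value
from this patch; presumably next to DPAA_SEC_BURST and the other
macros already there):

    /* dpaa_sec.h */
    #define DPAA_MAX_DEQUEUE_NUM_FRAMES    32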
/* qp is lockless, should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
struct qman_fq *fq;
+ unsigned int pkts = 0;
+ int ret;
+ struct qm_dqrr_entry *dq;
fq = &qp->outq;
- dpaa_sec_op_nb = 0;
- dpaa_sec_ops = ops;
+ ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
+ DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
Any particular reason for keeping the limit as 32 for SEC?
The dpaa eth PMD uses 63, i.e. the full 6-bit NUMFRAMES field.
Also, you have the option to use '0': a NUM_FRAMES of zero indicates
that the volatile command does not terminate until the specified FQ
becomes empty.
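For example (only a sketch, using the same two-argument qman_set_vdq()
this patch already calls; whether dropping the nb_ops bound is
acceptable here is for you to judge):

    /* use the full 6-bit NUMFRAMES field, as the dpaa eth PMD does */
    #define DPAA_MAX_DEQUEUE_NUM_FRAMES    63

    /* or: NUM_FRAMES of 0 encodes "till empty", so the volatile
     * dequeue keeps going until the FQ drains
     */
    ret = qman_set_vdq(fq, 0);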
.. <snip>
-
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
/* Function to transmit the frames to given device and queuepair */
uint32_t loop;
- int32_t ret;
struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
You can avoid this explicit typecast.
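In C a void pointer converts implicitly, so this should do:

    struct dpaa_sec_qp *dpaa_qp = qp;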
uint16_t num_tx = 0;
+ struct qm_fd fds[DPAA_SEC_BURST], *fd;
+ uint32_t frames_to_send;
+ struct rte_crypto_op *op;
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ struct dpaa_sec_op_ctx *ctx;
+ uint32_t auth_only_len;
+ struct qman_fq *inq[DPAA_SEC_BURST];
+
+ while (nb_ops) {
+ frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
+ DPAA_SEC_BURST : nb_ops;
+ for (loop = 0; loop < frames_to_send; loop++) {
+ op = *(ops++);
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (dpaa_sec_session *)
Here and at other places as well; the explicit cast can be dropped
(see the suggestion after this switch).
+ get_session_private_data(
+ op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (dpaa_sec_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ PMD_TX_LOG(ERR,
+ "sessionless crypto op not supported");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
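As noted above, get_session_private_data() and
get_sec_session_private_data() return void *, so the casts in both
cases can simply go, e.g. (sketch for the first case):

    case RTE_CRYPTO_OP_WITH_SESSION:
        ses = get_session_private_data(op->sym->session,
                cryptodev_driver_id);
        break;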
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
+ ses->qp, qp);
+ if (dpaa_sec_attach_sess_q(qp, ses)) {
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ }
- if (unlikely(nb_ops == 0))
- return 0;
+ /*
+ * Segmented buffer is not supported.
+ */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else {
+ PMD_TX_LOG(ERR, "not supported sec op");
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
+ }