Underlying libsso_snow3g library now supports bit-level
operations, so PMD has been updated to allow them.

Signed-off-by: Pablo de Lara <pablo.de.lara.guarch at intel.com>
---
 doc/guides/cryptodevs/snow3g.rst       |   5 +-
 drivers/crypto/snow3g/rte_snow3g_pmd.c | 193 ++++++++++++++++++++++++---------
 2 files changed, 142 insertions(+), 56 deletions(-)

diff --git a/doc/guides/cryptodevs/snow3g.rst b/doc/guides/cryptodevs/snow3g.rst
index a084cad..65628c6 100644
--- a/doc/guides/cryptodevs/snow3g.rst
+++ b/doc/guides/cryptodevs/snow3g.rst
@@ -51,8 +51,9 @@ Limitations
 -----------

 * Chained mbufs are not supported.
-* Snow3g(UEA2) supported only if cipher length, cipher offset fields are byte-aligned.
-* Snow3g(UIA2) supported only if hash length, hash offset fields are byte-aligned.
+* Snow3g(UIA2) supported only if hash offset field is byte-aligned.
+* In-place bit-level operations for Snow3g(UEA2) are not supported
+  (if length and/or offset of data to be ciphered is not byte-aligned).

 Installation
 ------------
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 8bbee8a..b90d27d 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -206,14 +206,6 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
                        break;
                }

-               if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
-                               || ((ops[i]->sym->cipher.data.offset
-                                       % BYTE_LEN) != 0)) {
-                       ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       SNOW3G_LOG_ERR("Data Length or offset");
-                       break;
-               }
-
                src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
                                (ops[i]->sym->cipher.data.offset >> 3);
                dst[i] = ops[i]->sym->m_dst ?
@@ -233,6 +225,39 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops,
        return processed_ops;
 }

+/** Encrypt/decrypt mbuf (bit level function). */
+static uint8_t
+process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
+               struct snow3g_session *session)
+{
+       uint8_t *src, *dst;
+       uint8_t *IV;
+       uint32_t length_in_bits, offset_in_bits;
+
+       /* Sanity checks. */
+       if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               SNOW3G_LOG_ERR("iv");
+               return 0;
+       }
+
+       offset_in_bits = op->sym->cipher.data.offset;
+       src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
+       if (op->sym->m_dst == NULL) {
+               op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+               SNOW3G_LOG_ERR("bit-level in-place not supported\n");
+               return 0;
+       }
+       dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
+       IV = op->sym->cipher.iv.data;
+       length_in_bits = op->sym->cipher.data.length;
+
+       sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV,
+                       src, dst, length_in_bits, offset_in_bits);
+
+       return 1;
+}
+
 /** Generate/verify hash from mbufs with same hash key. */
 static int
 process_snow3g_hash_op(struct rte_crypto_op **ops,
@@ -257,11 +282,10 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
                        break;
                }

-               if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0)
-                               || ((ops[i]->sym->auth.data.offset
-                                       % BYTE_LEN) != 0)) {
+               /* Data must be byte aligned */
+               if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
                        ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-                       SNOW3G_LOG_ERR("Data Length or offset");
+                       SNOW3G_LOG_ERR("Offset");
                        break;
                }

@@ -301,10 +325,11 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
 /** Process a batch of crypto ops which shares the same session. */
 static int
 process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
-               struct snow3g_qp *qp, uint8_t num_ops)
+               struct snow3g_qp *qp, uint8_t num_ops,
+               uint16_t *accumulated_enqueued_ops)
 {
        unsigned i;
-       unsigned processed_ops;
+       unsigned enqueued_ops, processed_ops;

        switch (session->op) {
        case SNOW3G_OP_ONLY_CIPHER:
@@ -344,7 +369,63 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
                }
        }

-       return processed_ops;
+       enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+                       (void **)ops, processed_ops);
+       qp->qp_stats.enqueued_count += enqueued_ops;
+       *accumulated_enqueued_ops += enqueued_ops;
+
+       return enqueued_ops;
+}
+
+/** Process a crypto op with length/offset in bits. */
+static int
+process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
+               struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops)
+{
+       unsigned enqueued_op, processed_op;
+
+       switch (session->op) {
+       case SNOW3G_OP_ONLY_CIPHER:
+               processed_op = process_snow3g_cipher_op_bit(op,
+                               session);
+               break;
+       case SNOW3G_OP_ONLY_AUTH:
+               processed_op = process_snow3g_hash_op(&op, session, 1);
+               break;
+       case SNOW3G_OP_CIPHER_AUTH:
+               processed_op = process_snow3g_cipher_op_bit(op, session);
+               if (processed_op == 1)
+                       process_snow3g_hash_op(&op, session, 1);
+               break;
+       case SNOW3G_OP_AUTH_CIPHER:
+               processed_op = process_snow3g_hash_op(&op, session, 1);
+               if (processed_op == 1)
+                       process_snow3g_cipher_op_bit(op, session);
+               break;
+       default:
+               /* Operation not supported. */
+               processed_op = 0;
+       }
+
+       /*
+        * If there was no error/authentication failure,
+        * change status to successful.
+        */
+       if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+               op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+       /* Free session if a session-less crypto op. */
+       if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+               rte_mempool_put(qp->sess_mp, op->sym->session);
+               op->sym->session = NULL;
+       }
+
+       enqueued_op = rte_ring_enqueue_burst(qp->processed_ops,
+                       (void **)&op, processed_op);
+       qp->qp_stats.enqueued_count += enqueued_op;
+       *accumulated_enqueued_ops += enqueued_op;
+
+       return enqueued_op;
 }

 static uint16_t
@@ -356,7 +437,7 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,

        struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
        struct snow3g_qp *qp = queue_pair;
-       unsigned i, n;
+       unsigned i;
        uint8_t burst_size = 0;
        uint16_t enqueued_ops = 0;
        uint8_t processed_ops;
@@ -372,8 +453,32 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                                curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
                        curr_c_op->status =
                                        RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
-                       qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
-                       return enqueued_ops;
+                       break;
+               }
+
+               /* If length/offset is at bit-level, process this buffer alone. */
+               if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0)
+                               || ((curr_c_op->sym->cipher.data.offset
+                                       % BYTE_LEN) != 0)) {
+                       /* Process the ops of the previous session. */
+                       if (prev_sess != NULL) {
+                               processed_ops = process_ops(c_ops, prev_sess,
+                               qp, burst_size, &enqueued_ops);
+                               if (processed_ops < burst_size) {
+                                       burst_size = 0;
+                                       break;
+                               }
+
+                               burst_size = 0;
+                               prev_sess = NULL;
+                       }
+
+                       processed_ops = process_op_bit(curr_c_op, curr_sess,
+                                                       qp, &enqueued_ops);
+                       if (processed_ops != 1)
+                               break;
+
+                       continue;
                }

                /* Batch ops that share the same session. */
@@ -387,20 +492,14 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                         * process them, and start a new batch.
                         */
                        if (burst_size == SNOW3G_MAX_BURST) {
-                               processed_ops = process_ops(c_ops,
-                                               prev_sess, qp, burst_size);
-                               n = rte_ring_enqueue_burst(qp->processed_ops,
-                                               (void **)c_ops,
-                                               processed_ops);
-                               qp->qp_stats.enqueued_count += n;
-                               enqueued_ops += n;
-                               if (n < burst_size) {
-                                       qp->qp_stats.enqueue_err_count +=
-                                                       nb_ops - enqueued_ops;
-                                       return enqueued_ops;
+                               processed_ops = process_ops(c_ops, prev_sess,
+                                               qp, burst_size, &enqueued_ops);
+                               if (processed_ops < burst_size) {
+                                       burst_size = 0;
+                                       break;
                                }
-                               burst_size = 0;

+                               burst_size = 0;
                                prev_sess = NULL;
                        }
                } else {
@@ -408,41 +507,27 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
                         * Different session, process the ops
                         * of the previous session.
                         */
-                       processed_ops = process_ops(c_ops,
-                                       prev_sess, qp, burst_size);
-                       n = rte_ring_enqueue_burst(qp->processed_ops,
-                                       (void **)c_ops,
-                                       processed_ops);
-                       qp->qp_stats.enqueued_count += n;
-                       enqueued_ops += n;
-                       if (n < burst_size) {
-                               qp->qp_stats.enqueue_err_count +=
-                                               nb_ops - enqueued_ops;
-                               return enqueued_ops;
+                       processed_ops = process_ops(c_ops, prev_sess,
+                                       qp, burst_size, &enqueued_ops);
+                       if (processed_ops < burst_size) {
+                               burst_size = 0;
+                               break;
                        }
-                       burst_size = 0;

+                       burst_size = 0;
                        prev_sess = curr_sess;
+
                        c_ops[burst_size++] = curr_c_op;
                }
        }

        if (burst_size != 0) {
                /* Process the crypto ops of the last session. */
-               processed_ops = process_ops(c_ops,
-                               prev_sess, qp, burst_size);
-               n = rte_ring_enqueue_burst(qp->processed_ops,
-                               (void **)c_ops,
-                               processed_ops);
-               qp->qp_stats.enqueued_count += n;
-               enqueued_ops += n;
-               if (n < burst_size) {
-                       qp->qp_stats.enqueue_err_count +=
-                                       nb_ops - enqueued_ops;
-                       return enqueued_ops;
-               }
+               processed_ops = process_ops(c_ops, prev_sess,
+                               qp, burst_size, &enqueued_ops);
        }

+       qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
        return enqueued_ops;
 }

-- 
2.5.0

Reply via email to