This commit adds scatter-gather list (SGL) capability to the Intel
QuickAssist Technology (QAT) driver, so that segmented (chained) mbufs
can be processed. Each ring descriptor is given a cookie carrying two
flat buffer lists of up to 16 entries (in-place/source and out-of-place
destination); when a chained mbuf is detected, the request's pointer
type is switched from flat buffer to SGL and the lists are filled from
the chain.

Signed-off-by: Arek Kusztal <arkadiuszx.kusz...@intel.com>
---
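Notes (illustrative only, not part of the patch): with SGL support an
application can hand the PMD a segmented mbuf chain directly; the PMD
detects m_src->next / m_dst->next and builds the firmware SGL itself,
up to QAT_SGL_MAX_NUMBER (16) entries per list. A minimal sketch of the
caller side, where pool, dev_id, qp_id and op are assumed to already
exist:

        /* Build a two-segment chain and submit it; on enqueue the PMD
         * flattens the chain into the per-descriptor SGL cookie.
         */
        struct rte_mbuf *head = rte_pktmbuf_alloc(pool);
        struct rte_mbuf *tail = rte_pktmbuf_alloc(pool);

        rte_pktmbuf_append(head, 1024);
        rte_pktmbuf_append(tail, 1024);
        rte_pktmbuf_chain(head, tail);

        op->sym->m_src = head;  /* m_src->next != NULL -> SGL path */
        rte_cryptodev_enqueue_burst(dev_id, qp_id, &op, 1);
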
 doc/guides/rel_notes/release_17_02.rst |   2 +
 drivers/crypto/qat/qat_adf/qat_algs.h  |  14 +++-
 drivers/crypto/qat/qat_crypto.c        | 130 +++++++++++++++++++++++++++++++--
 drivers/crypto/qat/qat_crypto.h        |   3 +
 drivers/crypto/qat/qat_qp.c            |  55 +++++++++++++-
 5 files changed, 195 insertions(+), 9 deletions(-)

diff --git a/doc/guides/rel_notes/release_17_02.rst b/doc/guides/rel_notes/release_17_02.rst
index f3e7bb6..7eaf98e 100644
--- a/doc/guides/rel_notes/release_17_02.rst
+++ b/doc/guides/rel_notes/release_17_02.rst
@@ -44,6 +44,8 @@ New Features
 
   * DES algorithm.
 
+  * Scatter-gather list (SGL) support.
+
 
 Resolved Issues
 ---------------
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 5409e1e..e01b9d7 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -47,6 +47,7 @@
 #ifndef _ICP_QAT_ALGS_H_
 #define _ICP_QAT_ALGS_H_
 #include <rte_memory.h>
+#include <rte_crypto.h>
 #include "icp_qat_hw.h"
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
@@ -79,13 +80,24 @@ struct qat_alg_buf {
        uint64_t addr;
 } __rte_packed;
 
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER     16
+
 struct qat_alg_buf_list {
        uint64_t resrvd;
        uint32_t num_bufs;
        uint32_t num_mapped_bufs;
-       struct qat_alg_buf bufers[];
+       struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
 } __rte_packed __rte_cache_aligned;
 
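+/*
+ * Cookie attached to each ring descriptor: in-place/source SGL,
+ * out-of-place destination SGL and the precomputed physical address
+ * of the cookie itself
+ */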
+struct qat_crypto_op_cookie {
+       struct qat_alg_buf_list qat_sgl_list;
+       struct qat_alg_buf_list qat_sgl_list_oop;
+       phys_addr_t cookie_phys_addr;
+};
+
 /* Common content descriptor */
 struct qat_alg_cd {
        struct icp_qat_hw_cipher_algo_blk cipher;
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index fa78c60..27e40fd 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -67,6 +67,10 @@
 
 #define BYTE_LENGTH    8
 
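+/*
+ * Physical address of the second buffer list (the out-of-place
+ * destination SGL) inside a qat_crypto_op_cookie
+ */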
+#define SGL_SECOND_COOKIE_ADDR(arg, cast)      ((cast)(arg) \
+                               + offsetof(struct qat_crypto_op_cookie, \
+                                       qat_sgl_list_oop))
+
 static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
        {       /* SHA1 HMAC */
                .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
@@ -523,7 +527,8 @@ static inline uint32_t
 adf_modulo(uint32_t data, uint32_t shift);
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+               struct qat_crypto_op_cookie *qat_op_cookie);
 
 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
                void *session)
@@ -900,9 +905,17 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
        }
 
        while (nb_ops_sent != nb_ops_possible) {
-               ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+
+               ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+                               tmp_qp->sgl_cookies[tail / queue->msg_size]);
                if (ret != 0) {
                        tmp_qp->stats.enqueue_err_count++;
+                       /*
+                        * This message cannot be enqueued; subtract
+                        * the ops that will not be sent from the
+                        * in-flight count.
+                        */
+                       rte_atomic16_sub(&tmp_qp->inflights16,
+                                       nb_ops_possible - nb_ops_sent);
                        if (nb_ops_sent == 0)
                                return 0;
                        goto kick_tail;
@@ -911,6 +924,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
                tail = adf_modulo(tail + queue->msg_size, queue->modulo);
                nb_ops_sent++;
                cur_op++;
+
        }
 kick_tail:
        WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
@@ -936,12 +950,13 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 
        while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
                        msg_counter != nb_ops) {
+
                rx_op = (struct rte_crypto_op *)(uintptr_t)
                                (resp_msg->opaque_data);
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
                rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-                               sizeof(struct icp_qat_fw_comn_resp));
+                       sizeof(struct icp_qat_fw_comn_resp));
 #endif
                if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
                                ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -950,7 +965,9 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
                } else {
                        rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                }
+
                *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
+
                queue->head = adf_modulo(queue->head +
                                queue->msg_size,
                                ADF_RING_SIZE_MODULO(queue->queue_size));
@@ -972,8 +989,61 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 }
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+               void *sgl_cookie, uint32_t data_len)
+{
+       int nr = 1;
+       struct qat_alg_buf_list *list = sgl_cookie;
+
+       uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+                       buff_start + rte_pktmbuf_data_len(buf);
+
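+       /*
+        * The first SGL entry starts at buff_start, which may lie inside
+        * the first segment, and runs to the end of that segment.
+        */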
+       list->bufers[0].addr = buff_start;
+       list->bufers[0].resrvd = 0;
+       list->bufers[0].len = buf_len;
+
+       if (data_len <= buf_len) {
+               list->num_bufs = nr;
+               list->bufers[0].len = data_len;
+               return 0;
+       }
+
+       buf = buf->next;
+       while (buf) {
+               if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+                       PMD_DRV_LOG(ERR, "QAT PMD exceeded the maximum"
+                                       " number of SGL entries (%u)",
+                                       QAT_SGL_MAX_NUMBER);
+                       return -EINVAL;
+               }
+
+               list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+               list->bufers[nr].resrvd = 0;
+               list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+               buf_len += list->bufers[nr].len;
+               buf = buf->next;
+
+               if (buf_len > data_len) {
+                       list->bufers[nr].len -=
+                               buf_len - data_len;
+                       buf = NULL;
+               }
+
+               ++nr;
+       }
+
+       list->num_bufs = nr;
+
+       return 0;
+}
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+               struct qat_crypto_op_cookie *qat_op_cookie)
 {
+       int ret = 0;
        struct qat_session *ctx;
        struct icp_qat_fw_la_cipher_req_params *cipher_param;
        struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -983,6 +1053,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
        uint32_t auth_len = 0, auth_ofs = 0;
        uint32_t min_ofs = 0;
        uint64_t src_buf_start = 0, dst_buf_start = 0;
+       uint8_t do_sgl = 0;
 
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1100,10 +1171,16 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 
        }
 
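+       /* A segmented source or destination mbuf selects the SGL path */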
+       if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+               do_sgl = 1;
+
        /* adjust for chain case */
        if (do_cipher && do_auth)
                min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
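+       /*
+        * If the offset lies beyond the first segment, start the DMA at
+        * the head of the chain and let the request offsets cover the gap.
+        */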
+       if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+               min_ofs = 0;
+
        if (unlikely(op->sym->m_dst != NULL)) {
                /* Out-of-place operation (OOP)
                 * Don't align DMA start. DMA the minimum data-set
@@ -1113,6 +1190,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
                        rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
                dst_buf_start =
                        rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
        } else {
                /* In-place operation
                 * Start DMA at nearest aligned address below min_ofs
@@ -1158,8 +1236,47 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
                (cipher_param->cipher_offset + cipher_param->cipher_length)
                : (auth_param->auth_off + auth_param->auth_len);
 
-       qat_req->comn_mid.src_data_addr = src_buf_start;
-       qat_req->comn_mid.dest_data_addr = dst_buf_start;
+       if (do_sgl) {
+
+               ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+                               QAT_COMN_PTR_TYPE_SGL);
+               ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+                               &qat_op_cookie->qat_sgl_list,
+                               qat_req->comn_mid.src_length);
+               if (ret) {
+                       PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+                       return ret;
+               }
+
+               if (likely(op->sym->m_dst == NULL))
+                       qat_req->comn_mid.dest_data_addr =
+                               qat_req->comn_mid.src_data_addr =
+                               qat_op_cookie->cookie_phys_addr;
+               else {
+                       ret = qat_sgl_fill_array(op->sym->m_dst,
+                                       dst_buf_start,
+                                       &qat_op_cookie->qat_sgl_list_oop,
+                                               qat_req->comn_mid.dst_length);
+
+                       if (ret) {
+                               PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+                                               "fill sgl array");
+                               return ret;
+                       }
+
+                       qat_req->comn_mid.src_data_addr =
+                               qat_op_cookie->cookie_phys_addr;
+
+                       qat_req->comn_mid.dest_data_addr =
+                               SGL_SECOND_COOKIE_ADDR(
+                                               qat_op_cookie->cookie_phys_addr,
+                                               phys_addr_t);
+               }
+
+       } else {
+               qat_req->comn_mid.src_data_addr = src_buf_start;
+               qat_req->comn_mid.dest_data_addr = dst_buf_start;
+       }
 
        if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
                        ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -1191,7 +1308,6 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
                }
        }
 
-
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
        rte_hexdump(stdout, "qat_req:", qat_req,
                        sizeof(struct icp_qat_fw_la_bulk_req));
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6b84488..ef90281 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -69,6 +69,9 @@ struct qat_qp {
        struct  qat_queue       tx_q;
        struct  qat_queue       rx_q;
        struct  rte_cryptodev_stats stats;
+       struct rte_mempool *sgl_pool;
+       void **sgl_cookies;
+       uint32_t nb_descriptors;
 } __rte_cache_aligned;
 
 /** private data structure for each QAT device */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 2e7188b..331ff31 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -41,6 +41,7 @@
 
 #include "qat_logs.h"
 #include "qat_crypto.h"
+#include "qat_algs.h"
 #include "adf_transport_access_macros.h"
 
 #define ADF_MAX_SYM_DESC                       4096
@@ -136,6 +137,8 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 {
        struct qat_qp *qp;
        int ret;
+       char sgl_pool_name[RTE_RING_NAMESIZE];
+       uint32_t i;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -166,7 +169,6 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
                                queue_pair_id);
                return -EINVAL;
        }
-
        /* Allocate the queue pair data structure. */
        qp = rte_zmalloc("qat PMD qp metadata",
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
@@ -174,6 +176,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
                return -ENOMEM;
        }
+       qp->nb_descriptors = qp_conf->nb_descriptors;
+       qp->sgl_cookies = rte_zmalloc("qat PMD SGL cookie pointer",
+                       qp_conf->nb_descriptors * sizeof(*qp->sgl_cookies),
+                       RTE_CACHE_LINE_SIZE);
+       if (qp->sgl_cookies == NULL) {
+               PMD_DRV_LOG(ERR, "Failed to alloc mem for SGL cookies");
+               rte_free(qp);
+               return -ENOMEM;
+       }
+
        qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
        rte_atomic16_init(&qp->inflights16);
 
@@ -191,9 +198,45 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
                qat_queue_delete(&(qp->tx_q));
                goto create_err;
        }
+
        adf_configure_queues(qp);
        adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+
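+       /*
+        * One cookie per ring descriptor: create the pool (or look it
+        * up if this queue pair is being set up again) and hand a fixed
+        * cookie to every descriptor slot.
+        */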
+       snprintf(sgl_pool_name, RTE_RING_NAMESIZE, "%s_qp_sgl_%d_%hu",
+               dev->driver->pci_drv.driver.name, dev->data->dev_id,
+               queue_pair_id);
+
+       qp->sgl_pool = rte_mempool_lookup(sgl_pool_name);
+
+       if (qp->sgl_pool == NULL)
+               qp->sgl_pool = rte_mempool_create(sgl_pool_name,
+                               qp->nb_descriptors,
+                               sizeof(struct qat_crypto_op_cookie), 64, 0,
+                               NULL, NULL, NULL, NULL, socket_id,
+                               0);
+       if (!qp->sgl_pool) {
+               PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+                               " sgl mempool");
+               goto create_err;
+       }
+
+       for (i = 0; i < qp->nb_descriptors; i++) {
+               if (rte_mempool_get(qp->sgl_pool, &qp->sgl_cookies[i])) {
+                       PMD_DRV_LOG(ERR, "QAT PMD Cannot get sgl_cookie");
+                       goto create_err;
+               }
+
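+               /*
+                * Precompute the cookie's IO address at setup time so the
+                * enqueue hot path does not call rte_mempool_virt2phy().
+                */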
+               struct qat_crypto_op_cookie *sgl_cookie =
+                               qp->sgl_cookies[i];
+
+               sgl_cookie->cookie_phys_addr =
+                               rte_mempool_virt2phy(qp->sgl_pool,
+                               sgl_cookie) +
+                               offsetof(struct qat_crypto_op_cookie,
+                               qat_sgl_list);
+       }
        dev->data->queue_pairs[queue_pair_id] = qp;
+
        return 0;
 
 create_err:
@@ -206,6 +249,8 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
        struct qat_qp *qp =
                        (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
 
+       uint32_t i;
+
        PMD_INIT_FUNC_TRACE();
        if (qp == NULL) {
                PMD_DRV_LOG(DEBUG, "qp already freed");
@@ -221,6 +266,14 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
        }
 
        adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+       for (i = 0; i < qp->nb_descriptors; i++)
+               rte_mempool_put(qp->sgl_pool, qp->sgl_cookies[i]);
+
+       if (qp->sgl_pool)
+               rte_mempool_free(qp->sgl_pool);
+
+       rte_free(qp->sgl_cookies);
        rte_free(qp);
        dev->data->queue_pairs[queue_pair_id] = NULL;
        return 0;
-- 
2.1.0
