From: Nishikant Nayak <nishikanta.na...@intel.com>

This patch handles the changes required for updating the common
header fields specific to GEN LCE. It also adds/updates the response
processing APIs based on GEN LCE requirements.

Signed-off-by: Nishikant Nayak <nishikanta.na...@intel.com>
Signed-off-by: Ciara Power <ciara.po...@intel.com>
Acked-by: Arkadiusz Kusztal <arkadiuszx.kusz...@intel.com>
---
v7:
    - Removed unnecessary whitespace and indent changes.
    - Added signed-off for second developer that worked on v7.
    - Utilised 100 char line limit.
v2:
    - Renamed device from GEN 5 to GEN LCE.
    - Removed unused code.
    - Updated macro names.
    - Added GEN LCE specific API for dequeue burst.
    - Fixed code formatting.
---
 drivers/crypto/qat/qat_sym.c         | 14 ++++++-
 drivers/crypto/qat/qat_sym.h         | 51 ++++++++++++++++++++++++-
 drivers/crypto/qat/qat_sym_session.c | 57 ++++++++++++++++++++++++++--
 drivers/crypto/qat/qat_sym_session.h | 10 ++++-
 4 files changed, 124 insertions(+), 8 deletions(-)

diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
index 6e03bde841..9113dfef56 100644
--- a/drivers/crypto/qat/qat_sym.c
+++ b/drivers/crypto/qat/qat_sym.c
@@ -180,7 +180,13 @@ qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops)
 {
        return qat_dequeue_op_burst(qp, (void **)ops,
-                               qat_sym_process_response, nb_ops);
+                                                       qat_sym_process_response, nb_ops);
+}
+
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+       return qat_dequeue_op_burst(qp, (void **)ops, qat_sym_process_response_gen_lce, nb_ops);
 }
 
 int
@@ -200,6 +206,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
        char capa_memz_name[RTE_CRYPTODEV_NAME_MAX_LEN];
        struct rte_cryptodev *cryptodev;
        struct qat_cryptodev_private *internals;
+       enum qat_device_gen qat_dev_gen = qat_pci_dev->qat_dev_gen;
        const struct qat_crypto_gen_dev_ops *gen_dev_ops =
                &qat_sym_gen_dev_ops[qat_pci_dev->qat_dev_gen];
 
@@ -249,7 +256,10 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev,
        cryptodev->dev_ops = gen_dev_ops->cryptodev_ops;
 
        cryptodev->enqueue_burst = qat_sym_enqueue_burst;
-       cryptodev->dequeue_burst = qat_sym_dequeue_burst;
+       if (qat_dev_gen == QAT_GEN_LCE)
+               cryptodev->dequeue_burst = qat_sym_dequeue_burst_gen_lce;
+       else
+               cryptodev->dequeue_burst = qat_sym_dequeue_burst;
 
        cryptodev->feature_flags = gen_dev_ops->get_feature_flags(qat_pci_dev);
 
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
index f2f197d050..6616064251 100644
--- a/drivers/crypto/qat/qat_sym.h
+++ b/drivers/crypto/qat/qat_sym.h
@@ -142,6 +142,9 @@ uint16_t
 qat_sym_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                uint16_t nb_ops);
 
+uint16_t
+qat_sym_dequeue_burst_gen_lce(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops);
+
 #ifdef RTE_QAT_OPENSSL
 /** Encrypt a single partial block
  *  Depends on openssl libcrypto
@@ -390,6 +393,46 @@ qat_sym_process_response(void **op, uint8_t *resp, void *op_cookie,
        return 1;
 }
 
+static __rte_always_inline int
+qat_sym_process_response_gen_lce(void **op, uint8_t *resp, void *op_cookie __rte_unused,
+               uint64_t *dequeue_err_count __rte_unused)
+{
+       struct icp_qat_fw_comn_resp *resp_msg = (struct icp_qat_fw_comn_resp *)resp;
+       struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t) (resp_msg->opaque_data);
+       struct qat_sym_session *sess;
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+       QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+                       sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+       sess = CRYPTODEV_GET_SYM_SESS_PRIV(rx_op->sym->session);
+
+       rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+       if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(
+                       resp_msg->comn_hdr.comn_status))
+               rx_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+       else if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != ICP_QAT_FW_COMN_RESP_INVALID_PARAM_STAT_GET(
+                       resp_msg->comn_hdr.comn_status))
+               rx_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+       if (sess->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+               if (ICP_QAT_FW_LA_VER_STATUS_FAIL == ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+                               resp_msg->comn_hdr.comn_status))
+                       rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+       }
+
+       *op = (void *)rx_op;
+
+       /*
+        * return 1 as dequeue op only move on to the next op
+        * if one was ready to return to API
+        */
+       return 1;
+}
+
 int
 qat_sym_configure_dp_ctx(struct rte_cryptodev *dev, uint16_t qp_id,
        struct rte_crypto_raw_dp_ctx *raw_dp_ctx,
@@ -455,7 +498,13 @@ qat_sym_preprocess_requests(void **ops __rte_unused,
 
 static inline void
 qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused,
-       void *op_cookie __rte_unused)
+       void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
+{
+}
+
+static inline void
+qat_sym_process_response_gen_lce(void **op __rte_unused, uint8_t *resp __rte_unused,
+       void *op_cookie __rte_unused, uint64_t *dequeue_err_count __rte_unused)
 {
 }
 
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 39e4a833ec..e763cfcb51 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -136,6 +136,9 @@ qat_sym_cd_auth_set(struct qat_sym_session *cdesc,
 static void
 qat_sym_session_init_common_hdr(struct qat_sym_session *session);
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session);
+
 /* Req/cd init functions */
 
 static void
@@ -757,6 +760,12 @@ qat_sym_session_set_parameters(struct rte_cryptodev *dev,
                session->qat_cmd);
                return -ENOTSUP;
        }
+
+       if (qat_dev_gen == QAT_GEN_LCE) {
+               qat_sym_session_init_gen_lce_hdr(session);
+               return 0;
+       }
+
        qat_sym_session_finalize(session);
 
        return qat_sym_gen_dev_ops[qat_dev_gen].set_session((void *)dev,
@@ -1103,6 +1112,12 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
                        dev->data->dev_private;
        enum qat_device_gen qat_dev_gen =
                        internals->qat_dev->qat_dev_gen;
+       if (qat_dev_gen == QAT_GEN_LCE) {
+               struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+               struct lce_key_buff_desc *key_buff = &req_tmpl->key_buff;
+
+               key_buff->keybuff = session->key_paddr;
+       }
 
        /*
         * Store AEAD IV parameters as cipher IV,
@@ -1166,9 +1181,14 @@ qat_sym_session_configure_aead(struct rte_cryptodev *dev,
        }
 
        if (session->is_single_pass) {
-               if (qat_sym_cd_cipher_set(session,
-                               aead_xform->key.data, aead_xform->key.length))
-                       return -EINVAL;
+               if (qat_dev_gen != QAT_GEN_LCE) {
+                       if (qat_sym_cd_cipher_set(session,
+                                       aead_xform->key.data, aead_xform->key.length))
+                               return -EINVAL;
+               } else {
+                       session->auth_key_length = aead_xform->key.length;
+                       memcpy(session->key_array, aead_xform->key.data, aead_xform->key.length);
+               }
        } else if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
                        aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
                        (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
@@ -2074,6 +2094,37 @@ qat_sym_session_init_common_hdr(struct qat_sym_session *session)
                                        ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
 }
 
+static void
+qat_sym_session_init_gen_lce_hdr(struct qat_sym_session *session)
+{
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &session->fw_req;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+
+       /*
+        * GEN_LCE specifies separate command id for AEAD operations but Cryptodev
+        * API processes AEAD operations as Single pass Crypto operations.
+        * Hence even for GEN_LCE, Session Algo Command ID is CIPHER.
+        * Note, however Session Algo Mode is AEAD.
+        */
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_AEAD;
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+       header->hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD_GEN_LCE(ICP_QAT_FW_COMN_REQ_FLAG_SET,
+                       ICP_QAT_FW_COMN_GEN_LCE_DESC_LAYOUT);
+       header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD_GEN_LCE(QAT_COMN_PTR_TYPE_SGL,
+                       QAT_COMN_KEY_BUFFER_USED);
+
+       ICP_QAT_FW_SYM_AEAD_ALGO_SET(header->serv_specif_flags, QAT_LA_CRYPTO_AEAD_AES_GCM_GEN_LCE);
+       ICP_QAT_FW_SYM_IV_SIZE_SET(header->serv_specif_flags, ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+       ICP_QAT_FW_SYM_IV_IN_DESC_FLAG_SET(header->serv_specif_flags,
+                       ICP_QAT_FW_SYM_IV_IN_DESC_VALID);
+
+       if (session->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+               ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_DECRYPT);
+       } else {
+               ICP_QAT_FW_SYM_DIR_FLAG_SET(header->serv_specif_flags, ICP_QAT_HW_CIPHER_ENCRYPT);
+       }
+}
+
 int qat_sym_cd_cipher_set(struct qat_sym_session *cdesc,
                                                const uint8_t *cipherkey,
                                                uint32_t cipherkeylen)
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index 2e25c90342..c41f8cc791 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -111,10 +111,16 @@ struct qat_sym_session {
        enum icp_qat_hw_auth_op auth_op;
        enum icp_qat_hw_auth_mode auth_mode;
        void *bpi_ctx;
-       struct qat_sym_cd cd;
+       union {
+               struct qat_sym_cd cd;
+               uint8_t key_array[32];
+       };
        uint8_t prefix_state[QAT_PREFIX_TBL_SIZE] __rte_cache_aligned;
        uint8_t *cd_cur_ptr;
-       phys_addr_t cd_paddr;
+       union {
+               phys_addr_t cd_paddr;
+               phys_addr_t key_paddr;
+       };
        phys_addr_t prefix_paddr;
        struct icp_qat_fw_la_bulk_req fw_req;
        uint8_t aad_len;
-- 
2.25.1

Reply via email to