Add support for asymmetric RSA operations (SIGN, VERIFY, ENCRYPT and
DECRYPT) in the virtio crypto PMD.
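
A minimal usage sketch from the application side is given below for
reference. It is illustrative only: device and queue-pair setup and error
checks are omitted, and dev_id, asym_sess_mp, op_mp and the key/message
buffers with their lengths are placeholders.

    struct rte_crypto_asym_xform xform = {
        .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,
        .rsa = {
            .key_type = RTE_RSA_KEY_TYPE_EXP,
            .padding = { .type = RTE_CRYPTO_RSA_PADDING_PKCS1_5 },
            .n = { .data = n_data, .length = n_len },
            .e = { .data = e_data, .length = e_len },
        },
    };
    struct rte_crypto_op *op;
    void *sess;

    /* create an RSA session on the virtio crypto device */
    rte_cryptodev_asym_session_create(dev_id, &xform, asym_sess_mp, &sess);

    /* build and submit one RSA public-key encrypt operation */
    op = rte_crypto_op_alloc(op_mp, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
    op->asym->rsa.op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
    op->asym->rsa.message.data = msg;
    op->asym->rsa.message.length = msg_len;
    op->asym->rsa.cipher.data = out;
    op->asym->rsa.cipher.length = out_len;
    rte_crypto_op_attach_asym_session(op, sess);

    rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1);
    /* the result is fetched later with rte_cryptodev_dequeue_burst() */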

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukri...@marvell.com>
---
 .../virtio/virtio_crypto_capabilities.h       |  19 +
 drivers/crypto/virtio/virtio_cryptodev.c      | 384 +++++++++++++++---
 drivers/crypto/virtio/virtio_rxtx.c           | 226 ++++++++++-
 lib/cryptodev/cryptodev_pmd.h                 |   6 +
 lib/vhost/virtio_crypto.h                     |  80 ++++
 5 files changed, 647 insertions(+), 68 deletions(-)

diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h
index 03c30deefd..1b26ff6720 100644
--- a/drivers/crypto/virtio/virtio_crypto_capabilities.h
+++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -48,4 +48,23 @@
                }, }                                                    \
        }
 
+#define VIRTIO_ASYM_CAPABILITIES                                       \
+       {       /* RSA */                                               \
+               .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC,                    \
+               {.asym = {                                              \
+                       .xform_capa = {                                 \
+                       .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA,        \
+                       .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) |   \
+                                       (1 << RTE_CRYPTO_ASYM_OP_VERIFY) |  \
+                                       (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | \
+                                       (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), \
+                       {.modlen = {                                    \
+                               .min = 1,                               \
+                               .max = 1024,                            \
+                               .increment = 1                          \
+                       }, }                                            \
+               }                                                       \
+                }, }                                                   \
+       }
+
 #endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
index 98415af123..f9a3f1e13a 100644
--- a/drivers/crypto/virtio/virtio_cryptodev.c
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -41,6 +41,11 @@ static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
 static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform,
                struct rte_cryptodev_sym_session *session);
+static void virtio_crypto_asym_clear_session(struct rte_cryptodev *dev,
+               struct rte_cryptodev_asym_session *sess);
+static int virtio_crypto_asym_configure_session(struct rte_cryptodev *dev,
+               struct rte_crypto_asym_xform *xform,
+               struct rte_cryptodev_asym_session *session);
 
 /*
  * The set of PCI devices this driver supports
@@ -53,6 +58,7 @@ static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
 
 static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
        VIRTIO_SYM_CAPABILITIES,
+       VIRTIO_ASYM_CAPABILITIES,
        RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
 };
 
@@ -88,7 +94,7 @@ virtio_crypto_send_command(struct virtqueue *vq,
                return -EINVAL;
        }
        /* cipher only is supported, it is available if auth_key is NULL */
-       if (!cipher_key) {
+       if (session->ctrl.header.algo == VIRTIO_CRYPTO_SERVICE_CIPHER && !cipher_key) {
                VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
                return -EINVAL;
        }
@@ -104,19 +110,23 @@ virtio_crypto_send_command(struct virtqueue *vq,
 
        /* calculate the length of cipher key */
        if (cipher_key) {
-               switch (ctrl->u.sym_create_session.op_type) {
-               case VIRTIO_CRYPTO_SYM_OP_CIPHER:
-                       len_cipher_key
-                               = ctrl->u.sym_create_session.u.cipher
-                                                       .para.keylen;
-                       break;
-               case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
-                       len_cipher_key
-                               = ctrl->u.sym_create_session.u.chain
-                                       .para.cipher_param.keylen;
-                       break;
-               default:
-                       VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+               if (session->ctrl.header.algo == VIRTIO_CRYPTO_SERVICE_CIPHER) {
+                       switch (ctrl->u.sym_create_session.op_type) {
+                       case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+                               len_cipher_key = ctrl->u.sym_create_session.u.cipher.para.keylen;
+                               break;
+                       case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+                               len_cipher_key =
+                                       ctrl->u.sym_create_session.u.chain.para.cipher_param.keylen;
+                               break;
+                       default:
+                               VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+                               return -EINVAL;
+                       }
+               } else if (session->ctrl.header.algo == VIRTIO_CRYPTO_AKCIPHER_RSA) {
+                       len_cipher_key = ctrl->u.akcipher_create_session.para.keylen;
+               } else {
+                       VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid crypto service for cipher key");
                        return -EINVAL;
                }
        }
@@ -513,7 +523,10 @@ static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
        /* Crypto related operations */
        .sym_session_get_size           = virtio_crypto_sym_get_session_private_size,
        .sym_session_configure          = virtio_crypto_sym_configure_session,
-       .sym_session_clear              = virtio_crypto_sym_clear_session
+       .sym_session_clear              = virtio_crypto_sym_clear_session,
+       .asym_session_get_size          = virtio_crypto_sym_get_session_private_size,
+       .asym_session_configure         = virtio_crypto_asym_configure_session,
+       .asym_session_clear             = virtio_crypto_asym_clear_session
 };
 
 static void
@@ -737,6 +750,8 @@ crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
        cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
 
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+               RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
+               RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT |
                RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
                RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
 
@@ -924,32 +939,24 @@ virtio_crypto_check_sym_clear_session_paras(
 #define NUM_ENTRY_SYM_CLEAR_SESSION 2
 
 static void
-virtio_crypto_sym_clear_session(
+virtio_crypto_clear_session(
                struct rte_cryptodev *dev,
-               struct rte_cryptodev_sym_session *sess)
+               struct virtio_crypto_op_ctrl_req *ctrl)
 {
        struct virtio_crypto_hw *hw;
        struct virtqueue *vq;
-       struct virtio_crypto_session *session;
-       struct virtio_crypto_op_ctrl_req *ctrl;
        struct vring_desc *desc;
        uint8_t *status;
        uint8_t needed = 1;
        uint32_t head;
-       uint8_t *malloc_virt_addr;
        uint64_t malloc_phys_addr;
        uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
        uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
        uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
-
-       PMD_INIT_FUNC_TRACE();
-
-       if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
-               return;
+       uint64_t session_id = ctrl->u.destroy_session.session_id;
 
        hw = dev->data->dev_private;
        vq = hw->cvq;
-       session = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
 
        VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
                        "vq = %p", vq->vq_desc_head_idx, vq);
@@ -961,34 +968,15 @@ virtio_crypto_sym_clear_session(
                return;
        }
 
-       /*
-        * malloc memory to store information of ctrl request op,
-        * returned status and desc vring
-        */
-       malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
-               + NUM_ENTRY_SYM_CLEAR_SESSION
-               * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
-       if (malloc_virt_addr == NULL) {
-               VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
-               return;
-       }
-       malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
-
-       /* assign ctrl request op part */
-       ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
-       ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
-       /* default data virtqueue is 0 */
-       ctrl->header.queue_id = 0;
-       ctrl->u.destroy_session.session_id = session->session_id;
+       malloc_phys_addr = rte_malloc_virt2iova(ctrl);
 
        /* status part */
        status = &(((struct virtio_crypto_inhdr *)
-               ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+               ((uint8_t *)ctrl + len_op_ctrl_req))->status);
        *status = VIRTIO_CRYPTO_ERR;
 
        /* indirect desc vring part */
-       desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
-               + desc_offset);
+       desc = (struct vring_desc *)((uint8_t *)ctrl + desc_offset);
 
        /* ctrl request part */
        desc[0].addr = malloc_phys_addr;
@@ -1050,8 +1038,8 @@ virtio_crypto_sym_clear_session(
        if (*status != VIRTIO_CRYPTO_OK) {
                VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
                                "status=%"PRIu32", session_id=%"PRIu64"",
-                               *status, session->session_id);
-               rte_free(malloc_virt_addr);
+                               *status, session_id);
+               rte_free(ctrl);
                return;
        }
 
@@ -1059,9 +1047,86 @@ virtio_crypto_sym_clear_session(
        VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx=%d", vq->vq_desc_head_idx);
 
        VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
-                       session->session_id);
+                       session_id);
+
+       rte_free(ctrl);
+}
+
+static void
+virtio_crypto_sym_clear_session(
+               struct rte_cryptodev *dev,
+               struct rte_cryptodev_sym_session *sess)
+{
+       uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+       uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_session *session;
+       uint8_t *malloc_virt_addr;
 
-       rte_free(malloc_virt_addr);
+       PMD_INIT_FUNC_TRACE();
+
+       if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+               return;
+
+       session = CRYPTODEV_GET_SYM_SESS_PRIV(sess);
+
+       /*
+        * malloc memory to store information of ctrl request op,
+        * returned status and desc vring
+        */
+       malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+               + NUM_ENTRY_SYM_CLEAR_SESSION
+               * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+       if (malloc_virt_addr == NULL) {
+               VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+               return;
+       }
+
+       /* assign ctrl request op part */
+       ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+       ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+       /* default data virtqueue is 0 */
+       ctrl->header.queue_id = 0;
+       ctrl->u.destroy_session.session_id = session->session_id;
+
+       return virtio_crypto_clear_session(dev, ctrl);
+}
+
+static void
+virtio_crypto_asym_clear_session(
+               struct rte_cryptodev *dev,
+               struct rte_cryptodev_asym_session *sess)
+{
+       uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+       uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+       struct virtio_crypto_op_ctrl_req *ctrl;
+       struct virtio_crypto_session *session;
+       uint8_t *malloc_virt_addr;
+
+       PMD_INIT_FUNC_TRACE();
+
+       session = CRYPTODEV_GET_ASYM_SESS_PRIV(sess);
+
+       /*
+        * malloc memory to store information of ctrl request op,
+        * returned status and desc vring
+        */
+       malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+               + NUM_ENTRY_SYM_CLEAR_SESSION
+               * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+       if (malloc_virt_addr == NULL) {
+               VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+               return;
+       }
+
+       /* assign ctrl request op part */
+       ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+       ctrl->header.opcode = VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION;
+       /* default data virtqueue is 0 */
+       ctrl->header.queue_id = 0;
+       ctrl->u.destroy_session.session_id = session->session_id;
+
+       return virtio_crypto_clear_session(dev, ctrl);
 }
 
 static struct rte_crypto_cipher_xform *
@@ -1292,6 +1357,23 @@ virtio_crypto_check_sym_configure_session_paras(
        return 0;
 }
 
+static int
+virtio_crypto_check_asym_configure_session_paras(
+               struct rte_cryptodev *dev,
+               struct rte_crypto_asym_xform *xform,
+               struct rte_cryptodev_asym_session *asym_sess)
+{
+       if (unlikely(xform == NULL) || unlikely(asym_sess == NULL)) {
+               VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+               return -1;
+       }
+
+       if (virtio_crypto_check_sym_session_paras(dev) < 0)
+               return -1;
+
+       return 0;
+}
+
 static int
 virtio_crypto_sym_configure_session(
                struct rte_cryptodev *dev,
@@ -1383,6 +1465,204 @@ virtio_crypto_sym_configure_session(
        return -1;
 }
 
+static size_t
+tlv_encode(uint8_t **tlv, uint8_t type, uint8_t *data, size_t len)
+{
+       uint8_t *lenval = NULL;
+       size_t lenval_n = 0;
+
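+       /*
+        * DER definite-length encoding: a single length byte for len < 128,
+        * 0x81 plus a one-byte length for len < 256, 0x82 plus a two-byte
+        * length for len < 65536; anything larger is rejected.
+        */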
+       if (len > 65535) {
+               goto _exit;
+       } else if (len > 255) {
+               lenval_n = 4 + len;
+               lenval = rte_malloc(NULL, lenval_n, 0);
+
+               lenval[0] = type;
+               lenval[1] = 0x82;
+               lenval[2] = (len & 0xFF00) >> 8;
+               lenval[3] = (len & 0xFF);
+               rte_memcpy(&lenval[4], data, len);
+       } else if (len > 127) {
+               lenval_n = 3 + len;
+               lenval = rte_malloc(NULL, lenval_n, 0);
+
+               lenval[0] = type;
+               lenval[1] = 0x81;
+               lenval[2] = len;
+               rte_memcpy(&lenval[3], data, len);
+       } else {
+               lenval_n = 2 + len;
+               lenval = rte_malloc(NULL, lenval_n, 0);
+
+               lenval[0] = type;
+               lenval[1] = len;
+               rte_memcpy(&lenval[2], data, len);
+       }
+
+_exit:
+       *tlv = lenval;
+       return lenval_n;
+}
+
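+/*
+ * Encode the RSA xform as a DER SEQUENCE of INTEGERs following the
+ * PKCS#1 RSAPrivateKey field order: version, n, e, d, p, q, dP, dQ, qInv.
+ */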
+static int
+virtio_crypto_asym_rsa_xform_to_der(
+               struct rte_crypto_asym_xform *xform,
+               unsigned char **der)
+{
+       size_t nlen, elen, dlen, plen, qlen, dplen, dqlen, qinvlen, tlen;
+       uint8_t *n, *e, *d, *p, *q, *dp, *dq, *qinv, *t;
+       uint8_t ver[3] = {0x02, 0x01, 0x00};
+
+       if (xform->xform_type != RTE_CRYPTO_ASYM_XFORM_RSA)
+               return -EINVAL;
+
+       /* Length of sequence in bytes */
+       tlen = RTE_DIM(ver);
+       nlen = tlv_encode(&n, 0x02, xform->rsa.n.data, xform->rsa.n.length);
+       elen = tlv_encode(&e, 0x02, xform->rsa.e.data, xform->rsa.e.length);
+       tlen += (nlen + elen);
+
+       dlen = tlv_encode(&d, 0x02, xform->rsa.d.data, xform->rsa.d.length);
+       tlen += dlen;
+
+       plen = tlv_encode(&p, 0x02, xform->rsa.qt.p.data, xform->rsa.qt.p.length);
+       qlen = tlv_encode(&q, 0x02, xform->rsa.qt.q.data, xform->rsa.qt.q.length);
+       dplen = tlv_encode(&dp, 0x02, xform->rsa.qt.dP.data, xform->rsa.qt.dP.length);
+       dqlen = tlv_encode(&dq, 0x02, xform->rsa.qt.dQ.data, xform->rsa.qt.dQ.length);
+       qinvlen = tlv_encode(&qinv, 0x02, xform->rsa.qt.qInv.data, xform->rsa.qt.qInv.length);
+       tlen += (plen + qlen + dplen + dqlen + qinvlen);
+
+       t = rte_malloc(NULL, tlen, 0);
+       *der = t;
+       rte_memcpy(t, ver, RTE_DIM(ver));
+       t += RTE_DIM(ver);
+       rte_memcpy(t, n, nlen);
+       t += nlen;
+       rte_memcpy(t, e, elen);
+       t += elen;
+       rte_free(n);
+       rte_free(e);
+
+       rte_memcpy(t, d, dlen);
+       t += dlen;
+       rte_free(d);
+
+       rte_memcpy(t, p, plen);
+       t += plen;
+       rte_memcpy(t, q, qlen);
+       t += qlen;
+       rte_memcpy(t, dp, dplen);
+       t += dplen;
+       rte_memcpy(t, dq, dqlen);
+       t += dqlen;
+       rte_memcpy(t, qinv, qinvlen);
+       t += qinvlen;
+       rte_free(p);
+       rte_free(q);
+       rte_free(dp);
+       rte_free(dq);
+       rte_free(qinv);
+
+       t = *der;
+       tlen = tlv_encode(der, 0x30, t, tlen);
+       return tlen;
+}
+
+static int
+virtio_crypto_asym_configure_session(
+               struct rte_cryptodev *dev,
+               struct rte_crypto_asym_xform *xform,
+               struct rte_cryptodev_asym_session *sess)
+{
+       struct virtio_crypto_akcipher_session_para *para;
+       struct virtio_crypto_op_ctrl_req *ctrl_req;
+       struct virtio_crypto_session *session;
+       struct virtio_crypto_hw *hw;
+       struct virtqueue *control_vq;
+       uint8_t *key = NULL;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       ret = virtio_crypto_check_asym_configure_session_paras(dev, xform,
+                       sess);
+       if (ret < 0) {
+               VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+               return ret;
+       }
+
+       session = CRYPTODEV_GET_ASYM_SESS_PRIV(sess);
+       memset(session, 0, sizeof(struct virtio_crypto_session));
+       ctrl_req = &session->ctrl;
+       ctrl_req->header.opcode = VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION;
+       /* FIXME: support multiqueue */
+       ctrl_req->header.queue_id = 0;
+       para = &ctrl_req->u.akcipher_create_session.para;
+
+       switch (xform->xform_type) {
+       case RTE_CRYPTO_ASYM_XFORM_RSA:
+               ctrl_req->header.algo = VIRTIO_CRYPTO_AKCIPHER_RSA;
+               para->algo = VIRTIO_CRYPTO_AKCIPHER_RSA;
+
+               if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP)
+                       para->keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC;
+               else
+                       para->keytype = VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE;
+
+               if (xform->rsa.padding.type == RTE_CRYPTO_RSA_PADDING_NONE) {
+                       para->u.rsa.padding_algo = VIRTIO_CRYPTO_RSA_RAW_PADDING;
+               } else if (xform->rsa.padding.type == RTE_CRYPTO_RSA_PADDING_PKCS1_5) {
+                       para->u.rsa.padding_algo = VIRTIO_CRYPTO_RSA_PKCS1_PADDING;
+                       switch (xform->rsa.padding.hash) {
+                       case  RTE_CRYPTO_AUTH_SHA1:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_SHA1;
+                               break;
+                       case  RTE_CRYPTO_AUTH_SHA224:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_SHA224;
+                               break;
+                       case  RTE_CRYPTO_AUTH_SHA256:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_SHA256;
+                               break;
+                       case  RTE_CRYPTO_AUTH_SHA512:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_SHA512;
+                               break;
+                       case  RTE_CRYPTO_AUTH_MD5:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_MD5;
+                               break;
+                       default:
+                               para->u.rsa.hash_algo = VIRTIO_CRYPTO_RSA_NO_HASH;
+                       }
+               } else {
+                       VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid padding type");
+                       return -EINVAL;
+               }
+
+               ret = virtio_crypto_asym_rsa_xform_to_der(xform, &key);
+               if (ret <= 0) {
+                       VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid RSA primitives");
+                       return ret;
+               }
+
+               ctrl_req->u.akcipher_create_session.para.keylen = ret;
+               break;
+       default:
+               para->algo = VIRTIO_CRYPTO_NO_AKCIPHER;
+       }
+
+       hw = dev->data->dev_private;
+       control_vq = hw->cvq;
+       ret = virtio_crypto_send_command(control_vq, ctrl_req,
+                               key, NULL, session);
+       if (ret < 0) {
+               VIRTIO_CRYPTO_SESSION_LOG_ERR("create session failed: %d", ret);
+               goto error_out;
+       }
+
+       return 0;
+error_out:
+       return -1;
+}
+
 static void
 virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
                struct rte_cryptodev_info *info)
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
index 48b5f4ebbb..d00af8b7ce 100644
--- a/drivers/crypto/virtio/virtio_rxtx.c
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -343,6 +343,196 @@ virtqueue_crypto_sym_enqueue_xmit(
        return 0;
 }
 
+static int
+virtqueue_crypto_asym_pkt_header_arrange(
+               struct rte_crypto_op *cop,
+               struct virtio_crypto_op_data_req *data,
+               struct virtio_crypto_session *session)
+{
+       struct rte_crypto_asym_op *asym_op = cop->asym;
+       struct virtio_crypto_op_data_req *req_data = data;
+       struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+
+       req_data->header.session_id = session->session_id;
+
+       switch (ctrl->header.algo) {
+       case VIRTIO_CRYPTO_AKCIPHER_RSA:
+               req_data->header.algo = ctrl->header.algo;
+               if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+                       req_data->header.opcode = VIRTIO_CRYPTO_AKCIPHER_SIGN;
+                       req_data->u.akcipher_req.para.src_data_len
+                               = asym_op->rsa.message.length;
+                       /* qemu does not accept zero size write buffer */
+                       req_data->u.akcipher_req.para.dst_data_len
+                               = asym_op->rsa.sign.length;
+               } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+                       req_data->header.opcode = VIRTIO_CRYPTO_AKCIPHER_VERIFY;
+                       req_data->u.akcipher_req.para.src_data_len
+                               = asym_op->rsa.sign.length;
+                       /* qemu does not accept zero size write buffer */
+                       req_data->u.akcipher_req.para.dst_data_len
+                               = asym_op->rsa.message.length;
+               } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+                       req_data->header.opcode = VIRTIO_CRYPTO_AKCIPHER_ENCRYPT;
+                       req_data->u.akcipher_req.para.src_data_len
+                               = asym_op->rsa.message.length;
+                       /* qemu does not accept zero size write buffer */
+                       req_data->u.akcipher_req.para.dst_data_len
+                               = asym_op->rsa.cipher.length;
+               } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+                       req_data->header.opcode = VIRTIO_CRYPTO_AKCIPHER_DECRYPT;
+                       req_data->u.akcipher_req.para.src_data_len
+                               = asym_op->rsa.cipher.length;
+                       /* qemu does not accept zero size write buffer */
+                       req_data->u.akcipher_req.para.dst_data_len
+                               = asym_op->rsa.message.length;
+               } else {
+                       return -EINVAL;
+               }
+
+               break;
+       default:
+               req_data->header.algo = VIRTIO_CRYPTO_NO_AKCIPHER;
+       }
+
+       return 0;
+}
+
+static int
+virtqueue_crypto_asym_enqueue_xmit(
+               struct virtqueue *txvq,
+               struct rte_crypto_op *cop)
+{
+       uint16_t idx = 0;
+       uint16_t num_entry;
+       uint16_t needed = 1;
+       uint16_t head_idx;
+       struct vq_desc_extra *dxp;
+       struct vring_desc *start_dp;
+       struct vring_desc *desc;
+       uint64_t indirect_op_data_req_phys_addr;
+       uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+       uint32_t indirect_vring_addr_offset = req_data_len +
+               sizeof(struct virtio_crypto_inhdr);
+       struct rte_crypto_asym_op *asym_op = cop->asym;
+       struct virtio_crypto_session *session =
+               CRYPTODEV_GET_ASYM_SESS_PRIV(cop->asym->session);
+       struct virtio_crypto_op_data_req *op_data_req;
+       struct virtio_crypto_op_cookie *crypto_op_cookie;
+
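+       /*
+        * The op cookie taken from the mempool backs the whole request: the
+        * op data req, the status inhdr and the indirect descriptor table
+        * are laid out back to back, addressed from one base IOVA.
+        */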
+       if (unlikely(txvq->vq_free_cnt == 0))
+               return -ENOSPC;
+       if (unlikely(txvq->vq_free_cnt < needed))
+               return -EMSGSIZE;
+       head_idx = txvq->vq_desc_head_idx;
+       if (unlikely(head_idx >= txvq->vq_nentries))
+               return -EFAULT;
+
+       dxp = &txvq->vq_descx[head_idx];
+
+       if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+               VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie");
+               return -EFAULT;
+       }
+       crypto_op_cookie = dxp->cookie;
+       indirect_op_data_req_phys_addr =
+               rte_mempool_virt2iova(crypto_op_cookie);
+       op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+       if (virtqueue_crypto_asym_pkt_header_arrange(cop, op_data_req, session))
+               return -EFAULT;
+
+       /* status is initialized to VIRTIO_CRYPTO_ERR */
+       ((struct virtio_crypto_inhdr *)
+               ((uint8_t *)op_data_req + req_data_len))->status =
+               VIRTIO_CRYPTO_ERR;
+
+       /* point to indirect vring entry */
+       desc = (struct vring_desc *)
+               ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+       for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+               desc[idx].next = idx + 1;
+       desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+       idx = 0;
+
+       /* indirect vring: first part, virtio_crypto_op_data_req */
+       desc[idx].addr = indirect_op_data_req_phys_addr;
+       desc[idx].len = req_data_len;
+       desc[idx++].flags = VRING_DESC_F_NEXT;
+
+       if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) {
+               /* indirect vring: src data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.message.data);
+               desc[idx].len = asym_op->rsa.message.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT;
+
+               /* indirect vring: dst data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.sign.data);
+               desc[idx].len = asym_op->rsa.sign.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
+       } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_VERIFY) {
+               /* indirect vring: src data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.sign.data);
+               desc[idx].len = asym_op->rsa.sign.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT;
+
+               /* indirect vring: dst data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.message.data);
+               desc[idx].len = asym_op->rsa.message.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT;
+       } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_ENCRYPT) {
+               /* indirect vring: src data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.message.data);
+               desc[idx].len = asym_op->rsa.message.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT;
+
+               /* indirect vring: dst data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.cipher.data);
+               desc[idx].len = asym_op->rsa.cipher.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
+       } else if (asym_op->rsa.op_type == RTE_CRYPTO_ASYM_OP_DECRYPT) {
+               /* indirect vring: src data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.cipher.data);
+               desc[idx].len = asym_op->rsa.cipher.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT;
+
+               /* indirect vring: dst data */
+               desc[idx].addr = rte_mem_virt2iova(asym_op->rsa.message.data);
+               desc[idx].len = asym_op->rsa.message.length;
+               desc[idx++].flags = VRING_DESC_F_NEXT | VRING_DESC_F_WRITE;
+       } else {
+               VIRTIO_CRYPTO_TX_LOG_ERR("Invalid asym op");
+               return -EINVAL;
+       }
+
+       /* indirect vring: last part, status returned */
+       desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+       desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+       desc[idx++].flags = VRING_DESC_F_WRITE;
+
+       num_entry = idx;
+
+       /* save the infos to use when receiving packets */
+       dxp->crypto_op = (void *)cop;
+       dxp->ndescs = needed;
+
+       /* use a single buffer */
+       start_dp = txvq->vq_ring.desc;
+       start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+               indirect_vring_addr_offset;
+       start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+       start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+       idx = start_dp[head_idx].next;
+       txvq->vq_desc_head_idx = idx;
+       if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+               txvq->vq_desc_tail_idx = idx;
+       txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+       vq_update_avail_ring(txvq, head_idx);
+
+       return 0;
+}
+
 static int
 virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
                struct rte_crypto_op *cop)
@@ -353,6 +543,9 @@ virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
        case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
                ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
                break;
+       case RTE_CRYPTO_OP_TYPE_ASYMMETRIC:
+               ret = virtqueue_crypto_asym_enqueue_xmit(txvq, cop);
+               break;
        default:
                VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
                                cop->type);
@@ -476,27 +669,28 @@ virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
        VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
-               struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
-               /* nb_segs is always 1 at virtio crypto situation */
-               int need = txm->nb_segs - txvq->vq_free_cnt;
-
-               /*
-                * Positive value indicates it hasn't enough space in vring
-                * descriptors
-                */
-               if (unlikely(need > 0)) {
+               if (tx_pkts[nb_tx]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+                       struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+                       /* nb_segs is always 1 at virtio crypto situation */
+                       int need = txm->nb_segs - txvq->vq_free_cnt;
+
                        /*
-                        * try it again because the receive process may be
-                        * free some space
+                        * Positive value indicates it hasn't enough space in vring
+                        * descriptors
                         */
-                       need = txm->nb_segs - txvq->vq_free_cnt;
                        if (unlikely(need > 0)) {
-                               VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
-                                       "descriptors to transmit");
-                               break;
+                               /*
+                                * try it again because the receive process may be
+                                * free some space
+                                */
+                               need = txm->nb_segs - txvq->vq_free_cnt;
+                               if (unlikely(need > 0)) {
+                                       VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+                                               "descriptors to transmit");
+                                       break;
+                               }
                        }
                }
-
                txvq->packets_sent_total++;
 
                /* Enqueue Packet buffers */
diff --git a/lib/cryptodev/cryptodev_pmd.h b/lib/cryptodev/cryptodev_pmd.h
index 5c84a3b847..929c6defe9 100644
--- a/lib/cryptodev/cryptodev_pmd.h
+++ b/lib/cryptodev/cryptodev_pmd.h
@@ -715,6 +715,12 @@ struct rte_cryptodev_asym_session {
        uint8_t sess_private_data[];
 };
 
+/**
+ * Helper macro to get session private data
+ */
+#define CRYPTODEV_GET_ASYM_SESS_PRIV(s) \
+       ((void *)(((struct rte_cryptodev_asym_session *)s)->sess_private_data))
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/vhost/virtio_crypto.h b/lib/vhost/virtio_crypto.h
index 28877a5da3..d42af62f2f 100644
--- a/lib/vhost/virtio_crypto.h
+++ b/lib/vhost/virtio_crypto.h
@@ -9,6 +9,7 @@
 #define VIRTIO_CRYPTO_SERVICE_HASH   1
 #define VIRTIO_CRYPTO_SERVICE_MAC    2
 #define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -29,6 +30,10 @@ struct virtio_crypto_ctrl_header {
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
        uint32_t opcode;
        uint32_t algo;
        uint32_t flag;
@@ -152,6 +157,58 @@ struct virtio_crypto_aead_create_session_req {
        uint8_t padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+       uint32_t padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2       1
+#define VIRTIO_CRYPTO_RSA_MD3       2
+#define VIRTIO_CRYPTO_RSA_MD4       3
+#define VIRTIO_CRYPTO_RSA_MD5       4
+#define VIRTIO_CRYPTO_RSA_SHA1      5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+       uint32_t hash_algo;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+       uint32_t curve_id;
+       uint32_t padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+       uint32_t algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+       uint32_t keytype;
+       uint32_t keylen;
+
+       union {
+               struct virtio_crypto_rsa_session_para rsa;
+               struct virtio_crypto_ecdsa_session_para ecdsa;
+       } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+       struct virtio_crypto_akcipher_session_para para;
+       uint8_t padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -219,6 +276,8 @@ struct virtio_crypto_op_ctrl_req {
                        mac_create_session;
                struct virtio_crypto_aead_create_session_req
                        aead_create_session;
+               struct virtio_crypto_akcipher_create_session_req
+                       akcipher_create_session;
                struct virtio_crypto_destroy_session_req
                        destroy_session;
                uint8_t padding[56];
@@ -238,6 +297,14 @@ struct virtio_crypto_op_header {
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
        uint32_t opcode;
        /* algo should be service-specific algorithms */
        uint32_t algo;
@@ -362,6 +429,16 @@ struct virtio_crypto_aead_data_req {
        uint8_t padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+       uint32_t src_data_len;
+       uint32_t dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+       struct virtio_crypto_akcipher_para para;
+       uint8_t padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
        struct virtio_crypto_op_header header;
@@ -371,6 +448,7 @@ struct virtio_crypto_op_data_req {
                struct virtio_crypto_hash_data_req hash_req;
                struct virtio_crypto_mac_data_req mac_req;
                struct virtio_crypto_aead_data_req aead_req;
+               struct virtio_crypto_akcipher_data_req akcipher_req;
                uint8_t padding[48];
        } u;
 };
@@ -380,6 +458,8 @@ struct virtio_crypto_op_data_req {
 #define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC     5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
-- 
2.25.1
