Support asymmetric RSA crypto operations in vhost-user.

Signed-off-by: Gowrishankar Muthukrishnan <gmuthukri...@marvell.com>
---
 lib/cryptodev/cryptodev_pmd.h |   6 +
 lib/vhost/rte_vhost_crypto.h  |  14 +-
 lib/vhost/vhost.c             |  11 +-
 lib/vhost/vhost.h             |   1 +
 lib/vhost/vhost_crypto.c      | 551 +++++++++++++++++++++++++++++++---
 lib/vhost/vhost_user.c        |   4 +
 lib/vhost/vhost_user.h        |  34 ++-
 lib/vhost/virtio_crypto.h     |  87 +++++-
 8 files changed, 655 insertions(+), 53 deletions(-)

diff --git a/lib/cryptodev/cryptodev_pmd.h b/lib/cryptodev/cryptodev_pmd.h
index 3bb3d95c13..bbda72a238 100644
--- a/lib/cryptodev/cryptodev_pmd.h
+++ b/lib/cryptodev/cryptodev_pmd.h
@@ -696,6 +696,12 @@ struct rte_cryptodev_asym_session {
        uint8_t sess_private_data[];
 };
 
+/**
+ * Helper macro to get session private data
+ */
+#define CRYPTODEV_GET_ASYM_SESS_PRIV(s) \
+       ((void *)(((struct rte_cryptodev_asym_session *)s)->sess_private_data))
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/vhost/rte_vhost_crypto.h b/lib/vhost/rte_vhost_crypto.h
index 2b01ecda08..e03542de26 100644
--- a/lib/vhost/rte_vhost_crypto.h
+++ b/lib/vhost/rte_vhost_crypto.h
@@ -52,8 +52,10 @@ rte_vhost_crypto_driver_start(const char *path);
  * @param cryptodev_id
  *  The identifier of DPDK Cryptodev, the same cryptodev_id can be assigned to
  *  multiple Vhost-crypto devices.
- * @param sess_pool
- *  The pointer to the created cryptodev session pool.
+ * @param sym_sess_pool
+ *  The pointer to the created cryptodev sym session pool.
+ * @param asym_sess_pool
+ *  The pointer to the created cryptodev asym session pool.
  * @param socket_id
  *  NUMA Socket ID to allocate resources on. *
  * @return
@@ -62,7 +64,7 @@ rte_vhost_crypto_driver_start(const char *path);
  */
 int
 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
-               struct rte_mempool *sess_pool,
+               struct rte_mempool *sym_sess_pool, struct rte_mempool *asym_sess_pool,
                int socket_id);
 
 /**
@@ -116,6 +118,10 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
  * dequeued from the cryptodev, this function shall be called to write the
  * processed data back to the vring descriptor (if no-copy is turned off).
  *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param qid
+ *  Virtio queue index.
  * @param ops
  *  The address of an array of *rte_crypto_op* structure that was dequeued
  *  from cryptodev.
@@ -130,7 +136,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
  *  The number of ops processed.
  */
 uint16_t
-rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+rte_vhost_crypto_finalize_requests(int vid, int qid, struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
 
 #ifdef __cplusplus
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index c03bb9c6eb..675459e05b 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -631,8 +631,12 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
        /* Also allocate holes, if any, up to requested vring index. */
        for (i = 0; i <= vring_idx; i++) {
-               if (dev->virtqueue[i])
+               rte_spinlock_lock(&dev->virtqueue_lock);
+               if (dev->virtqueue[i]) {
+                       rte_spinlock_unlock(&dev->virtqueue_lock);
                        continue;
+               }
+               rte_spinlock_unlock(&dev->virtqueue_lock);
 
                vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
                if (vq == NULL) {
@@ -642,13 +646,15 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
                        return -1;
                }
 
-               dev->virtqueue[i] = vq;
                init_vring_queue(dev, vq, i);
                rte_rwlock_init(&vq->access_lock);
                rte_rwlock_init(&vq->iotlb_lock);
                vq->avail_wrap_counter = 1;
                vq->used_wrap_counter = 1;
                vq->signalled_used_valid = false;
+               rte_spinlock_lock(&dev->virtqueue_lock);
+               dev->virtqueue[i] = vq;
+               rte_spinlock_unlock(&dev->virtqueue_lock);
        }
 
        dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
@@ -735,6 +741,7 @@ vhost_new_device(struct vhost_backend_ops *ops)
        dev->postcopy_ufd = -1;
        rte_spinlock_init(&dev->backend_req_lock);
        dev->backend_ops = ops;
+       rte_spinlock_init(&dev->virtqueue_lock);
 
        return i;
 }
diff --git a/lib/vhost/vhost.h b/lib/vhost/vhost.h
index 9723429b1c..9ba8cca5d4 100644
--- a/lib/vhost/vhost.h
+++ b/lib/vhost/vhost.h
@@ -489,6 +489,7 @@ struct virtio_net {
 
        int                     extbuf;
        int                     linearbuf;
+       rte_spinlock_t virtqueue_lock;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
 
        rte_rwlock_t    iotlb_pending_lock;
diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index 9bf5ef67b9..66fa53baea 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -53,6 +53,15 @@
  */
 #define vhost_crypto_desc vring_desc
 
+struct vhost_crypto_session {
+       RTE_STD_C11
+       union {
+               struct rte_cryptodev_asym_session *asym;
+               struct rte_cryptodev_sym_session *sym;
+       };
+       enum rte_crypto_op_type type;
+};
+
 static int
 cipher_algo_transform(uint32_t virtio_cipher_algo,
                enum rte_crypto_cipher_algorithm *algo)
@@ -196,7 +205,8 @@ struct vhost_crypto {
         */
        struct rte_hash *session_map;
        struct rte_mempool *mbuf_pool;
-       struct rte_mempool *sess_pool;
+       struct rte_mempool *sym_sess_pool;
+       struct rte_mempool *asym_sess_pool;
        struct rte_mempool *wb_pool;
 
        /** DPDK cryptodev ID */
@@ -205,8 +215,10 @@ struct vhost_crypto {
 
        uint64_t last_session_id;
 
-       uint64_t cache_session_id;
-       struct rte_cryptodev_sym_session *cache_session;
+       uint64_t cache_sym_session_id;
+       void *cache_sym_session;
+       uint64_t cache_asym_session_id;
+       void *cache_asym_session;
        /** socket id for the device */
        int socket_id;
 
@@ -236,7 +248,7 @@ struct vhost_crypto_data_req {
 
 static int
 transform_cipher_param(struct rte_crypto_sym_xform *xform,
-               VhostUserCryptoSessionParam *param)
+               VhostUserCryptoSymSessionParam *param)
 {
        int ret;
 
@@ -272,7 +284,7 @@ transform_cipher_param(struct rte_crypto_sym_xform *xform,
 
 static int
 transform_chain_param(struct rte_crypto_sym_xform *xforms,
-               VhostUserCryptoSessionParam *param)
+               VhostUserCryptoSymSessionParam *param)
 {
        struct rte_crypto_sym_xform *xform_cipher, *xform_auth;
        int ret;
@@ -333,17 +345,17 @@ transform_chain_param(struct rte_crypto_sym_xform *xforms,
 }
 
 static void
-vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
+vhost_crypto_create_sym_sess(struct vhost_crypto *vcrypto,
                VhostUserCryptoSessionParam *sess_param)
 {
        struct rte_crypto_sym_xform xform1 = {0}, xform2 = {0};
        struct rte_cryptodev_sym_session *session;
        int ret;
 
-       switch (sess_param->op_type) {
+       switch (sess_param->u.sym_sess.op_type) {
        case VIRTIO_CRYPTO_SYM_OP_NONE:
        case VIRTIO_CRYPTO_SYM_OP_CIPHER:
-               ret = transform_cipher_param(&xform1, sess_param);
+               ret = transform_cipher_param(&xform1, &sess_param->u.sym_sess);
                if (unlikely(ret)) {
                        VC_LOG_ERR("Error transform session msg (%i)", ret);
                        sess_param->session_id = ret;
@@ -351,7 +363,7 @@ vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
                }
                break;
        case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
-               if (unlikely(sess_param->hash_mode !=
+               if (unlikely(sess_param->u.sym_sess.hash_mode !=
                                VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)) {
                        sess_param->session_id = -VIRTIO_CRYPTO_NOTSUPP;
                        VC_LOG_ERR("Error transform session message (%i)",
@@ -361,7 +373,7 @@ vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
 
                xform1.next = &xform2;
 
-               ret = transform_chain_param(&xform1, sess_param);
+               ret = transform_chain_param(&xform1, &sess_param->u.sym_sess);
                if (unlikely(ret)) {
                        VC_LOG_ERR("Error transform session message (%i)", ret);
                        sess_param->session_id = ret;
@@ -376,7 +388,7 @@ vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
        }
 
        session = rte_cryptodev_sym_session_create(vcrypto->cid, &xform1,
-                       vcrypto->sess_pool);
+                       vcrypto->sym_sess_pool);
        if (!session) {
                VC_LOG_ERR("Failed to create session");
                sess_param->session_id = -VIRTIO_CRYPTO_ERR;
@@ -401,22 +413,282 @@ vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
        vcrypto->last_session_id++;
 }
 
+static int
+tlv_decode(uint8_t *tlv, uint8_t type, uint8_t **data, size_t *data_len)
+{
+       size_t tlen = -EINVAL, len;
+
+       if (tlv[0] != type)
+               return -EINVAL;
+
+       if (tlv[1] == 0x82) {
+               len = (tlv[2] << 8) | tlv[3];
+               *data = rte_malloc(NULL, len, 0);
+               rte_memcpy(*data, &tlv[4], len);
+               tlen = len + 4;
+       } else if (tlv[1] == 0x81) {
+               len = tlv[2];
+               *data = rte_malloc(NULL, len, 0);
+               rte_memcpy(*data, &tlv[3], len);
+               tlen = len + 3;
+       } else {
+               len = tlv[1];
+               *data = rte_malloc(NULL, len, 0);
+               rte_memcpy(*data, &tlv[2], len);
+               tlen = len + 2;
+       }
+
+       *data_len = len;
+       return tlen;
+}
+
+static int
+virtio_crypto_asym_rsa_der_to_xform(uint8_t *der, size_t der_len,
+               struct rte_crypto_asym_xform *xform)
+{
+       uint8_t *n = NULL, *e = NULL, *d = NULL, *p = NULL, *q = NULL, *dp = NULL,
+               *dq = NULL, *qinv = NULL, *v = NULL, *tlv;
+       size_t nlen, elen, dlen, plen, qlen, dplen, dqlen, qinvlen, vlen;
+       int len, i;
+
+       RTE_SET_USED(der_len);
+
+       for (i = 0; i < 8; i++) {
+               if (der[i] == 0x30) {
+                       der = &der[i];
+                       break;
+               }
+       }
+
+       if (der[0] != 0x30)
+               return -EINVAL;
+
+       if (der[1] == 0x82)
+               tlv = &der[4];
+       else if (der[1] == 0x81)
+               tlv = &der[3];
+       else
+               return -EINVAL;
+
+       len = tlv_decode(tlv, 0x02, &v, &vlen);
+       if (len < 0 || v[0] != 0x0 || vlen != 1) {
+               len = -EINVAL;
+               goto _error;
+       }
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &n, &nlen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &e, &elen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &d, &dlen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &p, &plen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &q, &qlen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &dp, &dplen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &dq, &dqlen);
+       if (len < 0)
+               goto _error;
+
+       tlv = tlv + len;
+       len = tlv_decode(tlv, 0x02, &qinv, &qinvlen);
+       if (len < 0)
+               goto _error;
+
+       xform->rsa.n.data = n;
+       xform->rsa.n.length = nlen;
+       xform->rsa.e.data = e;
+       xform->rsa.e.length = elen;
+       xform->rsa.d.data = d;
+       xform->rsa.d.length = dlen;
+       xform->rsa.qt.p.data = p;
+       xform->rsa.qt.p.length = plen;
+       xform->rsa.qt.q.data = q;
+       xform->rsa.qt.q.length = qlen;
+       xform->rsa.qt.dP.data = dp;
+       xform->rsa.qt.dP.length = dplen;
+       xform->rsa.qt.dQ.data = dq;
+       xform->rsa.qt.dQ.length = dqlen;
+       xform->rsa.qt.qInv.data = qinv;
+       xform->rsa.qt.qInv.length = qinvlen;
+
+       RTE_ASSERT((tlv + len - &der[0]) == der_len);
+       return 0;
+_error:
+       rte_free(v);
+       rte_free(n);
+       rte_free(e);
+       rte_free(d);
+       rte_free(p);
+       rte_free(q);
+       rte_free(dp);
+       rte_free(dq);
+       rte_free(qinv);
+       return len;
+}
+
+static int
+transform_rsa_param(struct rte_crypto_asym_xform *xform,
+               VhostUserCryptoAsymSessionParam *param)
+{
+       int ret = -EINVAL;
+
+       ret = virtio_crypto_asym_rsa_der_to_xform(param->key_buf, param->key_len, xform);
+       if (ret < 0)
+               goto _error;
+
+       switch (param->u.rsa.padding_algo) {
+       case VIRTIO_CRYPTO_RSA_RAW_PADDING:
+               xform->rsa.padding.type = RTE_CRYPTO_RSA_PADDING_NONE;
+               break;
+       case VIRTIO_CRYPTO_RSA_PKCS1_PADDING:
+               xform->rsa.padding.type = RTE_CRYPTO_RSA_PADDING_PKCS1_5;
+               break;
+       default:
+               VC_LOG_ERR("Unknown padding type");
+               goto _error;
+       }
+
+       switch (param->u.rsa.private_key_type) {
+       case VIRTIO_CRYPTO_RSA_PRIVATE_KEY_EXP:
+               xform->rsa.key_type = RTE_RSA_KEY_TYPE_EXP;
+               break;
+       case VIRTIO_CRYPTO_RSA_PRIVATE_KEY_QT:
+               xform->rsa.key_type = RTE_RSA_KEY_TYPE_QT;
+               break;
+       default:
+               VC_LOG_ERR("Unknown private key type");
+               goto _error;
+       }
+
+       xform->xform_type = RTE_CRYPTO_ASYM_XFORM_RSA;
+_error:
+       return ret;
+}
+
+static void
+vhost_crypto_create_asym_sess(struct vhost_crypto *vcrypto,
+               VhostUserCryptoSessionParam *sess_param)
+{
+       struct rte_cryptodev_asym_session *session = NULL;
+       struct vhost_crypto_session *vhost_session;
+       struct rte_crypto_asym_xform xform = {0};
+       int ret;
+
+       switch (sess_param->u.asym_sess.algo) {
+       case VIRTIO_CRYPTO_AKCIPHER_RSA:
+               ret = transform_rsa_param(&xform, &sess_param->u.asym_sess);
+               if (unlikely(ret)) {
+                       VC_LOG_ERR("Error transform session msg (%i)", ret);
+                       sess_param->session_id = ret;
+                       return;
+               }
+               break;
+       default:
+               VC_LOG_ERR("Invalid op algo");
+               sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+               return;
+       }
+
+       ret = rte_cryptodev_asym_session_create(vcrypto->cid, &xform,
+                       vcrypto->asym_sess_pool, (void *)&session);
+       if (!session) {
+               VC_LOG_ERR("Failed to create session");
+               sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+               return;
+       }
+
+       /* insert session to map */
+       vhost_session = rte_malloc(NULL, sizeof(*vhost_session), 0);
+       if (vhost_session == NULL) {
+               VC_LOG_ERR("Failed to alloc session memory");
+               sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+               return;
+       }
+
+       vhost_session->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
+       vhost_session->asym = session;
+       if ((rte_hash_add_key_data(vcrypto->session_map,
+                       &vcrypto->last_session_id, vhost_session) < 0)) {
+               VC_LOG_ERR("Failed to insert session to hash table");
+
+               if (rte_cryptodev_asym_session_free(vcrypto->cid, session) < 0)
+                       VC_LOG_ERR("Failed to free session");
+               sess_param->session_id = -VIRTIO_CRYPTO_ERR;
+               return;
+       }
+
+       VC_LOG_INFO("Session %"PRIu64" created for vdev %i.",
+                       vcrypto->last_session_id, vcrypto->dev->vid);
+
+       sess_param->session_id = vcrypto->last_session_id;
+       vcrypto->last_session_id++;
+}
+
+static void
+vhost_crypto_create_sess(struct vhost_crypto *vcrypto,
+               VhostUserCryptoSessionParam *sess_param)
+{
+       if (sess_param->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION)
+               vhost_crypto_create_asym_sess(vcrypto, sess_param);
+       else
+               vhost_crypto_create_sym_sess(vcrypto, sess_param);
+}
+
 static int
 vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
 {
-       struct rte_cryptodev_sym_session *session;
+       struct rte_cryptodev_asym_session *asym_session = NULL;
+       struct rte_cryptodev_sym_session *sym_session = NULL;
+       struct vhost_crypto_session *vhost_session = NULL;
        uint64_t sess_id = session_id;
        int ret;
 
        ret = rte_hash_lookup_data(vcrypto->session_map, &sess_id,
-                       (void **)&session);
-
+                               (void **)&vhost_session);
        if (unlikely(ret < 0)) {
-               VC_LOG_ERR("Failed to delete session %"PRIu64".", session_id);
+               VC_LOG_ERR("Failed to find session for id %"PRIu64".", session_id);
                return -VIRTIO_CRYPTO_INVSESS;
        }
 
-       if (rte_cryptodev_sym_session_free(vcrypto->cid, session) < 0) {
+       if (vhost_session->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+               sym_session = vhost_session->sym;
+       } else if (vhost_session->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+               asym_session = vhost_session->asym;
+       } else {
+               VC_LOG_ERR("Invalid session for id %"PRIu64".", session_id);
+               return -VIRTIO_CRYPTO_INVSESS;
+       }
+
+       if (sym_session != NULL &&
+                   rte_cryptodev_sym_session_free(vcrypto->cid, sym_session) < 0) {
+               VC_LOG_DBG("Failed to free session");
+               return -VIRTIO_CRYPTO_ERR;
+       }
+
+       if (asym_session != NULL &&
+               rte_cryptodev_asym_session_free(vcrypto->cid, asym_session) < 0) {
                VC_LOG_DBG("Failed to free session");
                return -VIRTIO_CRYPTO_ERR;
        }
@@ -429,6 +701,7 @@ vhost_crypto_close_sess(struct vhost_crypto *vcrypto, uint64_t session_id)
        VC_LOG_INFO("Session %"PRIu64" deleted for vdev %i.", sess_id,
                        vcrypto->dev->vid);
 
+       rte_free(vhost_session);
        return 0;
 }
 
@@ -1122,6 +1395,118 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
        return ret;
 }
 
+static __rte_always_inline uint8_t
+vhost_crypto_check_akcipher_request(struct virtio_crypto_akcipher_data_req *req)
+{
+       RTE_SET_USED(req);
+       return VIRTIO_CRYPTO_OK;
+}
+
+static __rte_always_inline uint8_t
+prepare_asym_rsa_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+               struct vhost_crypto_data_req *vc_req,
+               struct virtio_crypto_op_data_req *req,
+               struct vhost_crypto_desc *head,
+               uint32_t max_n_descs)
+{
+       uint8_t ret = vhost_crypto_check_akcipher_request(&req->u.akcipher_req);
+       struct rte_crypto_rsa_op_param *rsa = &op->asym->rsa;
+       struct vhost_crypto_desc *desc = head;
+       uint16_t wlen = 0;
+
+       if (unlikely(ret != VIRTIO_CRYPTO_OK))
+               goto error_exit;
+
+       /* prepare */
+       switch (vcrypto->option) {
+       case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
+               vc_req->wb_pool = vcrypto->wb_pool;
+               if (req->header.opcode == VIRTIO_CRYPTO_AKCIPHER_SIGN) {
+                       rsa->op_type = RTE_CRYPTO_ASYM_OP_SIGN;
+                       rsa->message.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->message.length = req->u.akcipher_req.para.src_data_len;
+                       rsa->sign.length = req->u.akcipher_req.para.dst_data_len;
+                       wlen = rsa->sign.length;
+                       desc = find_write_desc(head, desc, max_n_descs);
+                       if (unlikely(!desc)) {
+                               VC_LOG_ERR("Cannot find write location");
+                               ret = VIRTIO_CRYPTO_BADMSG;
+                               goto error_exit;
+                       }
+
+                       rsa->sign.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
+                       if (unlikely(rsa->sign.data == NULL)) {
+                               ret = VIRTIO_CRYPTO_ERR;
+                               goto error_exit;
+                       }
+
+                       desc += 1;
+               } else if (req->header.opcode == VIRTIO_CRYPTO_AKCIPHER_VERIFY) {
+                       rsa->op_type = RTE_CRYPTO_ASYM_OP_VERIFY;
+                       rsa->sign.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->sign.length = req->u.akcipher_req.para.src_data_len;
+                       desc += 1;
+                       rsa->message.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->message.length = req->u.akcipher_req.para.dst_data_len;
+                       desc += 1;
+               } else if (req->header.opcode == VIRTIO_CRYPTO_AKCIPHER_ENCRYPT) {
+                       rsa->op_type = RTE_CRYPTO_ASYM_OP_ENCRYPT;
+                       rsa->message.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->message.length = req->u.akcipher_req.para.src_data_len;
+                       rsa->cipher.length = req->u.akcipher_req.para.dst_data_len;
+                       wlen = rsa->cipher.length;
+                       desc = find_write_desc(head, desc, max_n_descs);
+                       if (unlikely(!desc)) {
+                               VC_LOG_ERR("Cannot find write location");
+                               ret = VIRTIO_CRYPTO_BADMSG;
+                               goto error_exit;
+                       }
+
+                       rsa->cipher.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RW);
+                       if (unlikely(rsa->cipher.data == NULL)) {
+                               ret = VIRTIO_CRYPTO_ERR;
+                               goto error_exit;
+                       }
+
+                       desc += 1;
+               } else if (req->header.opcode == VIRTIO_CRYPTO_AKCIPHER_DECRYPT) {
+                       rsa->op_type = RTE_CRYPTO_ASYM_OP_DECRYPT;
+                       rsa->cipher.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->cipher.length = req->u.akcipher_req.para.src_data_len;
+                       desc += 1;
+                       rsa->message.data = get_data_ptr(vc_req, desc, VHOST_ACCESS_RO);
+                       rsa->message.length = req->u.akcipher_req.para.dst_data_len;
+                       desc += 1;
+               } else {
+                       goto error_exit;
+               }
+               break;
+       case RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE:
+       default:
+               ret = VIRTIO_CRYPTO_BADMSG;
+               goto error_exit;
+       }
+
+       op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
+       op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+       vc_req->inhdr = get_data_ptr(vc_req, desc, VHOST_ACCESS_WO);
+       if (unlikely(vc_req->inhdr == NULL)) {
+               ret = VIRTIO_CRYPTO_BADMSG;
+               goto error_exit;
+       }
+
+       vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+       vc_req->len = wlen + INHDR_LEN;
+       return 0;
+error_exit:
+       if (vc_req->wb)
+               free_wb_data(vc_req->wb, vc_req->wb_pool);
+
+       vc_req->len = INHDR_LEN;
+       return ret;
+}
+
 /**
  * Process on descriptor
  */
@@ -1132,17 +1517,21 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                uint16_t desc_idx)
        __rte_no_thread_safety_analysis /* FIXME: requires iotlb_lock? */
 {
-       struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(op->sym->m_src);
-       struct rte_cryptodev_sym_session *session;
+       struct vhost_crypto_data_req *vc_req, *vc_req_out;
+       struct rte_cryptodev_asym_session *asym_session;
+       struct rte_cryptodev_sym_session *sym_session;
+       struct vhost_crypto_session *vhost_session;
+       struct vhost_crypto_desc *desc = descs;
+       uint32_t nb_descs = 0, max_n_descs, i;
+       struct vhost_crypto_data_req data_req;
        struct virtio_crypto_op_data_req req;
        struct virtio_crypto_inhdr *inhdr;
-       struct vhost_crypto_desc *desc = descs;
        struct vring_desc *src_desc;
        uint64_t session_id;
        uint64_t dlen;
-       uint32_t nb_descs = 0, max_n_descs, i;
        int err;
 
+       vc_req = &data_req;
        vc_req->desc_idx = desc_idx;
        vc_req->dev = vcrypto->dev;
        vc_req->vq = vq;
@@ -1225,12 +1614,14 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
        switch (req.header.opcode) {
        case VIRTIO_CRYPTO_CIPHER_ENCRYPT:
        case VIRTIO_CRYPTO_CIPHER_DECRYPT:
+               vc_req_out = rte_mbuf_to_priv(op->sym->m_src);
+               rte_memcpy(vc_req_out, vc_req, sizeof(struct vhost_crypto_data_req));
                session_id = req.header.session_id;
 
                /* one branch to avoid unnecessary table lookup */
-               if (vcrypto->cache_session_id != session_id) {
+               if (vcrypto->cache_sym_session_id != session_id) {
                        err = rte_hash_lookup_data(vcrypto->session_map,
-                                       &session_id, (void **)&session);
+                                       &session_id, (void **)&vhost_session);
                        if (unlikely(err < 0)) {
                                err = VIRTIO_CRYPTO_ERR;
                                VC_LOG_ERR("Failed to find session %"PRIu64,
@@ -1238,13 +1629,14 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                                goto error_exit;
                        }
 
-                       vcrypto->cache_session = session;
-                       vcrypto->cache_session_id = session_id;
+                       vcrypto->cache_sym_session = vhost_session->sym;
+                       vcrypto->cache_sym_session_id = session_id;
                }
 
-               session = vcrypto->cache_session;
+               sym_session = vcrypto->cache_sym_session;
+               op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
 
-               err = rte_crypto_op_attach_sym_session(op, session);
+               err = rte_crypto_op_attach_sym_session(op, sym_session);
                if (unlikely(err < 0)) {
                        err = VIRTIO_CRYPTO_ERR;
                        VC_LOG_ERR("Failed to attach session to op");
@@ -1256,12 +1648,12 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                        err = VIRTIO_CRYPTO_NOTSUPP;
                        break;
                case VIRTIO_CRYPTO_SYM_OP_CIPHER:
-                       err = prepare_sym_cipher_op(vcrypto, op, vc_req,
+                       err = prepare_sym_cipher_op(vcrypto, op, vc_req_out,
                                        &req.u.sym_req.u.cipher, desc,
                                        max_n_descs);
                        break;
                case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
-                       err = prepare_sym_chain_op(vcrypto, op, vc_req,
+                       err = prepare_sym_chain_op(vcrypto, op, vc_req_out,
                                        &req.u.sym_req.u.chain, desc,
                                        max_n_descs);
                        break;
@@ -1270,6 +1662,53 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
                        VC_LOG_ERR("Failed to process sym request");
                        goto error_exit;
                }
+               break;
+       case VIRTIO_CRYPTO_AKCIPHER_SIGN:
+       case VIRTIO_CRYPTO_AKCIPHER_VERIFY:
+       case VIRTIO_CRYPTO_AKCIPHER_ENCRYPT:
+       case VIRTIO_CRYPTO_AKCIPHER_DECRYPT:
+               session_id = req.header.session_id;
+
+               /* one branch to avoid unnecessary table lookup */
+               if (vcrypto->cache_asym_session_id != session_id) {
+                       err = rte_hash_lookup_data(vcrypto->session_map,
+                                       &session_id, (void **)&vhost_session);
+                       if (unlikely(err < 0)) {
+                               err = VIRTIO_CRYPTO_ERR;
+                               VC_LOG_ERR("Failed to find asym session %"PRIu64,
+                                                  session_id);
+                               goto error_exit;
+                       }
+
+                       vcrypto->cache_asym_session = vhost_session->asym;
+                       vcrypto->cache_asym_session_id = session_id;
+               }
+
+               asym_session = vcrypto->cache_asym_session;
+               op->type = RTE_CRYPTO_OP_TYPE_ASYMMETRIC;
+
+               err = rte_crypto_op_attach_asym_session(op, asym_session);
+               if (unlikely(err < 0)) {
+                       err = VIRTIO_CRYPTO_ERR;
+                       VC_LOG_ERR("Failed to attach asym session to op");
+                       goto error_exit;
+               }
+
+               vc_req_out = rte_cryptodev_asym_session_get_user_data(asym_session);
+               rte_memcpy(vc_req_out, vc_req, sizeof(struct vhost_crypto_data_req));
+               vc_req_out->wb = NULL;
+
+               switch (req.header.algo) {
+               case VIRTIO_CRYPTO_AKCIPHER_RSA:
+                       err = prepare_asym_rsa_op(vcrypto, op, vc_req_out,
+                                       &req, desc, max_n_descs);
+                       break;
+               }
+               if (unlikely(err != 0)) {
+                       VC_LOG_ERR("Failed to process asym request");
+                       goto error_exit;
+               }
+
                break;
        default:
                err = VIRTIO_CRYPTO_ERR;
@@ -1293,12 +1732,22 @@ static __rte_always_inline struct vhost_virtqueue *
 vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
                struct vhost_virtqueue *old_vq)
 {
-       struct rte_mbuf *m_src = op->sym->m_src;
-       struct rte_mbuf *m_dst = op->sym->m_dst;
-       struct vhost_crypto_data_req *vc_req = rte_mbuf_to_priv(m_src);
+       struct rte_mbuf *m_src = NULL, *m_dst = NULL;
+       struct vhost_crypto_data_req *vc_req;
        struct vhost_virtqueue *vq;
        uint16_t used_idx, desc_idx;
 
+       if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+               m_src = op->sym->m_src;
+               m_dst = op->sym->m_dst;
+               vc_req = rte_mbuf_to_priv(m_src);
+       } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+               vc_req = rte_cryptodev_asym_session_get_user_data(op->asym->session);
+       } else {
+               VC_LOG_ERR("Invalid crypto op type");
+               return NULL;
+       }
+
        if (unlikely(!vc_req)) {
                VC_LOG_ERR("Failed to retrieve vc_req");
                return NULL;
@@ -1320,25 +1769,36 @@ vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
        vq->used->ring[desc_idx].id = vq->avail->ring[desc_idx];
        vq->used->ring[desc_idx].len = vc_req->len;
 
-       rte_mempool_put(m_src->pool, (void *)m_src);
-
-       if (m_dst)
-               rte_mempool_put(m_dst->pool, (void *)m_dst);
+       if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
+               rte_mempool_put(m_src->pool, (void *)m_src);
+               if (m_dst)
+                       rte_mempool_put(m_dst->pool, (void *)m_dst);
+       }
 
        return vc_req->vq;
 }
 
 static __rte_always_inline uint16_t
-vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
+vhost_crypto_complete_one_vm_requests(int vid, int qid, struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfd)
 {
+       struct virtio_net *dev = get_device(vid);
        uint16_t processed = 1;
        struct vhost_virtqueue *vq, *tmp_vq;
 
+       if (unlikely(dev == NULL)) {
+               VC_LOG_ERR("Invalid vid %i", vid);
+               return 0;
+       }
+
        if (unlikely(nb_ops == 0))
                return 0;
 
-       vq = vhost_crypto_finalize_one_request(ops[0], NULL);
+       rte_spinlock_lock(&dev->virtqueue_lock);
+       tmp_vq = dev->virtqueue[qid];
+       rte_spinlock_unlock(&dev->virtqueue_lock);
+
+       vq = vhost_crypto_finalize_one_request(ops[0], tmp_vq);
        if (unlikely(vq == NULL))
                return 0;
        tmp_vq = vq;
@@ -1383,7 +1843,7 @@ rte_vhost_crypto_driver_start(const char *path)
 
 int
 rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
-               struct rte_mempool *sess_pool,
+               struct rte_mempool *sym_sess_pool, struct rte_mempool *asym_sess_pool,
                int socket_id)
 {
        struct virtio_net *dev = get_device(vid);
@@ -1404,9 +1864,11 @@ rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
                return -ENOMEM;
        }
 
-       vcrypto->sess_pool = sess_pool;
+       vcrypto->sym_sess_pool = sym_sess_pool;
+       vcrypto->asym_sess_pool = asym_sess_pool;
        vcrypto->cid = cryptodev_id;
-       vcrypto->cache_session_id = UINT64_MAX;
+       vcrypto->cache_sym_session_id = UINT64_MAX;
+       vcrypto->cache_asym_session_id = UINT64_MAX;
        vcrypto->last_session_id = 1;
        vcrypto->dev = dev;
        vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
@@ -1577,7 +2039,12 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
                return 0;
        }
 
+       rte_spinlock_lock(&dev->virtqueue_lock);
        vq = dev->virtqueue[qid];
+       rte_spinlock_unlock(&dev->virtqueue_lock);
+
+       if (!vq || !vq->avail)
+               return 0;
 
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        start_idx = vq->last_used_idx;
@@ -1659,7 +2126,7 @@ rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
 }
 
 uint16_t
-rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+rte_vhost_crypto_finalize_requests(int vid, int qid, struct rte_crypto_op **ops,
                uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
 {
        struct rte_crypto_op **tmp_ops = ops;
@@ -1668,7 +2135,7 @@ rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
        uint16_t idx = 0;
 
        while (left) {
-               count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
+               count = vhost_crypto_complete_one_vm_requests(vid, qid, tmp_ops, left,
                                &callfd);
                if (unlikely(count == 0))
                        break;
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 901a80bbaa..709a1df70d 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -396,7 +396,9 @@ vhost_user_set_features(struct virtio_net **pdev,
                        if (!vq)
                                continue;
 
+                       rte_spinlock_lock(&dev->virtqueue_lock);
                        dev->virtqueue[dev->nr_vring] = NULL;
+                       rte_spinlock_unlock(&dev->virtqueue_lock);
                        cleanup_vq(vq, 1);
                        cleanup_vq_inflight(dev, vq);
                        /* vhost_user_lock_all_queue_pairs locked all qps */
@@ -540,7 +542,9 @@ numa_realloc(struct virtio_net **pdev, struct vhost_virtqueue **pvq)
 
        if (vq != dev->virtqueue[vq->index]) {
                VHOST_LOG_CONFIG(dev->ifname, INFO, "reallocated virtqueue on 
node %d\n", node);
+               rte_spinlock_lock(&dev->virtqueue_lock);
                dev->virtqueue[vq->index] = vq;
+               rte_spinlock_unlock(&dev->virtqueue_lock);
        }
 
        if (vq_is_packed(dev)) {
diff --git a/lib/vhost/vhost_user.h b/lib/vhost/vhost_user.h
index edf7adb3c0..6174f32dcf 100644
--- a/lib/vhost/vhost_user.h
+++ b/lib/vhost/vhost_user.h
@@ -99,11 +99,10 @@ typedef struct VhostUserLog {
 /* Comply with Cryptodev-Linux */
 #define VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH  512
 #define VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH        64
+#define VHOST_USER_CRYPTO_MAX_KEY_LENGTH       1024
 
 /* Same structure as vhost-user backend session info */
-typedef struct VhostUserCryptoSessionParam {
-       int64_t session_id;
-       uint32_t op_code;
+typedef struct VhostUserCryptoSymSessionParam {
        uint32_t cipher_algo;
        uint32_t cipher_key_len;
        uint32_t hash_algo;
@@ -114,10 +113,37 @@ typedef struct VhostUserCryptoSessionParam {
        uint8_t dir;
        uint8_t hash_mode;
        uint8_t chaining_dir;
-       uint8_t *ciphe_key;
+       uint8_t *cipher_key;
        uint8_t *auth_key;
        uint8_t cipher_key_buf[VHOST_USER_CRYPTO_MAX_CIPHER_KEY_LENGTH];
        uint8_t auth_key_buf[VHOST_USER_CRYPTO_MAX_HMAC_KEY_LENGTH];
+} VhostUserCryptoSymSessionParam;
+
+
+typedef struct VhostUserCryptoAsymRsaParam {
+       uint32_t padding_algo;
+       uint32_t hash_algo;
+       uint8_t private_key_type;
+} VhostUserCryptoAsymRsaParam;
+
+typedef struct VhostUserCryptoAsymSessionParam {
+       uint32_t algo;
+       uint32_t key_type;
+       uint32_t key_len;
+       uint8_t *key;
+       union {
+               VhostUserCryptoAsymRsaParam rsa;
+       } u;
+       uint8_t key_buf[VHOST_USER_CRYPTO_MAX_KEY_LENGTH];
+} VhostUserCryptoAsymSessionParam;
+
+typedef struct VhostUserCryptoSessionParam {
+       uint32_t op_code;
+       union {
+               VhostUserCryptoSymSessionParam sym_sess;
+               VhostUserCryptoAsymSessionParam asym_sess;
+       } u;
+       uint64_t session_id;
 } VhostUserCryptoSessionParam;
 
 typedef struct VhostUserVringArea {
diff --git a/lib/vhost/virtio_crypto.h b/lib/vhost/virtio_crypto.h
index e3b93573c8..703a059768 100644
--- a/lib/vhost/virtio_crypto.h
+++ b/lib/vhost/virtio_crypto.h
@@ -9,6 +9,7 @@
 #define VIRTIO_CRYPTO_SERVICE_HASH   1
 #define VIRTIO_CRYPTO_SERVICE_MAC    2
 #define VIRTIO_CRYPTO_SERVICE_AEAD   3
+#define VIRTIO_CRYPTO_SERVICE_AKCIPHER 4
 
 #define VIRTIO_CRYPTO_OPCODE(service, op)   (((service) << 8) | (op))
 
@@ -29,6 +30,10 @@ struct virtio_crypto_ctrl_header {
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x02)
 #define VIRTIO_CRYPTO_AEAD_DESTROY_SESSION \
           VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x03)
+#define VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x04)
+#define VIRTIO_CRYPTO_AKCIPHER_DESTROY_SESSION \
+          VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x05)
        uint32_t opcode;
        uint32_t algo;
        uint32_t flag;
@@ -152,6 +157,63 @@ struct virtio_crypto_aead_create_session_req {
        uint8_t padding[32];
 };
 
+struct virtio_crypto_rsa_session_para {
+#define VIRTIO_CRYPTO_RSA_RAW_PADDING   0
+#define VIRTIO_CRYPTO_RSA_PKCS1_PADDING 1
+       uint32_t padding_algo;
+
+#define VIRTIO_CRYPTO_RSA_NO_HASH   0
+#define VIRTIO_CRYPTO_RSA_MD2       1
+#define VIRTIO_CRYPTO_RSA_MD3       2
+#define VIRTIO_CRYPTO_RSA_MD4       3
+#define VIRTIO_CRYPTO_RSA_MD5       4
+#define VIRTIO_CRYPTO_RSA_SHA1      5
+#define VIRTIO_CRYPTO_RSA_SHA256    6
+#define VIRTIO_CRYPTO_RSA_SHA384    7
+#define VIRTIO_CRYPTO_RSA_SHA512    8
+#define VIRTIO_CRYPTO_RSA_SHA224    9
+       uint32_t hash_algo;
+
+#define VIRTIO_CRYPTO_RSA_PRIVATE_KEY_UNKNOWN 0
+#define VIRTIO_CRYPTO_RSA_PRIVATE_KEY_EXP     1
+#define VIRTIO_CRYPTO_RSA_PRIVATE_KEY_QT      2
+       uint8_t private_key_type;
+};
+
+struct virtio_crypto_ecdsa_session_para {
+#define VIRTIO_CRYPTO_CURVE_UNKNOWN   0
+#define VIRTIO_CRYPTO_CURVE_NIST_P192 1
+#define VIRTIO_CRYPTO_CURVE_NIST_P224 2
+#define VIRTIO_CRYPTO_CURVE_NIST_P256 3
+#define VIRTIO_CRYPTO_CURVE_NIST_P384 4
+#define VIRTIO_CRYPTO_CURVE_NIST_P521 5
+       uint32_t curve_id;
+       uint32_t padding;
+};
+
+struct virtio_crypto_akcipher_session_para {
+#define VIRTIO_CRYPTO_NO_AKCIPHER    0
+#define VIRTIO_CRYPTO_AKCIPHER_RSA   1
+#define VIRTIO_CRYPTO_AKCIPHER_DSA   2
+#define VIRTIO_CRYPTO_AKCIPHER_ECDSA 3
+       uint32_t algo;
+
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PUBLIC  1
+#define VIRTIO_CRYPTO_AKCIPHER_KEY_TYPE_PRIVATE 2
+       uint32_t keytype;
+       uint32_t keylen;
+
+       union {
+               struct virtio_crypto_rsa_session_para rsa;
+               struct virtio_crypto_ecdsa_session_para ecdsa;
+       } u;
+};
+
+struct virtio_crypto_akcipher_create_session_req {
+       struct virtio_crypto_akcipher_session_para para;
+       uint8_t padding[36];
+};
+
 struct virtio_crypto_alg_chain_session_para {
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER  1
 #define VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH  2
@@ -219,6 +281,8 @@ struct virtio_crypto_op_ctrl_req {
                        mac_create_session;
                struct virtio_crypto_aead_create_session_req
                        aead_create_session;
+               struct virtio_crypto_akcipher_create_session_req
+                       akcipher_create_session;
                struct virtio_crypto_destroy_session_req
                        destroy_session;
                uint8_t padding[56];
@@ -238,6 +302,14 @@ struct virtio_crypto_op_header {
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x00)
 #define VIRTIO_CRYPTO_AEAD_DECRYPT \
        VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AEAD, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_ENCRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00)
+#define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01)
+#define VIRTIO_CRYPTO_AKCIPHER_SIGN \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02)
+#define VIRTIO_CRYPTO_AKCIPHER_VERIFY \
+       VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x03)
        uint32_t opcode;
        /* algo should be service-specific algorithms */
        uint32_t algo;
@@ -362,6 +434,16 @@ struct virtio_crypto_aead_data_req {
        uint8_t padding[32];
 };
 
+struct virtio_crypto_akcipher_para {
+       uint32_t src_data_len;
+       uint32_t dst_data_len;
+};
+
+struct virtio_crypto_akcipher_data_req {
+       struct virtio_crypto_akcipher_para para;
+       uint8_t padding[40];
+};
+
 /* The request of the data virtqueue's packet */
 struct virtio_crypto_op_data_req {
        struct virtio_crypto_op_header header;
@@ -371,6 +453,7 @@ struct virtio_crypto_op_data_req {
                struct virtio_crypto_hash_data_req hash_req;
                struct virtio_crypto_mac_data_req mac_req;
                struct virtio_crypto_aead_data_req aead_req;
+               struct virtio_crypto_akcipher_data_req akcipher_req;
                uint8_t padding[48];
        } u;
 };
@@ -380,6 +463,8 @@ struct virtio_crypto_op_data_req {
 #define VIRTIO_CRYPTO_BADMSG    2
 #define VIRTIO_CRYPTO_NOTSUPP   3
 #define VIRTIO_CRYPTO_INVSESS   4 /* Invalid session id */
+#define VIRTIO_CRYPTO_NOSPC     5 /* no free session ID */
+#define VIRTIO_CRYPTO_KEY_REJECTED 6 /* Signature verification failed */
 
 /* The accelerator hardware is ready */
 #define VIRTIO_CRYPTO_S_HW_READY  (1 << 0)
@@ -410,7 +495,7 @@ struct virtio_crypto_config {
        uint32_t max_cipher_key_len;
        /* Maximum length of authenticated key */
        uint32_t max_auth_key_len;
-       uint32_t reserve;
+       uint32_t akcipher_algo;
        /* Maximum size of each crypto request's content */
        uint64_t max_size;
 };
-- 
2.25.1


Reply via email to