This patch adds the vhost_crypto sample application to DPDK.
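
The sample demonstrates the vhost crypto library: it registers one or more
vhost-user socket files, fetches virtio-crypto requests from the guest on a
worker lcore, relays them to the cryptodev selected through --cryptodev-mask,
returns the results to the guest, and prints the cryptodev statistics once
per second on a separate stats lcore.

A possible invocation (binary path and core list are illustrative; at least
three lcores are needed for the master, worker and stats threads):

    ./build/vhost-crypto -l 0-2 -- \
        --socket-file /tmp/vhost_crypto1.socket \
        --cryptodev-mask 0x1

If no --socket-file option is given, /tmp/vhost_crypto1.socket is used by
default.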

Signed-off-by: Fan Zhang <roy.fan.zh...@intel.com>
---
 examples/vhost_crypto/Makefile |  59 +++++
 examples/vhost_crypto/main.c   | 588 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 647 insertions(+)
 create mode 100644 examples/vhost_crypto/Makefile
 create mode 100644 examples/vhost_crypto/main.c

diff --git a/examples/vhost_crypto/Makefile b/examples/vhost_crypto/Makefile
new file mode 100644
index 0000000..e457524
--- /dev/null
+++ b/examples/vhost_crypto/Makefile
@@ -0,0 +1,59 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(info This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+all:
+else
+
+# binary name
+APP = vhost-crypto
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O2 -D_FILE_OFFSET_BITS=64
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -D_GNU_SOURCE
+
+include $(RTE_SDK)/mk/rte.extapp.mk
+
+endif
diff --git a/examples/vhost_crypto/main.c b/examples/vhost_crypto/main.c
new file mode 100644
index 0000000..2b24502
--- /dev/null
+++ b/examples/vhost_crypto/main.c
@@ -0,0 +1,588 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer   in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <getopt.h>
+
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_vhost.h>
+#include <rte_cryptodev.h>
+#include <rte_vhost_crypto.h>
+
+#define NB_VIRTIO_QUEUES               (1)
+#define MAX_PKT_BURST                  (32) /**< Max burst size for RX/TX */
+#define NB_MEMPOOL_OBJS                        (1024)
+#define NB_CRYPTO_DESCRIPTORS          (1024)
+#define NB_CACHE_OBJS                  (128)
+#define SESSION_MAP_ENTRIES            (1024) /**< Max nb sessions per vdev */
+
+#define MAX_NB_SOCKETS                 (32)
+#define DEF_SOCKET_FILE                        "/tmp/vhost_crypto1.socket"
+
+struct vhost_crypto_options {
+       char *socket_files[MAX_NB_SOCKETS];
+       uint32_t nb_sockets;
+       uint64_t cryptodev_mask;
+} options;
+
+struct vhost_crypto_info {
+       int vids[MAX_NB_SOCKETS];
+       struct rte_vhost_vring vqs[MAX_NB_SOCKETS][NB_VIRTIO_QUEUES];
+       struct rte_mempool *sess_pool;
+       struct rte_mempool *cop_pool;
+       uint32_t lcore_id;
+       uint8_t cid;
+       volatile uint32_t initialized;
+} info;
+
+#define SOCKET_FILE_KEYWORD    "socket-file"
+#define CRYPTODEV_MASK_KEYWORD "cryptodev-mask"
+
+/** Parse and store a --socket-file SOCKET-FILE-PATH argument */
+static int
+parse_socket_arg(char *arg)
+{
+       uint32_t nb_sockets = options.nb_sockets;
+       size_t len = strlen(arg) + 1; /* include terminating NUL */
+
+       if (nb_sockets >= MAX_NB_SOCKETS) {
+               RTE_LOG(ERR, USER1, "Too many socket files!\n");
+               return -ENOMEM;
+       }
+
+       options.socket_files[nb_sockets] = rte_malloc(NULL, len, 0);
+       if (!options.socket_files[nb_sockets]) {
+               RTE_LOG(ERR, USER1, "Insufficient memory\n");
+               return -ENOMEM;
+       }
+
+       rte_memcpy(options.socket_files[nb_sockets], arg, len);
+
+       options.nb_sockets++;
+
+       return 0;
+}
+
+static int
+parse_cryptodev_mask(const char *q_arg)
+{
+       char *end = NULL;
+       uint64_t pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(q_arg, &end, 16);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               pm = 0;
+
+       options.cryptodev_mask = pm;
+       if (options.cryptodev_mask == 0 ||
+                       __builtin_popcountll(options.cryptodev_mask) > 1) {
+               printf("invalid cryptodev-mask specified\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static void
+vhost_crypto_usage(const char *prgname)
+{
+       printf("%s [EAL options] --\n"
+               "  --%s SOCKET-FILE-PATH\n"
+               "  --%s MASK: hexadecimal bitmask of crypto devices to configure\n",
+               prgname, SOCKET_FILE_KEYWORD, CRYPTODEV_MASK_KEYWORD);
+}
+
+static int
+vhost_crypto_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char *prgname = argv[0];
+       char **argvopt;
+       int option_index;
+       struct option lgopts[] = {
+                       {SOCKET_FILE_KEYWORD, required_argument, 0, 0},
+                       {CRYPTODEV_MASK_KEYWORD, required_argument, 0, 0},
+                       {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "s:",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               case 0:
+                       if (strcmp(lgopts[option_index].name,
+                                       SOCKET_FILE_KEYWORD) == 0) {
+                               ret = parse_socket_arg(optarg);
+                               if (ret < 0) {
+                                       vhost_crypto_usage(prgname);
+                                       return ret;
+                               }
+                       } else if (strcmp(lgopts[option_index].name,
+                                       CRYPTODEV_MASK_KEYWORD) == 0) {
+                               ret = parse_cryptodev_mask(optarg);
+                               if (ret < 0) {
+                                       vhost_crypto_usage(prgname);
+                                       return ret;
+                               }
+                       } else {
+                               vhost_crypto_usage(prgname);
+                               return -EINVAL;
+                       }
+                       break;
+               default:
+                       return -1;
+               }
+       }
+
+       if (options.nb_sockets == 0) {
+               options.socket_files[0] = strdup(DEF_SOCKET_FILE);
+               options.nb_sockets = 1;
+               RTE_LOG(INFO, USER1,
+                               "VHOST-CRYPTO: use default socket file %s\n",
+                               DEF_SOCKET_FILE);
+       }
+
+       if (options.cryptodev_mask == 0)
+               options.cryptodev_mask = (1ULL << options.nb_sockets) - 1;
+
+       return 0;
+}
+
+static int
+new_device(int vid)
+{
+       char path[PATH_MAX];
+       uint32_t i;
+       int ret;
+
+       ret = rte_vhost_get_ifname(vid, path, PATH_MAX);
+       if (ret) {
+               RTE_LOG(ERR, USER1, "Cannot find matched socket\n");
+               return ret;
+       }
+
+       ret = rte_vhost_crypto_create(vid, info.cid, info.sess_pool,
+                       rte_lcore_to_socket_id(info.lcore_id));
+       if (ret) {
+               RTE_LOG(ERR, USER1, "Cannot create vhost crypto\n");
+               return ret;
+       }
+
+       for (i = 0; i < options.nb_sockets; i++) {
+               if (strcmp(path, options.socket_files[i]) == 0)
+                       break;
+       }
+
+       if (i == options.nb_sockets) {
+               RTE_LOG(ERR, USER1, "Cannot find socket file from list\n");
+               return -1;
+       }
+
+       info.vids[i] = vid;
+
+       info.initialized = 1;
+
+       RTE_LOG(INFO, USER1, "New Vhost-crypto Device %s, Device ID %d\n", path,
+                       vid);
+       return 0;
+}
+
+static void
+destroy_device(int vid)
+{
+       info.initialized = 0;
+
+       rte_vhost_crypto_free(vid);
+
+       RTE_LOG(INFO, USER1, "Vhost Crypto Device %i Removed\n", vid);
+}
+
+static const struct vhost_device_ops virtio_crypto_device_ops = {
+       .new_device =  new_device,
+       .destroy_device = destroy_device,
+};
+
+
+static void clrscr(void)
+{
+       system("@cls||clear");
+}
+
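+/* Stats lcore: once per second clear the screen, print the cryptodev
+ * enqueue/dequeue counters and the measured ops/sec, then reset the stats. */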
+static int
+vhost_crypto_stats_worker(__rte_unused void *arg)
+{
+       uint64_t t_start, t_end, hz = rte_get_tsc_hz();
+       struct rte_cryptodev_stats stats;
+       uint64_t threshold = hz;
+       double throughput;
+
+       RTE_LOG(INFO, USER1, "VHOST-CRYPTO: start stats lcore\n");
+
+       t_start = rte_rdtsc_precise();
+       while (1) {
+               uint64_t interval;
+               uint32_t timeout;
+
+               if (unlikely(info.initialized == 0))
+                       continue;
+
+               t_end = rte_rdtsc_precise();
+               interval = t_end - t_start;
+               timeout = (interval > threshold) ? 1 : 0;
+               if (timeout)
+                       t_start = t_end;
+
+               if (unlikely(timeout)) {
+                       clrscr();
+
+                       rte_cryptodev_stats_get(info.cid, &stats);
+
+                       if (unlikely(stats.enqueued_count + stats.dequeued_count
+                                       + stats.enqueue_err_count +
+                                       stats.dequeue_err_count == 0))
+                               continue;
+
+                       throughput = ((double)stats.dequeued_count / interval)
+                                       * hz;
+
+                       printf("%12s%12s%12s%12s%12s%12s\n", "cryptodevid",
+                                       "enq reqs", "deq reqs", "enq err",
+                                       "deq err", "ops/sec");
+                       printf("%12u%12"PRIu64"%12"PRIu64"%12"PRIu64"%12"
+                                       PRIu64"%12.4f\n", info.cid,
+                                       stats.enqueued_count,
+                                       stats.dequeued_count,
+                                       stats.enqueue_err_count,
+                                       stats.dequeue_err_count, throughput);
+                       rte_cryptodev_stats_reset(info.cid);
+               }
+       }
+
+       return 0;
+}
+
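+/* Enqueue the fetched ops to the cryptodev and bulk-allocate fresh crypto
+ * ops into the same array so the next fetch round has ops to fill in. */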
+static __rte_always_inline int
+enqueue_and_realloc_cops(uint16_t q_id, struct rte_crypto_op **ops,
+               uint16_t nb_ops)
+{
+       uint16_t enqd;
+
+       enqd = rte_cryptodev_enqueue_burst(info.cid, q_id, ops,
+                       nb_ops);
+       if (unlikely(enqd < nb_ops)) {
+               RTE_LOG(ERR, USER1, "Cannot enqueue enough cops\n");
+               return -1;
+       }
+
+       if (unlikely(rte_crypto_op_bulk_alloc(info.cop_pool,
+                       RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops, enqd) < enqd)) {
+               RTE_LOG(ERR, USER1, "Failed to realloc cops\n");
+               return -1;
+       }
+
+       return 0;
+}
+
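+/* Dequeue completed ops from the cryptodev, finalize the corresponding
+ * vhost crypto requests back to the guest and return the ops to the pool. */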
+static __rte_always_inline int
+dequeue_and_free_cops(uint16_t q_id, struct rte_crypto_op **ops,
+               uint32_t *inflight)
+{
+       uint16_t nb_ops = *inflight < MAX_PKT_BURST ? *inflight : MAX_PKT_BURST;
+       uint16_t deqd;
+
+       deqd = rte_cryptodev_dequeue_burst(info.cid, q_id, ops, nb_ops);
+
+       if (!deqd)
+               return 0;
+
+       rte_vhost_crypto_finalize_requests(ops, q_id, deqd);
+
+       rte_mempool_put_bulk(info.cop_pool, (void **)ops, deqd);
+
+       *inflight -= deqd;
+
+       return 0;
+}
+
+static int
+vhost_crypto_worker(__rte_unused void *arg)
+{
+       struct rte_crypto_op *ops[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
+       struct rte_crypto_op *ops_deq[NB_VIRTIO_QUEUES][MAX_PKT_BURST + 1];
+       uint64_t t_start, t_end, hz = rte_get_tsc_hz();
+       uint64_t threshold = hz / 1000;
+       uint32_t nb_inflight_ops[NB_VIRTIO_QUEUES];
+       uint32_t lcore_id = rte_lcore_id();
+       uint32_t i, j;
+       uint16_t fetched[NB_VIRTIO_QUEUES];
+
+       int ret = 0;
+
+       RTE_LOG(INFO, USER1, "Processing on core %u started\n", lcore_id);
+
+       for (i = 0; i < NB_VIRTIO_QUEUES; i++) {
+               if (rte_crypto_op_bulk_alloc(info.cop_pool,
+                               RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops[i],
+                               MAX_PKT_BURST) < MAX_PKT_BURST) {
+                       RTE_LOG(ERR, USER1, "Failed to alloc cops\n");
+                       ret = -1;
+                       goto exit;
+               }
+
+               fetched[i] = 0;
+               nb_inflight_ops[i] = 0;
+       }
+
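+       /*
+        * Main loop: fetch requests from each virtio queue, enqueue them to
+        * the cryptodev once a full burst is collected or the ~1ms timeout
+        * expires, then dequeue and finalize the completed requests.
+        */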
+       t_start = rte_rdtsc_precise();
+       while (1) {
+               uint64_t interval;
+               uint32_t timeout;
+
+               if (unlikely(info.initialized == 0))
+                       continue;
+
+               t_end = rte_rdtsc_precise();
+               interval = t_end - t_start;
+               timeout = (interval > threshold) ? 1 : 0;
+               if (timeout)
+                       t_start = t_end;
+
+               if (unlikely(timeout)) {
+                       for (j = 0; j < NB_VIRTIO_QUEUES; j++) {
+                               if (unlikely(enqueue_and_realloc_cops(j,
+                                               ops[j], fetched[j]) < 0))
+                                       goto exit;
+
+                               nb_inflight_ops[j] += fetched[j];
+                               fetched[j] = 0;
+
+                               if (unlikely(dequeue_and_free_cops(j,
+                                               ops_deq[j], &nb_inflight_ops[j])
+                                               < 0))
+                                       goto exit;
+                       }
+               }
+
+               for (j = 0; j < NB_VIRTIO_QUEUES; j++) {
+                       for (i = 0; i < options.nb_sockets; i++) {
+                               uint16_t to_fetch = MAX_PKT_BURST -
+                                               fetched[j];
+                               uint16_t available = NB_CACHE_OBJS -
+                                               nb_inflight_ops[j];
+
+                               fetched[j] += rte_vhost_crypto_fetch_requests(
+                                       info.vids[i], j, &ops[j][fetched[j]],
+                                       RTE_MIN(to_fetch, available));
+
+                               if (fetched[j] >= MAX_PKT_BURST) {
+                                       if (unlikely(enqueue_and_realloc_cops(j,
+                                                       ops[j], fetched[j])
+                                                       < 0))
+                                               goto exit;
+
+                                       nb_inflight_ops[j] += fetched[j];
+                                       fetched[j] = 0;
+                               }
+                       }
+
+                       if (nb_inflight_ops[j] >= MAX_PKT_BURST) {
+                               if (unlikely(dequeue_and_free_cops(j,
+                                               ops_deq[j],
+                                               &nb_inflight_ops[j]) < 0))
+                                       goto exit;
+                       }
+               }
+       }
+exit:
+       return ret;
+}
+
+
+static void
+unregister_drivers(int socket_num)
+{
+       int ret;
+
+       ret = rte_vhost_driver_unregister(options.socket_files[socket_num]);
+       if (ret != 0)
+               RTE_LOG(ERR, USER1,
+                       "Failed to unregister vhost driver for %s.\n",
+                       options.socket_files[socket_num]);
+}
+
+int
+main(int argc, char *argv[])
+{
+       struct rte_cryptodev_qp_conf qp_conf = {NB_CRYPTO_DESCRIPTORS};
+       struct rte_cryptodev_config config;
+       struct rte_cryptodev_info dev_info;
+       uint32_t cryptodev_id;
+       uint32_t worker_lcore;
+       uint32_t stats_lcore;
+       char name[128];
+       uint32_t i = 0;
+       int ret;
+
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               return -1;
+       argc -= ret;
+       argv += ret;
+
+       ret = vhost_crypto_parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Failed to parse arguments!\n");
+
+       worker_lcore = rte_get_next_lcore(-1, 1, 0);
+       stats_lcore = rte_get_next_lcore(worker_lcore, 1, 0);
+       if (worker_lcore == RTE_MAX_LCORE || stats_lcore == RTE_MAX_LCORE)
+               rte_exit(EXIT_FAILURE, "Not enough lcore\n");
+
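+       /* Select the lowest-numbered cryptodev enabled in cryptodev-mask. */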
+       for (cryptodev_id = 0; cryptodev_id < rte_cryptodev_count();
+                       cryptodev_id++) {
+               uint64_t id = 1ULL << cryptodev_id;
+
+               if (id & options.cryptodev_mask)
+                       break;
+       }
+
+       if (cryptodev_id == rte_cryptodev_count()) {
+               RTE_LOG(ERR, USER1, "No cryptodev matching the mask\n");
+               goto error_exit;
+       }
+
+       rte_cryptodev_info_get(cryptodev_id, &dev_info);
+       if (dev_info.max_nb_queue_pairs < NB_VIRTIO_QUEUES) {
+               RTE_LOG(ERR, USER1, "Number of queues cannot exceed %u\n",
+                               dev_info.max_nb_queue_pairs);
+               goto error_exit;
+       }
+
+       config.nb_queue_pairs = NB_VIRTIO_QUEUES;
+       config.socket_id = rte_lcore_to_socket_id(worker_lcore);
+
+       ret = rte_cryptodev_configure(cryptodev_id, &config);
+       if (ret < 0) {
+               RTE_LOG(ERR, USER1, "Failed to configure cryptodev %u\n",
+                               cryptodev_id);
+               goto error_exit;
+       }
+
+       snprintf(name, 127, "SESS_POOL_%u", worker_lcore);
+       info.sess_pool = rte_mempool_create(name, SESSION_MAP_ENTRIES,
+                       rte_cryptodev_get_private_session_size(
+                       cryptodev_id), 64, 0, NULL, NULL, NULL, NULL,
+                       rte_lcore_to_socket_id(worker_lcore), 0);
+       if (!info.sess_pool) {
+               RTE_LOG(ERR, USER1, "Failed to create session mempool\n");
+               goto error_exit;
+       }
+
+       snprintf(name, 127, "COPPOOL_%u", worker_lcore);
+       info.cop_pool = rte_vhost_crypto_create_cop_pool(name,
+                       RTE_CRYPTO_OP_TYPE_SYMMETRIC, NB_MEMPOOL_OBJS,
+                       NB_CACHE_OBJS, rte_lcore_to_socket_id(worker_lcore));
+       if (!info.cop_pool) {
+               RTE_LOG(ERR, USER1, "Lcore %u failed to create crypto pool\n",
+                               worker_lcore);
+               ret = -1;
+               goto error_exit;
+       }
+
+       for (i = 0; i < NB_VIRTIO_QUEUES; i++) {
+               ret = rte_cryptodev_queue_pair_setup(cryptodev_id, i, &qp_conf,
+                               rte_lcore_to_socket_id(worker_lcore),
+                               info.sess_pool);
+               if (ret < 0) {
+                       RTE_LOG(ERR, USER1, "Failed to configure qp %u\n",
+                                       i);
+                       goto error_exit;
+               }
+       }
+
+       info.cid = cryptodev_id;
+       info.lcore_id = worker_lcore;
+
+       if (rte_eal_remote_launch(vhost_crypto_worker, NULL, worker_lcore)
+                       < 0) {
+               RTE_LOG(ERR, USER1, "Failed to start worker lcore\n");
+               goto error_exit;
+       }
+       if (rte_eal_remote_launch(vhost_crypto_stats_worker, NULL, stats_lcore)
+                       < 0) {
+               RTE_LOG(ERR, USER1, "Failed to start stats lcore\n");
+               goto error_exit;
+       }
+
+       for (i = 0; i < options.nb_sockets; i++) {
+               if (rte_vhost_driver_register(options.socket_files[i], 0) < 0) {
+                       RTE_LOG(ERR, USER1, "socket %s already exists\n",
+                                       options.socket_files[i]);
+                       goto error_exit;
+               }
+
+               rte_vhost_driver_callback_register(options.socket_files[i],
+                               &virtio_crypto_device_ops);
+
+               if (rte_vhost_driver_start(options.socket_files[i]) < 0) {
+                       RTE_LOG(ERR, USER1, "failed to start vhost driver.\n");
+                       goto error_exit;
+               }
+       }
+
+       RTE_LCORE_FOREACH(worker_lcore)
+               rte_eal_wait_lcore(worker_lcore);
+
+       rte_mempool_free(info.sess_pool);
+       rte_mempool_free(info.cop_pool);
+
+       return 0;
+
+error_exit:
+       for (i = 0; i < options.nb_sockets; i++)
+               unregister_drivers(i);
+
+       rte_mempool_free(info.cop_pool);
+       rte_mempool_free(info.sess_pool);
+
+       return -1;
+}
-- 
2.9.5
