Mathieu Poirier <mathieu.poir...@linaro.org> writes:

> This patch provides the vhost-user backend implementation to work
> in tandem with the vhost-user-rng implementation of the QEMU VMM.
>
> It uses the vhost-user API so that other VMMs can re-use the interface
> without having to write the driver again.
>
> Signed-off-by: Mathieu Poirier <mathieu.poir...@linaro.org>
> ---
>  tools/meson.build                        |   8 +
>  tools/vhost-user-rng/50-qemu-rng.json.in |   5 +
>  tools/vhost-user-rng/main.c              | 403 +++++++++++++++++++++++
>  tools/vhost-user-rng/meson.build         |  10 +
>  4 files changed, 426 insertions(+)
>  create mode 100644 tools/vhost-user-rng/50-qemu-rng.json.in
>  create mode 100644 tools/vhost-user-rng/main.c
>  create mode 100644 tools/vhost-user-rng/meson.build
>
> diff --git a/tools/meson.build b/tools/meson.build
> index 3e5a0abfa29f..66b0a11fbb45 100644
> --- a/tools/meson.build
> +++ b/tools/meson.build
> @@ -24,3 +24,11 @@ endif
>  if have_virtiofsd
>    subdir('virtiofsd')
>  endif
> +
> +have_virtiorng = (have_system and
> +    have_tools and
> +    'CONFIG_LINUX' in config_host)
> +
> +if have_virtiorng
> +  subdir('vhost-user-rng')
> +endif
> diff --git a/tools/vhost-user-rng/50-qemu-rng.json.in b/tools/vhost-user-rng/50-qemu-rng.json.in
> new file mode 100644
> index 000000000000..9186c3c6fe1d
> --- /dev/null
> +++ b/tools/vhost-user-rng/50-qemu-rng.json.in
> @@ -0,0 +1,5 @@
> +{
> +  "description": "QEMU vhost-user-rng",
> +  "type": "bridge",
> +  "binary": "@libexecdir@/vhost-user-rng"
> +}
> diff --git a/tools/vhost-user-rng/main.c b/tools/vhost-user-rng/main.c
> new file mode 100644
> index 000000000000..c3b8f6922757
> --- /dev/null
> +++ b/tools/vhost-user-rng/main.c
> @@ -0,0 +1,403 @@
> +/*
> + * VIRTIO RNG Emulation via vhost-user
> + *
> + * Copyright (c) 2021 Mathieu Poirier <mathieu.poir...@linaro.org>
> + *
> + * SPDX-License-Identifier: GPL-2.0-or-later
> + */
> +
> +#define G_LOG_DOMAIN "vhost-user-rng"
> +#define G_LOG_USE_STRUCTURED 1
> +
> +#include <glib.h>
> +#include <gio/gio.h>
> +#include <gio/gunixsocketaddress.h>
> +#include <glib-unix.h>
> +#include <glib/gstdio.h>
> +#include <pthread.h>
> +#include <signal.h>
> +#include <stdio.h>
> +#include <stdbool.h>
> +#include <string.h>
> +#include <inttypes.h>
> +#include <fcntl.h>
> +#include <sys/ioctl.h>
> +#include <sys/types.h>
> +#include <sys/stat.h>
> +#include <sys/mman.h>
> +#include <time.h>
> +#include <unistd.h>
> +#include <endian.h>
> +#include <assert.h>
> +
> +#include "qemu/cutils.h"
> +#include "subprojects/libvhost-user/libvhost-user-glib.h"
> +#include "subprojects/libvhost-user/libvhost-user.h"
> +
> +#ifndef container_of
> +#define container_of(ptr, type, member) ({                      \
> +        const typeof(((type *) 0)->member) * __mptr = (ptr);     \
> +        (type *) ((char *) __mptr - offsetof(type, member)); })
> +#endif
> +
> +typedef struct {
> +    VugDev dev;
> +    struct itimerspec ts;
> +    timer_t rate_limit_timer;
> +    pthread_mutex_t rng_mutex;
> +    pthread_cond_t rng_cond;

I'm confused by the need for a mutex in a single-threaded application.

> +    int64_t quota_remaining;
> +    bool activate_timer;
> +    GMainLoop *loop;
> +} VuRNG;
> +
> +static gboolean print_cap, verbose;
> +static gchar *source_path, *socket_path;
> +static gint source_fd, socket_fd = -1;
> +
> +/* Defaults modelled on virtio-rng.c */
> +static uint32_t period_ms = 1 << 16;
> +static uint64_t max_bytes = INT64_MAX;
> +
> +static void check_rate_limit(union sigval sv)
> +{
> +    VuRNG *rng = sv.sival_ptr;
> +    bool wakeup = false;
> +
> +    pthread_mutex_lock(&rng->rng_mutex);
> +    /*
> +     * The timer has expired and the guest has used all available
> +     * entropy, which means function vu_rng_handle_requests() is waiting
> +     * on us.  As such wake it up once we're done here.
> +     */
> +    if (rng->quota_remaining == 0) {
> +        wakeup = true;
> +    }
> +
> +    /*
> +     * Reset the entropy available to the guest and tell function
> +     * vu_rng_handle_requests() to start the timer before using it.
> +     */
> +    rng->quota_remaining = max_bytes;
> +    rng->activate_timer = true;
> +    pthread_mutex_unlock(&rng->rng_mutex);
> +
> +    if (wakeup) {
> +        pthread_cond_signal(&rng->rng_cond);
> +    }
> +}
> +
> +static void setup_timer(VuRNG *rng)
> +{
> +    struct sigevent sev;
> +    int ret;
> +
> +    memset(&rng->ts, 0, sizeof(struct itimerspec));
> +    rng->ts.it_value.tv_sec = period_ms / 1000;
> +    rng->ts.it_value.tv_nsec = (period_ms % 1000) * 1000000;
> +
> +    /*
> +     * Call function check_rate_limit() as if it was the start of
> +     * a new thread when the timer expires.
> +     */
> +    sev.sigev_notify = SIGEV_THREAD;
> +    sev.sigev_notify_function = check_rate_limit;
> +    sev.sigev_value.sival_ptr = rng;
> +    /* Needs to be NULL if default attributes are to be used. */
> +    sev.sigev_notify_attributes = NULL;
> +    ret = timer_create(CLOCK_MONOTONIC, &sev, &rng->rate_limit_timer);
> +    if (ret < 0) {
> +        fprintf(stderr, "timer_create() failed\n");
> +    }

Ahh I see why now. I think you could avoid this by using glib's own
g_timeout_add() function. That creates a timer source which calls its
callback periodically (as long as the callback returns true to keep the
GSource alive). Since the whole execution is effectively event-driven,
you would avoid the need for locking.
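
Something like this untested sketch is roughly what I have in mind. It
reuses the VuRNG fields from your patch; vu_rng_resume_requests() is a
made-up name for whatever ends up re-driving the deferred virtqueue
processing (see my comment further down):

  /*
   * Hypothetical GLib-based rate limiter.  The callback runs in the
   * main loop thread, so no mutex or condvar is required.
   */
  static gboolean rate_limit_cb(gpointer user_data)
  {
      VuRNG *rng = user_data;
      bool was_starved = (rng->quota_remaining == 0);

      /* Refill the per-slice entropy quota. */
      rng->quota_remaining = max_bytes;

      if (was_starved) {
          /* Made-up helper: re-run any deferred virtqueue handling. */
          vu_rng_resume_requests(rng);
      }

      return G_SOURCE_CONTINUE;   /* keep the timeout source armed */
  }

  /* At setup time, instead of timer_create()/timer_settime(): */
  g_timeout_add(period_ms, rate_limit_cb, rng);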

> +
> +}
> +
> +
> +/* Virtio helpers */
> +static uint64_t rng_get_features(VuDev *dev)
> +{
> +    if (verbose) {
> +        g_info("%s: replying", __func__);
> +    }
> +    return 0;
> +}
> +
> +static void rng_set_features(VuDev *dev, uint64_t features)
> +{
> +    if (verbose && features) {
> +        g_autoptr(GString) s = g_string_new("Requested un-handled feature");
> +        g_string_append_printf(s, " 0x%" PRIx64 "", features);
> +        g_info("%s: %s", __func__, s->str);
> +    }
> +}
> +
> +static void vu_rng_handle_requests(VuDev *dev, int qidx)
> +{
> +    VuRNG *rng = container_of(dev, VuRNG, dev.parent);
> +    VuVirtq *vq = vu_get_queue(dev, qidx);
> +    VuVirtqElement *elem;
> +    size_t to_read;
> +    int len, ret;
> +
> +    for (;;) {
> +        /* Get element in the vhost virtqueue */
> +        elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
> +        if (!elem) {
> +            break;
> +        }
> +
> +        /* Get the amount of entropy to read from the vhost server */
> +        to_read = elem->in_sg[0].iov_len;
> +
> +        pthread_mutex_lock(&rng->rng_mutex);
> +
> +        /*
> +         * We have consumed all entropy available for this time slice.
> +         * Wait for the timer (check_rate_limit()) to tell us about the
> +         * start of a new time slice.
> +         */
> +        if (rng->quota_remaining == 0) {
> +            pthread_cond_wait(&rng->rng_cond, &rng->rng_mutex);
> +        }

Hmm, this complicates things. Ideally you wouldn't want to block here
while processing the virtqueue, as that will end up blocking the guest.
I'll need to think about this.
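
One untested idea, assuming vu_queue_unpop() does what I think it does:
when the quota is exhausted, put the element back and return instead of
sleeping, then let the rate-limit callback re-invoke the handler once
the quota has been refilled:

  static void vu_rng_handle_requests(VuDev *dev, int qidx)
  {
      VuRNG *rng = container_of(dev, VuRNG, dev.parent);
      VuVirtq *vq = vu_get_queue(dev, qidx);
      VuVirtqElement *elem;

      while ((elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement)))) {
          if (rng->quota_remaining == 0) {
              /*
               * Out of entropy for this time slice: defer rather than
               * block.  vu_queue_unpop() returns the element to the
               * queue so nothing is lost.
               */
              vu_queue_unpop(dev, vq, elem, 0);
              free(elem);
              return;
          }
          /* ... read entropy, vu_queue_push(), vu_queue_notify() and
           * free() as before ... */
      }
  }

The rate_limit_cb() sketch above would then call
vu_rng_handle_requests(&rng->dev.parent, 0) in place of my made-up
vu_rng_resume_requests().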

-- 
Alex Bennée
