On Tue, Feb 06, 2024 at 09:29:29AM +0200, Manos Pitsidianakis wrote:
> On Mon, 05 Feb 2024 19:26, Stefan Hajnoczi <stefa...@redhat.com> wrote:
> > Hanna Czenczek <hre...@redhat.com> noticed that the safety of
> > `vq_aio_context[vq->value] = ctx;` with user-defined vq->value inputs is
> > not obvious.
> > 
> > The code is structured in validate() + apply() steps so input validation
> > is there, but it happens way earlier and there is nothing that
> > guarantees apply() can only be called with validated inputs.
> > 
> > This patch moves the validate() call inside the apply() function so
> > validation is guaranteed. I also added the bounds checking assertion
> > that Hanna suggested.
> > 
> > Signed-off-by: Stefan Hajnoczi <stefa...@redhat.com>
> > ---
> > hw/block/virtio-blk.c | 190 +++++++++++++++++++++++-------------------
> > 1 file changed, 105 insertions(+), 85 deletions(-)
> > 
> > diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> > index 227d83569f..e8b37fd5f4 100644
> > --- a/hw/block/virtio-blk.c
> > +++ b/hw/block/virtio-blk.c
> > @@ -1485,6 +1485,72 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
> >     return 0;
> > }
> > 
> > +static void virtio_resize_cb(void *opaque)
> > +{
> > +    VirtIODevice *vdev = opaque;
> > +
> > +    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
> > +    virtio_notify_config(vdev);
> > +}
> > +
> > +static void virtio_blk_resize(void *opaque)
> > +{
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
> > +
> > +    /*
> > +     * virtio_notify_config() needs to acquire the BQL,
> > +     * so it can't be called from an iothread. Instead, schedule
> > +     * it to be run in the main context BH.
> > +     */
> > +    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
> > +}
> > +
> > +static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
> > +{
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > +
> > +    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > +        VirtQueue *vq = virtio_get_queue(vdev, i);
> > +        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
> > +    }
> > +}
> > +
> > +static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
> > +{
> > +    VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > +
> > +    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > +        VirtQueue *vq = virtio_get_queue(vdev, i);
> > +        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
> > +    }
> > +}
> > +
> > +/* Suspend virtqueue ioeventfd processing during drain */
> > +static void virtio_blk_drained_begin(void *opaque)
> > +{
> > +    VirtIOBlock *s = opaque;
> > +
> > +    if (s->ioeventfd_started) {
> > +        virtio_blk_ioeventfd_detach(s);
> > +    }
> > +}
> > +
> > +/* Resume virtqueue ioeventfd processing after drain */
> > +static void virtio_blk_drained_end(void *opaque)
> > +{
> > +    VirtIOBlock *s = opaque;
> > +
> > +    if (s->ioeventfd_started) {
> > +        virtio_blk_ioeventfd_attach(s);
> > +    }
> > +}
> > +
> > +static const BlockDevOps virtio_block_ops = {
> > +    .resize_cb     = virtio_blk_resize,
> > +    .drained_begin = virtio_blk_drained_begin,
> > +    .drained_end   = virtio_blk_drained_end,
> > +};
> > +
> > static bool
> > validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
> >         uint16_t num_queues, Error **errp)
> > @@ -1547,81 +1613,33 @@ validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
> >     return true;
> > }
> > 
> > -static void virtio_resize_cb(void *opaque)
> > -{
> > -    VirtIODevice *vdev = opaque;
> > -
> > -    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
> > -    virtio_notify_config(vdev);
> > -}
> > -
> > -static void virtio_blk_resize(void *opaque)
> > -{
> > -    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
> > -
> > -    /*
> > -     * virtio_notify_config() needs to acquire the BQL,
> > -     * so it can't be called from an iothread. Instead, schedule
> > -     * it to be run in the main context BH.
> > -     */
> > -    aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
> > -}
> > -
> > -static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
> > -{
> > -    VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > -
> > -    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > -        VirtQueue *vq = virtio_get_queue(vdev, i);
> > -        virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
> > -    }
> > -}
> > -
> > -static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
> > -{
> > -    VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > -
> > -    for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > -        VirtQueue *vq = virtio_get_queue(vdev, i);
> > -        virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
> > -    }
> > -}
> > -
> > -/* Suspend virtqueue ioeventfd processing during drain */
> > -static void virtio_blk_drained_begin(void *opaque)
> > -{
> > -    VirtIOBlock *s = opaque;
> > -
> > -    if (s->ioeventfd_started) {
> > -        virtio_blk_ioeventfd_detach(s);
> > -    }
> > -}
> > -
> > -/* Resume virtqueue ioeventfd processing after drain */
> > -static void virtio_blk_drained_end(void *opaque)
> > -{
> > -    VirtIOBlock *s = opaque;
> > -
> > -    if (s->ioeventfd_started) {
> > -        virtio_blk_ioeventfd_attach(s);
> > -    }
> > -}
> > -
> > -static const BlockDevOps virtio_block_ops = {
> > -    .resize_cb     = virtio_blk_resize,
> > -    .drained_begin = virtio_blk_drained_begin,
> > -    .drained_end   = virtio_blk_drained_end,
> > -};
> > -
> > -/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
> > -static void
> > -apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > -                 AioContext **vq_aio_context, uint16_t num_queues)
> > +/**
> > + * apply_iothread_vq_mapping:
> > + * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
> > + * @vq_aio_context: The array of AioContext pointers to fill in.
> > + * @num_queues: The length of @vq_aio_context.
> > + * @errp: If an error occurs, a pointer to the area to store the error.
> > + *
> > + * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
> > + * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
> > + *
> > + * Returns: %true on success, %false on failure.
> > + **/
> > +static bool apply_iothread_vq_mapping(
> > +        IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > +        AioContext **vq_aio_context,
> > +        uint16_t num_queues,
> > +        Error **errp)
> > {
> >     IOThreadVirtQueueMappingList *node;
> >     size_t num_iothreads = 0;
> >     size_t cur_iothread = 0;
> > 
> > +    if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
> > +                                           num_queues, errp)) {
> > +        return false;
> > +    }
> > +
> >     for (node = iothread_vq_mapping_list; node; node = node->next) {
> >         num_iothreads++;
> >     }
> > @@ -1638,6 +1656,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > 
> >             /* Explicit vq:IOThread assignment */
> >             for (vq = node->value->vqs; vq; vq = vq->next) {
> > +                assert(vq->value < num_queues);
> >                 vq_aio_context[vq->value] = ctx;
> >             }
> >         } else {
> > @@ -1650,6 +1669,8 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > 
> >         cur_iothread++;
> >     }
> > +
> > +    return true;
> > }
> > 
> > /* Context: BQL held */
> > @@ -1660,6 +1681,12 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
> >     BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
> >     VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
> > 
> > +    if (conf->iothread && conf->iothread_vq_mapping_list) {
> > +        error_setg(errp, "iothread and iothread-vq-mapping properties "
> > +                         "cannot be set at the same time");
> > +        return false;
> > +    }
> > +
> >     if (conf->iothread || conf->iothread_vq_mapping_list) {
> >         if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
> >             error_setg(errp,
> > @@ -1685,8 +1712,14 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
> >     s->vq_aio_context = g_new(AioContext *, conf->num_queues);
> > 
> >     if (conf->iothread_vq_mapping_list) {
> > -        apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
> > -                         conf->num_queues);
> > +        if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
> > +                                       s->vq_aio_context,
> > +                                       conf->num_queues,
> > +                                       errp)) {
> > +            g_free(s->vq_aio_context);
> > +            s->vq_aio_context = NULL;
> > +            return false;
> > +        }
> >     } else if (conf->iothread) {
> >         AioContext *ctx = iothread_get_aio_context(conf->iothread);
> >         for (unsigned i = 0; i < conf->num_queues; i++) {
> > @@ -1996,19 +2029,6 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
> >         return;
> >     }
> > 
> > -    if (conf->iothread_vq_mapping_list) {
> > -        if (conf->iothread) {
> > -            error_setg(errp, "iothread and iothread-vq-mapping properties "
> > -                             "cannot be set at the same time");
> > -            return;
> > -        }
> > -
> > -        if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
> > -                                               conf->num_queues, errp)) {
> > -            return;
> > -        }
> > -    }
> > -
> >     s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
> >                                             s->host_features);
> >     virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
> > -- 
> > 2.43.0
> > 
> > 
> 
> 
> virtio_block_ops and its methods are moved around without changes in the
> diff; is that on purpose? If not, the patch and history would be less
> noisy.

Yes, it's because I moved the validate() function immediately before the
apply() function. Previously they were far apart. I guess git's diff
algorithm optimized for the shortest patch rather than the most readable
one.
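
(As a general aside, and untested against this exact commit: git can
sometimes be nudged toward more readable hunks when regenerating a
patch, e.g.

  # Try an alternative diff algorithm for the last commit.
  git format-patch --histogram -1

  # Or make it the default for this repository.
  git config diff.algorithm histogram

  # Or highlight moved blocks when reviewing locally.
  git diff --color-moved=zebra

None of these guarantee git would have grouped the moved functions
better here, though.)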

> Regardless:
> 
> Reviewed-by: Manos Pitsidianakis <manos.pitsidiana...@linaro.org>
> 
