From: Stefan Hajnoczi <stefanha@redhat.com>
To: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
Cc: qemu-block@nongnu.org, qemu-devel@nongnu.org,
Kevin Wolf <kwolf@redhat.com>, Hanna Reitz <hreitz@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Michael Roth <michael.roth@amd.com>,
Markus Armbruster <armbru@redhat.com>
Subject: Re: [PATCH 1/5] virtio-blk: enforce iothread-vq-mapping validation
Date: Tue, 6 Feb 2024 10:18:30 -0500
Message-ID: <20240206151830.GA66397@fedora>
In-Reply-To: <8fbve.tkrjtk9401p1@linaro.org>
On Tue, Feb 06, 2024 at 09:29:29AM +0200, Manos Pitsidianakis wrote:
> On Mon, 05 Feb 2024 19:26, Stefan Hajnoczi <stefanha@redhat.com> wrote:
> > Hanna Czenczek <hreitz@redhat.com> noticed that the safety of
> > `vq_aio_context[vq->value] = ctx;` with user-defined vq->value inputs is
> > not obvious.
> >
> > The code is structured in validate() + apply() steps, so input validation
> > does happen, but it happens much earlier and nothing guarantees that
> > apply() can only be called with validated inputs.
> >
> > This patch moves the validate() call inside the apply() function so
> > validation is guaranteed. I also added the bounds checking assertion
> > that Hanna suggested.
> >
> > Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
> > ---
> > hw/block/virtio-blk.c | 190 +++++++++++++++++++++++-------------------
> > 1 file changed, 105 insertions(+), 85 deletions(-)
> >
> > diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c
> > index 227d83569f..e8b37fd5f4 100644
> > --- a/hw/block/virtio-blk.c
> > +++ b/hw/block/virtio-blk.c
> > @@ -1485,6 +1485,72 @@ static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
> > return 0;
> > }
> >
> > +static void virtio_resize_cb(void *opaque)
> > +{
> > + VirtIODevice *vdev = opaque;
> > +
> > + assert(qemu_get_current_aio_context() == qemu_get_aio_context());
> > + virtio_notify_config(vdev);
> > +}
> > +
> > +static void virtio_blk_resize(void *opaque)
> > +{
> > + VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
> > +
> > + /*
> > + * virtio_notify_config() needs to acquire the BQL,
> > + * so it can't be called from an iothread. Instead, schedule
> > + * it to be run in the main context BH.
> > + */
> > + aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
> > +}
> > +
> > +static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
> > +{
> > + VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > +
> > + for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > + VirtQueue *vq = virtio_get_queue(vdev, i);
> > + virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
> > + }
> > +}
> > +
> > +static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
> > +{
> > + VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > +
> > + for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > + VirtQueue *vq = virtio_get_queue(vdev, i);
> > + virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
> > + }
> > +}
> > +
> > +/* Suspend virtqueue ioeventfd processing during drain */
> > +static void virtio_blk_drained_begin(void *opaque)
> > +{
> > + VirtIOBlock *s = opaque;
> > +
> > + if (s->ioeventfd_started) {
> > + virtio_blk_ioeventfd_detach(s);
> > + }
> > +}
> > +
> > +/* Resume virtqueue ioeventfd processing after drain */
> > +static void virtio_blk_drained_end(void *opaque)
> > +{
> > + VirtIOBlock *s = opaque;
> > +
> > + if (s->ioeventfd_started) {
> > + virtio_blk_ioeventfd_attach(s);
> > + }
> > +}
> > +
> > +static const BlockDevOps virtio_block_ops = {
> > + .resize_cb = virtio_blk_resize,
> > + .drained_begin = virtio_blk_drained_begin,
> > + .drained_end = virtio_blk_drained_end,
> > +};
> > +
> > static bool
> > validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
> > uint16_t num_queues, Error **errp)
> > @@ -1547,81 +1613,33 @@ validate_iothread_vq_mapping_list(IOThreadVirtQueueMappingList *list,
> > return true;
> > }
> >
> > -static void virtio_resize_cb(void *opaque)
> > -{
> > - VirtIODevice *vdev = opaque;
> > -
> > - assert(qemu_get_current_aio_context() == qemu_get_aio_context());
> > - virtio_notify_config(vdev);
> > -}
> > -
> > -static void virtio_blk_resize(void *opaque)
> > -{
> > - VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
> > -
> > - /*
> > - * virtio_notify_config() needs to acquire the BQL,
> > - * so it can't be called from an iothread. Instead, schedule
> > - * it to be run in the main context BH.
> > - */
> > - aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev);
> > -}
> > -
> > -static void virtio_blk_ioeventfd_detach(VirtIOBlock *s)
> > -{
> > - VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > -
> > - for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > - VirtQueue *vq = virtio_get_queue(vdev, i);
> > - virtio_queue_aio_detach_host_notifier(vq, s->vq_aio_context[i]);
> > - }
> > -}
> > -
> > -static void virtio_blk_ioeventfd_attach(VirtIOBlock *s)
> > -{
> > - VirtIODevice *vdev = VIRTIO_DEVICE(s);
> > -
> > - for (uint16_t i = 0; i < s->conf.num_queues; i++) {
> > - VirtQueue *vq = virtio_get_queue(vdev, i);
> > - virtio_queue_aio_attach_host_notifier(vq, s->vq_aio_context[i]);
> > - }
> > -}
> > -
> > -/* Suspend virtqueue ioeventfd processing during drain */
> > -static void virtio_blk_drained_begin(void *opaque)
> > -{
> > - VirtIOBlock *s = opaque;
> > -
> > - if (s->ioeventfd_started) {
> > - virtio_blk_ioeventfd_detach(s);
> > - }
> > -}
> > -
> > -/* Resume virtqueue ioeventfd processing after drain */
> > -static void virtio_blk_drained_end(void *opaque)
> > -{
> > - VirtIOBlock *s = opaque;
> > -
> > - if (s->ioeventfd_started) {
> > - virtio_blk_ioeventfd_attach(s);
> > - }
> > -}
> > -
> > -static const BlockDevOps virtio_block_ops = {
> > - .resize_cb = virtio_blk_resize,
> > - .drained_begin = virtio_blk_drained_begin,
> > - .drained_end = virtio_blk_drained_end,
> > -};
> > -
> > -/* Generate vq:AioContext mappings from a validated iothread-vq-mapping list */
> > -static void
> > -apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > - AioContext **vq_aio_context, uint16_t num_queues)
> > +/**
> > + * apply_iothread_vq_mapping:
> > + * @iothread_vq_mapping_list: The mapping of virtqueues to IOThreads.
> > + * @vq_aio_context: The array of AioContext pointers to fill in.
> > + * @num_queues: The length of @vq_aio_context.
> > + * @errp: If an error occurs, a pointer to the area to store the error.
> > + *
> > + * Fill in the AioContext for each virtqueue in the @vq_aio_context array given
> > + * the iothread-vq-mapping parameter in @iothread_vq_mapping_list.
> > + *
> > + * Returns: %true on success, %false on failure.
> > + **/
> > +static bool apply_iothread_vq_mapping(
> > + IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> > + AioContext **vq_aio_context,
> > + uint16_t num_queues,
> > + Error **errp)
> > {
> > IOThreadVirtQueueMappingList *node;
> > size_t num_iothreads = 0;
> > size_t cur_iothread = 0;
> >
> > + if (!validate_iothread_vq_mapping_list(iothread_vq_mapping_list,
> > + num_queues, errp)) {
> > + return false;
> > + }
> > +
> > for (node = iothread_vq_mapping_list; node; node = node->next) {
> > num_iothreads++;
> > }
> > @@ -1638,6 +1656,7 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> >
> > /* Explicit vq:IOThread assignment */
> > for (vq = node->value->vqs; vq; vq = vq->next) {
> > + assert(vq->value < num_queues);
> > vq_aio_context[vq->value] = ctx;
> > }
> > } else {
> > @@ -1650,6 +1669,8 @@ apply_vq_mapping(IOThreadVirtQueueMappingList *iothread_vq_mapping_list,
> >
> > cur_iothread++;
> > }
> > +
> > + return true;
> > }
> >
> > /* Context: BQL held */
> > @@ -1660,6 +1681,12 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
> > BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
> > VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
> >
> > +    if (conf->iothread && conf->iothread_vq_mapping_list) {
> > +        error_setg(errp, "iothread and iothread-vq-mapping properties "
> > +                   "cannot be set at the same time");
> > +        return false;
> > +    }
> > +
> > if (conf->iothread || conf->iothread_vq_mapping_list) {
> > if (!k->set_guest_notifiers || !k->ioeventfd_assign) {
> > error_setg(errp,
> > @@ -1685,8 +1712,14 @@ static bool virtio_blk_vq_aio_context_init(VirtIOBlock *s, Error **errp)
> > s->vq_aio_context = g_new(AioContext *, conf->num_queues);
> >
> > if (conf->iothread_vq_mapping_list) {
> > - apply_vq_mapping(conf->iothread_vq_mapping_list, s->vq_aio_context,
> > - conf->num_queues);
> > + if (!apply_iothread_vq_mapping(conf->iothread_vq_mapping_list,
> > + s->vq_aio_context,
> > + conf->num_queues,
> > + errp)) {
> > + g_free(s->vq_aio_context);
> > + s->vq_aio_context = NULL;
> > + return false;
> > + }
> > } else if (conf->iothread) {
> > AioContext *ctx = iothread_get_aio_context(conf->iothread);
> > for (unsigned i = 0; i < conf->num_queues; i++) {
> > @@ -1996,19 +2029,6 @@ static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
> > return;
> > }
> >
> > - if (conf->iothread_vq_mapping_list) {
> > - if (conf->iothread) {
> > - error_setg(errp, "iothread and iothread-vq-mapping properties "
> > - "cannot be set at the same time");
> > - return;
> > - }
> > -
> > - if (!validate_iothread_vq_mapping_list(conf->iothread_vq_mapping_list,
> > - conf->num_queues, errp)) {
> > - return;
> > - }
> > - }
> > -
> > s->config_size = virtio_get_config_size(&virtio_blk_cfg_size_params,
> > s->host_features);
> > virtio_init(vdev, VIRTIO_ID_BLOCK, s->config_size);
> > --
> > 2.43.0
> >
> >
>
>
> virtio_block_ops and its methods are moved around without changes in the
> diff, is that on purpose? If not, the patch and history would be less noisy.
Yes, it's because I moved the validate() function to sit immediately before
the apply() function; previously they were far apart. I guess git's diff
algorithm optimized for the shortest patch rather than the most readable
one.
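
For what it's worth, regenerating the series with one of git's alternative
diff algorithms might keep the moved functions together. I haven't tried it
on this series, but something like:

  git format-patch --patience ...        (or --histogram)
  git diff --color-moved=dimmed-zebra    (highlights moved code when reviewing)

can produce a more readable patch at the cost of a few extra lines.
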
> Regardless:
>
> Reviewed-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org>
>