qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Philippe Mathieu-Daudé" <philmd@redhat.com>
To: Stefano Garzarella <sgarzare@redhat.com>
Cc: Fam Zheng <fam@euphon.net>, Kevin Wolf <kwolf@redhat.com>,
	qemu-block@nongnu.org, qemu-devel@nongnu.org,
	Max Reitz <mreitz@redhat.com>,
	Stefan Hajnoczi <stefanha@redhat.com>
Subject: Re: [PATCH v5 15/15] block/nvme: Use an array of EventNotifier
Date: Fri, 21 Aug 2020 15:09:13 +0200	[thread overview]
Message-ID: <fd1386d6-7663-04a0-f641-c44a48f821d4@redhat.com> (raw)
In-Reply-To: <20200821102937.bartqudtcfesdmzr@steredhat>

On 8/21/20 12:29 PM, Stefano Garzarella wrote:
> On Thu, Aug 20, 2020 at 06:59:01PM +0200, Philippe Mathieu-Daudé wrote:
>> In preparation for using multiple IRQs (and thus multiple eventfds),
>> make BDRVNVMeState::irq_notifier an array (for now with a single
>> element, the admin queue notifier).
>>
>> Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
>> Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>> ---
>>  block/nvme.c | 31 +++++++++++++++++++++----------
>>  1 file changed, 21 insertions(+), 10 deletions(-)
>>
>> diff --git a/block/nvme.c b/block/nvme.c
>> index a61e86a83eb..fe8a40b7ede 100644
>> --- a/block/nvme.c
>> +++ b/block/nvme.c
>> @@ -106,6 +106,12 @@ QEMU_BUILD_BUG_ON(offsetof(NVMeRegs, doorbells) != 0x1000);
>>  #define INDEX_ADMIN     0
>>  #define INDEX_IO(n)     (1 + n)
>>  
>> +/* This driver shares a single MSIX IRQ for the admin and I/O queues */
>> +enum {
>> +    MSIX_SHARED_IRQ_IDX = 0,
>> +    MSIX_IRQ_COUNT = 1
>> +};
>> +
>>  struct BDRVNVMeState {
>>      AioContext *aio_context;
>>      QEMUVFIOState *vfio;
>> @@ -120,7 +126,7 @@ struct BDRVNVMeState {
>>      /* How many uint32_t elements does each doorbell entry take. */
>>      size_t doorbell_scale;
>>      bool write_cache_supported;
>> -    EventNotifier irq_notifier;
>> +    EventNotifier irq_notifier[MSIX_IRQ_COUNT];
>>  
>>      uint64_t nsze; /* Namespace size reported by identify command */
>>      int nsid;      /* The namespace id to read/write data. */
>> @@ -631,7 +637,8 @@ static bool nvme_poll_queues(BDRVNVMeState *s)
>>  
>>  static void nvme_handle_event(EventNotifier *n)
>>  {
>> -    BDRVNVMeState *s = container_of(n, BDRVNVMeState, irq_notifier);
>> +    BDRVNVMeState *s = container_of(n, BDRVNVMeState,
>> +                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>  
>>      trace_nvme_handle_event(s);
>>      event_notifier_test_and_clear(n);
>> @@ -683,7 +690,8 @@ out_error:
>>  static bool nvme_poll_cb(void *opaque)
>>  {
>>      EventNotifier *e = opaque;
>> -    BDRVNVMeState *s = container_of(e, BDRVNVMeState, irq_notifier);
>> +    BDRVNVMeState *s = container_of(e, BDRVNVMeState,
>> +                                    irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>  
>>      trace_nvme_poll_cb(s);
>>      return nvme_poll_queues(s);
>> @@ -705,7 +713,7 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>      s->device = g_strdup(device);
>>      s->nsid = namespace;
>>      s->aio_context = bdrv_get_aio_context(bs);
>> -    ret = event_notifier_init(&s->irq_notifier, 0);
>> +    ret = event_notifier_init(&s->irq_notifier[MSIX_SHARED_IRQ_IDX], 0);
>>      if (ret) {
>>          error_setg(errp, "Failed to init event notifier");
>>          return ret;
>> @@ -784,12 +792,13 @@ static int nvme_init(BlockDriverState *bs, const char *device, int namespace,
>>          }
>>      }
>>  
>> -    ret = qemu_vfio_pci_init_irq(s->vfio, &s->irq_notifier,
>> +    ret = qemu_vfio_pci_init_irq(s->vfio, s->irq_notifier,
> 
> Maybe we can use '&s->irq_notifier[MSIX_SHARED_IRQ_IDX]' to match the other
> changes.

This makes the following patch in the next series (which uses multiple
queues) simpler, but if you prefer, I don't mind using your suggestion
here and then adding another patch to use the array address directly
(instead of the address of the first element of that array). As you
wish :)

> 
>>                                   VFIO_PCI_MSIX_IRQ_INDEX, errp);
>>      if (ret) {
>>          goto out;
>>      }
>> -    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +    aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>>                             false, nvme_handle_event, nvme_poll_cb);
>>  
>>      nvme_identify(bs, namespace, &local_err);
>> @@ -872,9 +881,10 @@ static void nvme_close(BlockDriverState *bs)
>>          nvme_free_queue_pair(s->queues[i]);
>>      }
>>      g_free(s->queues);
>> -    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +    aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>>                             false, NULL, NULL);
>> -    event_notifier_cleanup(&s->irq_notifier);
>> +    event_notifier_cleanup(&s->irq_notifier[MSIX_SHARED_IRQ_IDX]);
>>      qemu_vfio_pci_unmap_bar(s->vfio, 0, (void *)s->regs, 0, NVME_BAR_SIZE);
>>      qemu_vfio_close(s->vfio);
>>  
>> @@ -1381,7 +1391,8 @@ static void nvme_detach_aio_context(BlockDriverState *bs)
>>          q->completion_bh = NULL;
>>      }
>>  
>> -    aio_set_event_notifier(bdrv_get_aio_context(bs), &s->irq_notifier,
>> +    aio_set_event_notifier(bdrv_get_aio_context(bs),
>> +                           &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>>                             false, NULL, NULL);
>>  }
>>  
>> @@ -1391,7 +1402,7 @@ static void nvme_attach_aio_context(BlockDriverState *bs,
>>      BDRVNVMeState *s = bs->opaque;
>>  
>>      s->aio_context = new_context;
>> -    aio_set_event_notifier(new_context, &s->irq_notifier,
>> +    aio_set_event_notifier(new_context, &s->irq_notifier[MSIX_SHARED_IRQ_IDX],
>>                             false, nvme_handle_event, nvme_poll_cb);
>>  
>>      for (int i = 0; i < s->nr_queues; i++) {
>> -- 
>> 2.26.2
>>
>>
> 



  reply	other threads:[~2020-08-21 13:10 UTC|newest]

Thread overview: 38+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-08-20 16:58 [PATCH v5 00/15] block/nvme: Various cleanups required to use multiple queues Philippe Mathieu-Daudé
2020-08-20 16:58 ` [PATCH v5 01/15] block/nvme: Replace magic value by SCALE_MS definition Philippe Mathieu-Daudé
2020-08-21  9:33   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 02/15] block/nvme: Avoid further processing if trace event not enabled Philippe Mathieu-Daudé
2020-08-20 16:58 ` [PATCH v5 03/15] block/nvme: Let nvme_create_queue_pair() fail gracefully Philippe Mathieu-Daudé
2020-08-21  9:44   ` Stefano Garzarella
2020-08-21 13:36     ` Philippe Mathieu-Daudé
2020-08-21 13:54       ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 04/15] block/nvme: Define INDEX macros to ease code review Philippe Mathieu-Daudé
2020-08-21  9:52   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 05/15] block/nvme: Improve error message when IO queue creation failed Philippe Mathieu-Daudé
2020-08-21  9:54   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 06/15] block/nvme: Use common error path in nvme_add_io_queue() Philippe Mathieu-Daudé
2020-08-21  9:55   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 07/15] block/nvme: Rename local variable Philippe Mathieu-Daudé
2020-08-21  9:57   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 08/15] block/nvme: Use union of NvmeIdCtrl / NvmeIdNs structures Philippe Mathieu-Daudé
2020-08-21 10:03   ` Stefano Garzarella
2020-08-21 13:27     ` Philippe Mathieu-Daudé
2020-08-21 13:52       ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 09/15] block/nvme: Replace qemu_try_blockalign0 by qemu_try_blockalign/memset Philippe Mathieu-Daudé
2020-08-21 10:07   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 10/15] block/nvme: Replace qemu_try_blockalign(bs) by qemu_try_memalign(pg_sz) Philippe Mathieu-Daudé
2020-08-21 10:08   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 11/15] block/nvme: Simplify nvme_init_queue() arguments Philippe Mathieu-Daudé
2020-08-21 10:10   ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 12/15] block/nvme: Replace BDRV_POLL_WHILE by AIO_WAIT_WHILE Philippe Mathieu-Daudé
2020-08-21 10:15   ` Stefano Garzarella
2020-08-21 13:15     ` Philippe Mathieu-Daudé
2020-08-21 13:47       ` Stefano Garzarella
2020-08-20 16:58 ` [PATCH v5 13/15] block/nvme: Simplify nvme_create_queue_pair() arguments Philippe Mathieu-Daudé
2020-08-21 10:20   ` Stefano Garzarella
2020-08-20 16:59 ` [PATCH v5 14/15] block/nvme: Extract nvme_poll_queue() Philippe Mathieu-Daudé
2020-08-21 10:23   ` Stefano Garzarella
2020-08-20 16:59 ` [PATCH v5 15/15] block/nvme: Use an array of EventNotifier Philippe Mathieu-Daudé
2020-08-21 10:29   ` Stefano Garzarella
2020-08-21 13:09     ` Philippe Mathieu-Daudé [this message]
2020-08-21 13:46       ` Stefano Garzarella

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=fd1386d6-7663-04a0-f641-c44a48f821d4@redhat.com \
    --to=philmd@redhat.com \
    --cc=fam@euphon.net \
    --cc=kwolf@redhat.com \
    --cc=mreitz@redhat.com \
    --cc=qemu-block@nongnu.org \
    --cc=qemu-devel@nongnu.org \
    --cc=sgarzare@redhat.com \
    --cc=stefanha@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).