From: "Cédric Le Goater" <clg@redhat.com>
To: John Johnson <john.g.johnson@oracle.com>, qemu-devel@nongnu.org
Subject: Re: [PATCH v1 14/24] vfio-user: get and set IRQs
Date: Tue, 13 Dec 2022 17:39:49 +0100
Message-ID: <9fb35cf6-53bf-1309-c27e-de3bece7fbe1@redhat.com>
In-Reply-To: <5532e8b1721cdf68d8932c747dc6b5f42738e139.1667542066.git.john.g.johnson@oracle.com>
On 11/9/22 00:13, John Johnson wrote:
> Signed-off-by: Elena Ufimtseva <elena.ufimtseva@oracle.com>
> Signed-off-by: John G Johnson <john.g.johnson@oracle.com>
> Signed-off-by: Jagannathan Raman <jag.raman@oracle.com>
> ---
> hw/vfio/pci.c | 7 ++-
> hw/vfio/user-protocol.h | 25 +++++++++
> hw/vfio/user.c | 135 ++++++++++++++++++++++++++++++++++++++++++++++++
> 3 files changed, 166 insertions(+), 1 deletion(-)
>
> diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
> index 7abe44e..be39a4e 100644
> --- a/hw/vfio/pci.c
> +++ b/hw/vfio/pci.c
> @@ -713,7 +713,8 @@ retry:
> ret = vfio_enable_vectors(vdev, false);
> if (ret) {
> if (ret < 0) {
> - error_report("vfio: Error: Failed to setup MSI fds: %m");
> + error_report("vfio: Error: Failed to setup MSI fds: %s",
> + strerror(-ret));
This change looks unrelated to vfio-user IRQ support; it belongs in a separate patch.
> } else {
> error_report("vfio: Error: Failed to enable %d "
> "MSI vectors, retry with %d", vdev->nr_vectors, ret);
> @@ -2712,6 +2713,7 @@ static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
> irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
>
> ret = VDEV_GET_IRQ_INFO(vbasedev, &irq_info);
> +
> if (ret) {
> /* This can fail for an old kernel or legacy PCI dev */
> trace_vfio_populate_device_get_irq_info_failure(strerror(errno));
> @@ -3593,6 +3595,9 @@ static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp)
> goto out_teardown;
> }
>
> + vfio_register_err_notifier(vdev);
> + vfio_register_req_notifier(vdev);
> +
> return;
>
> out_teardown:
> diff --git a/hw/vfio/user-protocol.h b/hw/vfio/user-protocol.h
> index 124340c..31704cf 100644
> --- a/hw/vfio/user-protocol.h
> +++ b/hw/vfio/user-protocol.h
> @@ -141,6 +141,31 @@ typedef struct {
> } VFIOUserRegionInfo;
>
> /*
> + * VFIO_USER_DEVICE_GET_IRQ_INFO
> + * imported from struct vfio_irq_info
> + */
> +typedef struct {
> + VFIOUserHdr hdr;
> + uint32_t argsz;
> + uint32_t flags;
> + uint32_t index;
> + uint32_t count;
> +} VFIOUserIRQInfo;
> +
> +/*
> + * VFIO_USER_DEVICE_SET_IRQS
> + * imported from struct vfio_irq_set
> + */
> +typedef struct {
> + VFIOUserHdr hdr;
> + uint32_t argsz;
> + uint32_t flags;
> + uint32_t index;
> + uint32_t start;
> + uint32_t count;
> +} VFIOUserIRQSet;
> +
> +/*
> * VFIO_USER_REGION_READ
> * VFIO_USER_REGION_WRITE
> */
> diff --git a/hw/vfio/user.c b/hw/vfio/user.c
> index 1453bb5..815385b 100644
> --- a/hw/vfio/user.c
> +++ b/hw/vfio/user.c
> @@ -1164,6 +1164,117 @@ static int vfio_user_get_region_info(VFIOProxy *proxy,
> return 0;
> }
>
> +static int vfio_user_get_irq_info(VFIOProxy *proxy,
> + struct vfio_irq_info *info)
> +{
> + VFIOUserIRQInfo msg;
> +
> + memset(&msg, 0, sizeof(msg));
> + vfio_user_request_msg(&msg.hdr, VFIO_USER_DEVICE_GET_IRQ_INFO,
> + sizeof(msg), 0);
> + msg.argsz = info->argsz;
> + msg.index = info->index;
> +
> + vfio_user_send_wait(proxy, &msg.hdr, NULL, 0, false);
> + if (msg.hdr.flags & VFIO_USER_ERROR) {
> + return -msg.hdr.error_reply;
> + }
> +
> + memcpy(info, &msg.argsz, sizeof(*info));
> + return 0;
> +}
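Also, the memcpy from &msg.argsz assumes that the fields of
VFIOUserIRQInfo after the header mirror struct vfio_irq_info exactly.
That assumption deserves a build-time check, something like (untested
sketch):

    QEMU_BUILD_BUG_ON(sizeof(VFIOUserIRQInfo) - offsetof(VFIOUserIRQInfo, argsz)
                      != sizeof(struct vfio_irq_info));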
> +
> +static int irq_howmany(int *fdp, uint32_t cur, uint32_t max)
Intriguing routine. See my comment below.
> +{
> + int n = 0;
> +
> + if (fdp[cur] != -1) {
> + do {
> + n++;
> + } while (n < max && fdp[cur + n] != -1);
> + } else {
> + do {
> + n++;
> + } while (n < max && fdp[cur + n] == -1);
> + }
> +
> + return n;
> +}
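If I read it correctly, this returns the length of the initial run of
entries starting at fdp[cur] that share the same validity (all != -1,
or all == -1), capped at max. A comment to that effect would help.
An equivalent formulation that makes the intent explicit (untested
sketch, assuming those semantics):

    static int irq_howmany(int *fdp, uint32_t cur, uint32_t max)
    {
        bool valid = fdp[cur] != -1;   /* validity of the first entry */
        int n = 1;                     /* the first entry itself */

        /* extend the run while the next entry has the same validity */
        while (n < max && (fdp[cur + n] != -1) == valid) {
            n++;
        }

        return n;
    }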
> +
> +static int vfio_user_set_irqs(VFIOProxy *proxy, struct vfio_irq_set *irq)
> +{
> + g_autofree VFIOUserIRQSet *msgp = NULL;
> + uint32_t size, nfds, send_fds, sent_fds, max;
> +
> + if (irq->argsz < sizeof(*irq)) {
> + error_printf("vfio_user_set_irqs argsz too small\n");
> + return -EINVAL;
> + }
> +
> + /*
> + * Handle simple case
> + */
> + if ((irq->flags & VFIO_IRQ_SET_DATA_EVENTFD) == 0) {
> + size = sizeof(VFIOUserHdr) + irq->argsz;
> + msgp = g_malloc0(size);
> +
> + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS, size, 0);
> + msgp->argsz = irq->argsz;
> + msgp->flags = irq->flags;
> + msgp->index = irq->index;
> + msgp->start = irq->start;
> + msgp->count = irq->count;
> +
> + vfio_user_send_wait(proxy, &msgp->hdr, NULL, 0, false);
> + if (msgp->hdr.flags & VFIO_USER_ERROR) {
> + return -msgp->hdr.error_reply;
> + }
> +
> + return 0;
> + }
> +
> + /*
> + * Calculate the number of FDs to send
> + * and adjust argsz
> + */
> + nfds = (irq->argsz - sizeof(*irq)) / sizeof(int);
> + irq->argsz = sizeof(*irq);
> + msgp = g_malloc0(sizeof(*msgp));
> + /*
> + * Send in chunks if over max_send_fds
> + */
> + for (sent_fds = 0; nfds > sent_fds; sent_fds += send_fds) {
> + VFIOUserFDs *arg_fds, loop_fds;
> +
> + /* must send all valid FDs or all invalid FDs in single msg */
Why is that?
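My guess is that a -1 cannot travel over SCM_RIGHTS, so a message must
either attach a real descriptor for every entry it covers or attach
none at all (a run of -1 entries meaning "disable"). If that is the
reason, please spell it out in the comment.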
> + max = nfds - sent_fds;
> + if (max > proxy->max_send_fds) {
> + max = proxy->max_send_fds;
> + }
> + send_fds = irq_howmany((int *)irq->data, sent_fds, max);
sent_fds can never be -1, yet irq_howmany() takes this value into
account. Why?
> +
> + vfio_user_request_msg(&msgp->hdr, VFIO_USER_DEVICE_SET_IRQS,
> + sizeof(*msgp), 0);
> + msgp->argsz = irq->argsz;
> + msgp->flags = irq->flags;
> + msgp->index = irq->index;
> + msgp->start = irq->start + sent_fds;
> + msgp->count = send_fds;
> +
> + loop_fds.send_fds = send_fds;
> + loop_fds.recv_fds = 0;
> + loop_fds.fds = (int *)irq->data + sent_fds;
> + arg_fds = loop_fds.fds[0] != -1 ? &loop_fds : NULL;
> +
> + vfio_user_send_wait(proxy, &msgp->hdr, arg_fds, 0, false);
> + if (msgp->hdr.flags & VFIO_USER_ERROR) {
> + return -msgp->hdr.error_reply;
> + }
> + }
> +
> + return 0;
> +}
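To check my understanding of the chunking: with max_send_fds = 2 and
irq->data = { a, b, c, -1, -1 }, the loop would produce:

    msg 1: start = irq->start + 0, count = 2, fds { a, b } attached
    msg 2: start = irq->start + 2, count = 1, fd  { c } attached
    msg 3: start = irq->start + 3, count = 2, no fds attached

i.e. runs of valid fds are split on max_send_fds boundaries, and runs
of -1 entries go out as fd-less messages. Is that right?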
> +
> static int vfio_user_region_read(VFIOProxy *proxy, uint8_t index, off_t offset,
> uint32_t count, void *data)
> {
> @@ -1277,6 +1388,28 @@ static int vfio_user_io_get_region_info(VFIODevice *vbasedev,
> return 0;
> }
>
> +static int vfio_user_io_get_irq_info(VFIODevice *vbasedev,
> + struct vfio_irq_info *irq)
> +{
> + int ret;
> +
> + ret = vfio_user_get_irq_info(vbasedev->proxy, irq);
> + if (ret) {
> + return ret;
> + }
> +
> + if (irq->index > vbasedev->num_irqs) {
> + return -EINVAL;
> + }
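Shouldn't this be ">="? If num_irqs is a count of IRQ indexes, as in
struct vfio_device_info, then index == num_irqs is already out of
range:

    if (irq->index >= vbasedev->num_irqs) {
        return -EINVAL;
    }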
> + return 0;
> +}
> +
> +static int vfio_user_io_set_irqs(VFIODevice *vbasedev,
> + struct vfio_irq_set *irqs)
> +{
> + return vfio_user_set_irqs(vbasedev->proxy, irqs);
> +}
> +
> static int vfio_user_io_region_read(VFIODevice *vbasedev, uint8_t index,
> off_t off, uint32_t size, void *data)
> {
> @@ -1294,6 +1427,8 @@ static int vfio_user_io_region_write(VFIODevice *vbasedev, uint8_t index,
> VFIODevIO vfio_dev_io_sock = {
> .get_info = vfio_user_io_get_info,
> .get_region_info = vfio_user_io_get_region_info,
> + .get_irq_info = vfio_user_io_get_irq_info,
> + .set_irqs = vfio_user_io_set_irqs,
> .region_read = vfio_user_io_region_read,
> .region_write = vfio_user_io_region_write,
> };