qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: Alexey Kardashevskiy <aik@ozlabs.ru>
To: Alex Williamson <alex.williamson@redhat.com>
Cc: Alexander Graf <agraf@suse.de>,
	qemu-ppc@nongnu.org, qemu-devel@nongnu.org,
	David Gibson <david@gibson.dropbear.id.au>
Subject: Re: [Qemu-devel] [PATCH v4 02/12] vfio: Create VFIOAddressSpace objects as needed
Date: Tue, 10 Sep 2013 18:09:23 +1000	[thread overview]
Message-ID: <522ED3B3.3090707@ozlabs.ru> (raw)
In-Reply-To: <1378405493.3246.248.camel@ul30vt.home>

On 09/06/2013 04:24 AM, Alex Williamson wrote:
> On Fri, 2013-08-30 at 20:15 +1000, Alexey Kardashevskiy wrote:
>> From: David Gibson <david@gibson.dropbear.id.au>
>>
>> So far, VFIO has a notion of different logical DMA address spaces, but
>> only ever uses one (system memory).  This patch extends this, creating
>> new VFIOAddressSpace objects as necessary, according to the AddressSpace
>> reported by the PCI subsystem for this device's DMAs.
>>
>> This isn't enough yet to support guest side IOMMUs with VFIO, but it does
>> mean we could now support VFIO devices on, for example, a guest side PCI
>> host bridge which maps system memory at somewhere other than 0 in PCI
>> space.
>>
>> Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
>> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
>> ---
>>  hw/misc/vfio.c | 43 +++++++++++++++++++++++++++++++++++--------
>>  1 file changed, 35 insertions(+), 8 deletions(-)
>>
>> diff --git a/hw/misc/vfio.c b/hw/misc/vfio.c
>> index 93a316e..c16f41b 100644
>> --- a/hw/misc/vfio.c
>> +++ b/hw/misc/vfio.c
>> @@ -133,9 +133,10 @@ enum {
>>  typedef struct VFIOAddressSpace {
>>      AddressSpace *as;
>>      QLIST_HEAD(, VFIOContainer) containers;
>> +    QLIST_ENTRY(VFIOAddressSpace) list;
>>  } VFIOAddressSpace;
>>  
>> -static VFIOAddressSpace vfio_address_space_memory;
>> +QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces;
>>  
>>  struct VFIOGroup;
>>  
>> @@ -2611,10 +2612,34 @@ static int vfio_load_rom(VFIODevice *vdev)
>>      return 0;
>>  }
>>  
>> -static void vfio_address_space_init(VFIOAddressSpace *space, AddressSpace *as)
>> +static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
>>  {
>> +    VFIOAddressSpace *space;
>> +
>> +    QLIST_FOREACH(space, &vfio_address_spaces, list) {
>> +        if (space->as == as) {
>> +            return space;
>> +        }
>> +    }
>> +
>> +    /* No suitable VFIOAddressSpace, create a new one */
>> +    space = g_malloc0(sizeof(*space));
>>      space->as = as;
>>      QLIST_INIT(&space->containers);
>> +
>> +    QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
>> +
>> +    return space;
>> +}
>> +
>> +static void vfio_put_address_space(VFIOAddressSpace *space)
>> +{
>> +    if (!QLIST_EMPTY(&space->containers)) {
>> +        return;
>> +    }
>> +
>> +    QLIST_REMOVE(space, list);
>> +    g_free(space);
>>  }
>>  
>>  static int vfio_connect_container(VFIOGroup *group, VFIOAddressSpace *space)
>> @@ -2699,6 +2724,8 @@ static void vfio_disconnect_container(VFIOGroup *group)
>>      group->container = NULL;
>>  
>>      if (QLIST_EMPTY(&container->group_list)) {
>> +        VFIOAddressSpace *space = container->space;
>> +
>>          if (container->iommu_data.release) {
>>              container->iommu_data.release(container);
>>          }
>> @@ -2706,6 +2733,8 @@ static void vfio_disconnect_container(VFIOGroup *group)
>>          DPRINTF("vfio_disconnect_container: close container->fd\n");
>>          close(container->fd);
>>          g_free(container);
>> +
>> +        vfio_put_address_space(space);
>>      }
>>  }
>>  
>> @@ -3076,6 +3105,7 @@ static int vfio_initfn(PCIDevice *pdev)
>>  {
>>      VFIODevice *pvdev, *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
>>      VFIOGroup *group;
>> +    VFIOAddressSpace *space;
>>      char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
>>      ssize_t len;
>>      struct stat st;
>> @@ -3111,14 +3141,12 @@ static int vfio_initfn(PCIDevice *pdev)
>>      DPRINTF("%s(%04x:%02x:%02x.%x) group %d\n", __func__, vdev->host.domain,
>>              vdev->host.bus, vdev->host.slot, vdev->host.function, groupid);
>>  
>> -    if (pci_device_iommu_address_space(pdev) != &address_space_memory) {
>> -        error_report("vfio: DMA address space must be system memory");
>> -        return -EINVAL;
>> -    }
>> +    space = vfio_get_address_space(pci_device_iommu_address_space(pdev));
>>  
>> -    group = vfio_get_group(groupid, &vfio_address_space_memory);
>> +    group = vfio_get_group(groupid, space);
>>      if (!group) {
>>          error_report("vfio: failed to get group %d", groupid);
>> +        vfio_put_address_space(space);
>>          return -ENOENT;
>>      }
>>  
> 
> Kind of a code flow issue here, on teardown we have:
> 
> vfio_put_group
>   vfio_disconnect_container
>     vfio_put_address_space
> 
> On setup we do:
> 
> vfio_get_address_space
> vfio_get_group
>   vfio_connect_container
> 
> We could easily move vfio_get_address_space into vfio_get_group to make
> things a little more balanced.  It doesn't seem like too much additional
> to pass the address space through vfio_get_group into
> vfio_connect_container so that we could have a completely symmetric flow
> though.

I can do that. I will just need to call vfio_put_address_space() on every
branch that returns NULL, or rework it a bit more. So I ended up with this:

(not a patch, just cut-n-paste).
===

-static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
+static VFIOGroup *vfio_get_group(int groupid, AddressSpace *as)
 {
+    VFIOAddressSpace *space;
     VFIOGroup *group;
     char path[32];
     struct vfio_group_status status = { .argsz = sizeof(status) };

+    space = vfio_get_address_space(as);
+
     QLIST_FOREACH(group, &group_list, next) {
         if (group->groupid == groupid) {
             /* Found it.  Now is it already in the right context? */
@@ -2723,7 +2755,7 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
             } else {
                 error_report("vfio: group %d used in multiple address spaces",
                              group->groupid);
-                return NULL;
+                goto error_exit;
             }
         }
     }
@@ -2734,24 +2766,19 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)
     group->fd = qemu_open(path, O_RDWR);
     if (group->fd < 0) {
         error_report("vfio: error opening %s: %m", path);
-        g_free(group);
-        return NULL;
+        goto free_group_exit;
     }

     if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
         error_report("vfio: error getting group status: %m");
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
         error_report("vfio: error, group %d is not viable, please ensure "
                      "all devices within the iommu_group are bound to their "
                      "vfio bus driver.", groupid);
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     group->groupid = groupid;
@@ -2759,14 +2786,23 @@ static VFIOGroup *vfio_get_group(int groupid, VFIOAddressSpace *space)

     if (vfio_connect_container(group, space)) {
         error_report("vfio: failed to setup container for group %d", groupid);
-        close(group->fd);
-        g_free(group);
-        return NULL;
+        goto close_fd_exit;
     }

     QLIST_INSERT_HEAD(&group_list, group, next);

     return group;
+
+close_fd_exit:
+    close(group->fd);
+
+free_group_exit:
+    g_free(group);
+
+error_exit:
+    vfio_put_address_space(space);
+
+    return NULL;
 }

===

Is that ok? Should I split it into 2 patches for easier review?


> 
> 
>> @@ -3339,7 +3367,6 @@ static const TypeInfo vfio_pci_dev_info = {
>>  
>>  static void register_vfio_pci_dev_type(void)
>>  {
>> -    vfio_address_space_init(&vfio_address_space_memory, &address_space_memory);
>>      type_register_static(&vfio_pci_dev_info);
>>  }
>>  
> 
> 
> 


-- 
Alexey

  reply	other threads:[~2013-09-10  8:09 UTC|newest]

Thread overview: 32+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-08-30 10:15 [Qemu-devel] [PATCH v4 00/12] vfio on spapr-ppc64 Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 01/12] vfio: Introduce VFIO address spaces Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 02/12] vfio: Create VFIOAddressSpace objects as needed Alexey Kardashevskiy
2013-09-05 18:24   ` Alex Williamson
2013-09-10  8:09     ` Alexey Kardashevskiy [this message]
2013-09-10 21:51       ` Alex Williamson
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 03/12] vfio: Add guest side IOMMU support Alexey Kardashevskiy
2013-09-05 18:49   ` Alex Williamson
2013-09-10  8:22     ` Alexey Kardashevskiy
2013-09-10 22:02       ` Alex Williamson
2013-09-11  6:15         ` Paolo Bonzini
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 04/12] spapr vfio: add vfio_container_spapr_get_info() Alexey Kardashevskiy
2013-09-05 19:01   ` Alex Williamson
2013-09-10  8:36     ` Alexey Kardashevskiy
2013-09-10 22:11       ` Alex Williamson
2013-09-13 10:11         ` Alexey Kardashevskiy
2013-09-25 20:29           ` Alex Williamson
2013-09-26 10:22             ` Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 05/12] spapr_pci: convert init to realize Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 06/12] spapr_pci: add spapr_pci trace Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 07/12] spapr_pci: converts fprintf to error_report Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 08/12] spapr_iommu: introduce SPAPR_TCE_TABLE class Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 09/12] spapr_iommu: add SPAPR VFIO IOMMU Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 10/12] spapr vfio: add spapr-pci-vfio-host-bridge to support vfio Alexey Kardashevskiy
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 11/12] spapr vfio: enable for spapr Alexey Kardashevskiy
2013-09-05 19:05   ` Alex Williamson
2013-09-10  9:00     ` Alexey Kardashevskiy
2013-09-10 22:13       ` Alex Williamson
2013-09-13 11:34         ` Alexey Kardashevskiy
2013-09-25 20:33           ` Alex Williamson
2013-08-30 10:15 ` [Qemu-devel] [PATCH v4 12/12] spapr kvm vfio: enable in-kernel acceleration Alexey Kardashevskiy
2013-09-05  6:43 ` [Qemu-devel] [PATCH v4 00/12] vfio on spapr-ppc64 Alexey Kardashevskiy

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=522ED3B3.3090707@ozlabs.ru \
    --to=aik@ozlabs.ru \
    --cc=agraf@suse.de \
    --cc=alex.williamson@redhat.com \
    --cc=david@gibson.dropbear.id.au \
    --cc=qemu-devel@nongnu.org \
    --cc=qemu-ppc@nongnu.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).