From: Auger Eric <eric.auger@redhat.com>
To: Greg Kurz <groug@kaod.org>
Cc: peter.maydell@linaro.org, yi.l.liu@intel.com, cdall@kernel.org,
mst@redhat.com, jean-philippe.brucker@arm.com,
qemu-devel@nongnu.org, peterx@redhat.com,
alex.williamson@redhat.com, qemu-arm@nongnu.org,
eric.auger.pro@gmail.com
Subject: Re: [Qemu-devel] [RFC v2 07/28] hw/vfio/common: Refactor container initialization
Date: Wed, 24 Oct 2018 11:00:14 +0200 [thread overview]
Message-ID: <ff16761c-9524-4a3e-b000-6cb2ade91b8f@redhat.com> (raw)
In-Reply-To: <20181022163923.35ad7a4e@bahia.lan>
Hi Greg,
On 10/22/18 4:39 PM, Greg Kurz wrote:
> On Fri, 21 Sep 2018 10:17:58 +0200
> Eric Auger <eric.auger@redhat.com> wrote:
>
>> To prepare for testing yet another extension, let's
>> refactor the code. We introduce vfio_iommu_get_type()
>> helper which selects the richest API (v2 first). Then
>> vfio_init_container() does the SET_CONTAINER and
>> SET_IOMMU ioctl calls. So we end up with a switch/case
>> on the iommu_type which should be a little bit more readable
>> when introducing the NESTING extension check. Also ioctl's
>> get called once per iommu_type.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>> ---
>> hw/vfio/common.c | 102 ++++++++++++++++++++++++++++++-----------------
>> 1 file changed, 65 insertions(+), 37 deletions(-)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 7c185e5a2e..53b8f773cc 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -1036,12 +1036,58 @@ static void vfio_put_address_space(VFIOAddressSpace *space)
>> }
>> }
>>
>> +/*
>> + * vfio_iommu_get_type - selects the richest iommu_type (v2 first)
>> + * nested only is selected if requested by @force_nested
>
> It seems the second line belongs to patch 8.
Yes it does. Thanks for spotting that.
>
> Apart from that, this definitely makes the code more readable.
>
> Reviewed-by: Greg Kurz <groug@kaod.org>
I will send this patch separately.
Thanks
Eric
>
>> + */
>> +static int vfio_iommu_get_type(VFIOContainer *container,
>> + Error **errp)
>> +{
>> + int fd = container->fd;
>> +
>> + if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
>> + return VFIO_TYPE1v2_IOMMU;
>> + } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
>> + return VFIO_TYPE1_IOMMU;
>> + } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
>> + return VFIO_SPAPR_TCE_v2_IOMMU;
>> + } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) {
>> + return VFIO_SPAPR_TCE_IOMMU;
>> + } else {
>> + error_setg(errp, "No available IOMMU models");
>> + return -EINVAL;
>> + }
>> +}
>> +
>> +static int vfio_init_container(VFIOContainer *container, int group_fd,
>> + int iommu_type, Error **errp)
>> +{
>> + int ret;
>> +
>> + ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
>> + if (ret) {
>> + error_setg_errno(errp, errno, "failed to set group container");
>> + return -errno;
>> + }
>> +
>> + ret = ioctl(container->fd, VFIO_SET_IOMMU, iommu_type);
>> + if (ret) {
>> + error_setg_errno(errp, errno, "failed to set iommu for container");
>> + return -errno;
>> + }
>> + container->iommu_type = iommu_type;
>> + return 0;
>> +}
>> +
>> static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>> Error **errp)
>> {
>> VFIOContainer *container;
>> int ret, fd;
>> VFIOAddressSpace *space;
>> + int iommu_type;
>> + bool v2 = false;
>> +
>>
>> space = vfio_get_address_space(as);
>>
>> @@ -1101,23 +1147,20 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>> container->fd = fd;
>> QLIST_INIT(&container->giommu_list);
>> QLIST_INIT(&container->hostwin_list);
>> - if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) ||
>> - ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) {
>> - bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU);
>> +
>> + iommu_type = vfio_iommu_get_type(container, errp);
>> + if (iommu_type < 0) {
>> + goto free_container_exit;
>> + }
>> +
>> + switch (iommu_type) {
>> + case VFIO_TYPE1v2_IOMMU:
>> + case VFIO_TYPE1_IOMMU:
>> + {
>> struct vfio_iommu_type1_info info;
>>
>> - ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
>> + ret = vfio_init_container(container, group->fd, iommu_type, errp);
>> if (ret) {
>> - error_setg_errno(errp, errno, "failed to set group container");
>> - ret = -errno;
>> - goto free_container_exit;
>> - }
>> -
>> - container->iommu_type = v2 ? VFIO_TYPE1v2_IOMMU : VFIO_TYPE1_IOMMU;
>> - ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
>> - if (ret) {
>> - error_setg_errno(errp, errno, "failed to set iommu for container");
>> - ret = -errno;
>> goto free_container_exit;
>> }
>>
>> @@ -1137,28 +1180,16 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>> }
>> vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
>> container->pgsizes = info.iova_pgsizes;
>> - } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
>> - ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
>> + break;
>> + }
>> + case VFIO_SPAPR_TCE_v2_IOMMU:
>> + v2 = true;
>> + case VFIO_SPAPR_TCE_IOMMU:
>> + {
>> struct vfio_iommu_spapr_tce_info info;
>> - bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU);
>>
>> - ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd);
>> + ret = vfio_init_container(container, group->fd, iommu_type, errp);
>> if (ret) {
>> - error_setg_errno(errp, errno, "failed to set group container");
>> - ret = -errno;
>> - goto free_container_exit;
>> - }
>> - container->iommu_type =
>> - v2 ? VFIO_SPAPR_TCE_v2_IOMMU : VFIO_SPAPR_TCE_IOMMU;
>> - ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
>> - if (ret) {
>> - container->iommu_type = VFIO_SPAPR_TCE_IOMMU;
>> - v2 = false;
>> - ret = ioctl(fd, VFIO_SET_IOMMU, container->iommu_type);
>> - }
>> - if (ret) {
>> - error_setg_errno(errp, errno, "failed to set iommu for container");
>> - ret = -errno;
>> goto free_container_exit;
>> }
>>
>> @@ -1222,10 +1253,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
>> info.dma32_window_size - 1,
>> 0x1000);
>> }
>> - } else {
>> - error_setg(errp, "No available IOMMU models");
>> - ret = -EINVAL;
>> - goto free_container_exit;
>> + }
>> }
>>
>> vfio_kvm_device_add_group(group);
>
>
next prev parent reply other threads:[~2018-10-24 9:00 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20180921081819.9203-1-eric.auger@redhat.com>
2018-10-18 10:30 ` [Qemu-devel] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration Liu, Yi L
2018-10-18 15:16 ` Auger Eric
2018-10-19 8:02 ` Liu, Yi L
[not found] ` <20180921081819.9203-8-eric.auger@redhat.com>
2018-10-22 14:39 ` [Qemu-devel] [RFC v2 07/28] hw/vfio/common: Refactor container initialization Greg Kurz
2018-10-24 9:00 ` Auger Eric [this message]
2018-11-23 16:28 ` [Qemu-devel] [RFC v2 00/28] vSMMUv3/pSMMUv3 2 stage VFIO integration Shameerali Kolothum Thodi
2018-11-26 9:56 ` Auger Eric
2018-11-26 15:48 ` Auger Eric
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=ff16761c-9524-4a3e-b000-6cb2ade91b8f@redhat.com \
--to=eric.auger@redhat.com \
--cc=alex.williamson@redhat.com \
--cc=cdall@kernel.org \
--cc=eric.auger.pro@gmail.com \
--cc=groug@kaod.org \
--cc=jean-philippe.brucker@arm.com \
--cc=mst@redhat.com \
--cc=peter.maydell@linaro.org \
--cc=peterx@redhat.com \
--cc=qemu-arm@nongnu.org \
--cc=qemu-devel@nongnu.org \
--cc=yi.l.liu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).