From: Alex Williamson <alex.williamson@redhat.com>
To: Yi Liu <yi.l.liu@intel.com>
Cc: jgg@nvidia.com, kevin.tian@intel.com, joro@8bytes.org,
robin.murphy@arm.com, cohuck@redhat.com, eric.auger@redhat.com,
nicolinc@nvidia.com, kvm@vger.kernel.org, mjrosato@linux.ibm.com,
chao.p.peng@linux.intel.com, yi.y.sun@linux.intel.com,
peterx@redhat.com, jasowang@redhat.com,
shameerali.kolothum.thodi@huawei.com, lulu@redhat.com,
suravee.suthikulpanit@amd.com,
intel-gvt-dev@lists.freedesktop.org,
intel-gfx@lists.freedesktop.org, linux-s390@vger.kernel.org,
xudong.hao@intel.com, yan.y.zhao@intel.com,
terrence.xu@intel.com, yanting.jiang@intel.com
Subject: Re: [PATCH v3 08/12] vfio/pci: Renaming for accepting device fd in hot reset path
Date: Tue, 4 Apr 2023 15:23:43 -0600 [thread overview]
Message-ID: <20230404152343.790d6e78.alex.williamson@redhat.com> (raw)
In-Reply-To: <20230401144429.88673-9-yi.l.liu@intel.com>
On Sat, 1 Apr 2023 07:44:25 -0700
Yi Liu <yi.l.liu@intel.com> wrote:
> No functional change is intended.
>
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
> Tested-by: Yanting Jiang <yanting.jiang@intel.com>
> Signed-off-by: Yi Liu <yi.l.liu@intel.com>
> ---
> drivers/vfio/pci/vfio_pci_core.c | 52 ++++++++++++++++----------------
> 1 file changed, 26 insertions(+), 26 deletions(-)
>
> diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
> index 2a510b71edcb..da6325008872 100644
> --- a/drivers/vfio/pci/vfio_pci_core.c
> +++ b/drivers/vfio/pci/vfio_pci_core.c
> @@ -177,10 +177,10 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_core_device *vdev)
> }
> }
>
> -struct vfio_pci_group_info;
> +struct vfio_pci_file_info;
> static void vfio_pci_dev_set_try_reset(struct vfio_device_set *dev_set);
> static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
> - struct vfio_pci_group_info *groups,
> + struct vfio_pci_file_info *info,
> struct iommufd_ctx *iommufd_ctx);
>
> /*
> @@ -800,7 +800,7 @@ static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
> return 0;
> }
>
> -struct vfio_pci_group_info {
> +struct vfio_pci_file_info {
> int count;
> struct file **files;
> };
> @@ -1257,14 +1257,14 @@ static int vfio_pci_ioctl_get_pci_hot_reset_info(
> }
>
> static int
> -vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
> - struct vfio_pci_hot_reset *hdr,
> - bool slot,
> - struct vfio_pci_hot_reset __user *arg)
> +vfio_pci_ioctl_pci_hot_reset_files(struct vfio_pci_core_device *vdev,
> + struct vfio_pci_hot_reset *hdr,
> + bool slot,
> + struct vfio_pci_hot_reset __user *arg)
> {
> - int32_t *group_fds;
> + int32_t *fds;
> struct file **files;
> - struct vfio_pci_group_info info;
> + struct vfio_pci_file_info info;
> int file_idx, count = 0, ret = 0;
>
> /*
> @@ -1281,17 +1281,17 @@ vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
> if (hdr->count > count)
> return -EINVAL;
>
> - group_fds = kcalloc(hdr->count, sizeof(*group_fds), GFP_KERNEL);
> + fds = kcalloc(hdr->count, sizeof(*fds), GFP_KERNEL);
> files = kcalloc(hdr->count, sizeof(*files), GFP_KERNEL);
> - if (!group_fds || !files) {
> - kfree(group_fds);
> + if (!fds || !files) {
> + kfree(fds);
> kfree(files);
> return -ENOMEM;
> }
>
> - if (copy_from_user(group_fds, arg->group_fds,
> - hdr->count * sizeof(*group_fds))) {
> - kfree(group_fds);
> + if (copy_from_user(fds, arg->group_fds,
> + hdr->count * sizeof(*fds))) {
> + kfree(fds);
> kfree(files);
> return -EFAULT;
> }
> @@ -1301,7 +1301,7 @@ vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
> * the reset
> */
> for (file_idx = 0; file_idx < hdr->count; file_idx++) {
> - struct file *file = fget(group_fds[file_idx]);
> + struct file *file = fget(fds[file_idx]);
>
> if (!file) {
> ret = -EBADF;
> @@ -1318,9 +1318,9 @@ vfio_pci_ioctl_pci_hot_reset_groups(struct vfio_pci_core_device *vdev,
> files[file_idx] = file;
> }
>
> - kfree(group_fds);
> + kfree(fds);
>
> - /* release reference to groups on error */
> + /* release reference to fds on error */
> if (ret)
> goto hot_reset_release;
>
> @@ -1358,7 +1358,7 @@ static int vfio_pci_ioctl_pci_hot_reset(struct vfio_pci_core_device *vdev,
> return -ENODEV;
>
> if (hdr.count)
> - return vfio_pci_ioctl_pci_hot_reset_groups(vdev, &hdr, slot, arg);
> + return vfio_pci_ioctl_pci_hot_reset_files(vdev, &hdr, slot, arg);
>
> iommufd = vfio_iommufd_physical_ictx(&vdev->vdev);
>
> @@ -2329,16 +2329,16 @@ const struct pci_error_handlers vfio_pci_core_err_handlers = {
> };
> EXPORT_SYMBOL_GPL(vfio_pci_core_err_handlers);
>
> -static bool vfio_dev_in_groups(struct vfio_pci_core_device *vdev,
> - struct vfio_pci_group_info *groups)
> +static bool vfio_dev_in_files(struct vfio_pci_core_device *vdev,
> + struct vfio_pci_file_info *info)
> {
> unsigned int i;
>
> - if (!groups)
> + if (!info)
> return false;
>
> - for (i = 0; i < groups->count; i++)
> - if (vfio_file_has_dev(groups->files[i], &vdev->vdev))
> + for (i = 0; i < info->count; i++)
> + if (vfio_file_has_dev(info->files[i], &vdev->vdev))
> return true;
> return false;
> }
> @@ -2429,7 +2429,7 @@ static bool vfio_dev_in_iommufd_ctx(struct vfio_pci_core_device *vdev,
> * get each memory_lock.
> */
> static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
> - struct vfio_pci_group_info *groups,
> + struct vfio_pci_file_info *info,
> struct iommufd_ctx *iommufd_ctx)
> {
> struct vfio_pci_core_device *cur_mem;
> @@ -2478,7 +2478,7 @@ static int vfio_pci_dev_set_hot_reset(struct vfio_device_set *dev_set,
> * the calling device is in a singleton dev_set.
> */
> if (cur_vma->vdev.open_count &&
> - !vfio_dev_in_groups(cur_vma, groups) &&
> + !vfio_dev_in_files(cur_vma, info) &&
> !vfio_dev_in_iommufd_ctx(cur_vma, iommufd_ctx) &&
> (dev_set->device_count > 1)) {
> ret = -EINVAL;
At this point, vfio_dev_in_files() supports both group and cdev fds and
these can be used for both regular IOMMU protected and no-IOMMU
devices AFAICT. We only add this one-off dev_set device count test and
its subtle side-effects in order to support the null-array mode, which
IMO really has yet to be shown as a requirement.
IIRC, we were wanting to add that mode as part of the cdev interface so
that the existence of cdevs implies this support, but now we're already
making use of vfio_pci_hot_reset_info.flags to indicate group-id vs
dev-id in the output, so does anything prevent us from setting another
bit there if/when this feature proves itself useful and error free, to
indicate it's an available mode for the hot-reset ioctl?
With that I think we could drop patches 4 & 5, with a plan to
introduce them later, rather than strong-arming the feature in now
without a proven and available use case. Thanks,
Alex
next prev parent reply other threads:[~2023-04-04 21:24 UTC|newest]
Thread overview: 142+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-04-01 14:44 [PATCH v3 00/12] Introduce new methods for verifying ownership in vfio PCI hot reset Yi Liu
2023-04-01 14:44 ` [PATCH v3 01/12] vfio/pci: Update comment around group_fd get in vfio_pci_ioctl_pci_hot_reset() Yi Liu
2023-04-04 13:59 ` Eric Auger
2023-04-04 14:37 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 02/12] vfio/pci: Only check ownership of opened devices in hot reset Yi Liu
2023-04-04 13:59 ` Eric Auger
2023-04-04 14:37 ` Liu, Yi L
2023-04-04 15:18 ` Eric Auger
2023-04-04 15:29 ` Liu, Yi L
2023-04-04 15:59 ` Eric Auger
2023-04-05 11:41 ` Jason Gunthorpe
2023-04-05 15:14 ` Eric Auger
2023-04-01 14:44 ` [PATCH v3 03/12] vfio/pci: Move the existing hot reset logic to be a helper Yi Liu
2023-04-04 13:59 ` Eric Auger
2023-04-04 14:24 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 04/12] vfio-iommufd: Add helper to retrieve iommufd_ctx and devid for vfio_device Yi Liu
2023-04-04 15:28 ` Eric Auger
2023-04-04 21:48 ` Alex Williamson
2023-04-21 7:11 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 05/12] vfio/pci: Allow passing zero-length fd array in VFIO_DEVICE_PCI_HOT_RESET Yi Liu
2023-04-04 16:54 ` Eric Auger
2023-04-04 20:18 ` Alex Williamson
2023-04-05 7:55 ` Liu, Yi L
2023-04-05 8:01 ` Liu, Yi L
2023-04-05 15:36 ` Alex Williamson
2023-04-05 16:46 ` Jason Gunthorpe
2023-04-05 8:02 ` Eric Auger
2023-04-05 8:09 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 06/12] vfio: Refine vfio file kAPIs for vfio PCI hot reset Yi Liu
2023-04-05 8:27 ` Eric Auger
2023-04-05 9:23 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 07/12] vfio: Accpet device file from vfio PCI hot reset path Yi Liu
2023-04-04 20:31 ` Alex Williamson
2023-04-05 8:07 ` Eric Auger
2023-04-05 8:10 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 08/12] vfio/pci: Renaming for accepting device fd in " Yi Liu
2023-04-04 21:23 ` Alex Williamson [this message]
2023-04-05 9:32 ` Eric Auger
2023-04-01 14:44 ` [PATCH v3 09/12] vfio/pci: Accept device fd in VFIO_DEVICE_PCI_HOT_RESET ioctl Yi Liu
2023-04-05 9:36 ` Eric Auger
2023-04-01 14:44 ` [PATCH v3 10/12] vfio: Mark cdev usage in vfio_device Yi Liu
2023-04-05 11:48 ` Eric Auger
2023-04-21 7:06 ` Liu, Yi L
2023-04-01 14:44 ` [PATCH v3 11/12] iommufd: Define IOMMUFD_INVALID_ID in uapi Yi Liu
2023-04-04 21:00 ` Alex Williamson
2023-04-05 9:31 ` Liu, Yi L
2023-04-05 15:13 ` Alex Williamson
2023-04-05 15:17 ` Liu, Yi L
2023-04-05 11:46 ` Eric Auger
2023-04-01 14:44 ` [PATCH v3 12/12] vfio/pci: Report dev_id in VFIO_DEVICE_GET_PCI_HOT_RESET_INFO Yi Liu
2023-04-03 9:25 ` Liu, Yi L
2023-04-03 15:01 ` Alex Williamson
2023-04-03 15:22 ` Liu, Yi L
2023-04-03 15:32 ` Alex Williamson
2023-04-03 16:12 ` Jason Gunthorpe
2023-04-07 10:09 ` Liu, Yi L
2023-04-07 12:03 ` Alex Williamson
2023-04-07 13:24 ` Liu, Yi L
2023-04-07 13:51 ` Alex Williamson
2023-04-07 14:04 ` Liu, Yi L
2023-04-07 15:14 ` Alex Williamson
2023-04-07 15:47 ` Liu, Yi L
2023-04-07 21:07 ` Alex Williamson
2023-04-08 5:07 ` Liu, Yi L
2023-04-08 14:20 ` Alex Williamson
2023-04-09 11:58 ` Yi Liu
2023-04-09 13:29 ` Alex Williamson
2023-04-10 8:48 ` Liu, Yi L
2023-04-10 14:41 ` Alex Williamson
2023-04-10 15:18 ` Liu, Yi L
2023-04-10 15:23 ` Alex Williamson
2023-04-11 13:34 ` Jason Gunthorpe
2023-04-11 13:33 ` Jason Gunthorpe
2023-04-11 6:16 ` Liu, Yi L
2023-04-04 22:20 ` Alex Williamson
2023-04-05 12:19 ` Eric Auger
2023-04-05 14:04 ` Liu, Yi L
2023-04-05 16:25 ` Alex Williamson
2023-04-05 16:37 ` Jason Gunthorpe
2023-04-05 16:52 ` Alex Williamson
2023-04-05 17:23 ` Jason Gunthorpe
2023-04-05 18:56 ` Alex Williamson
2023-04-05 19:18 ` Alex Williamson
2023-04-05 19:21 ` Jason Gunthorpe
2023-04-05 19:49 ` Alex Williamson
2023-04-05 23:22 ` Jason Gunthorpe
2023-04-06 10:02 ` Liu, Yi L
2023-04-06 17:53 ` Alex Williamson
2023-04-07 10:09 ` Liu, Yi L
2023-04-11 13:24 ` Jason Gunthorpe
[not found] ` <20230411095417.240bac39.alex.williamson@redhat.com>
[not found] ` <20230411111117.0766ad52.alex.williamson@redhat.com>
2023-04-11 18:40 ` Jason Gunthorpe
2023-04-11 21:58 ` Alex Williamson
2023-04-12 0:01 ` Jason Gunthorpe
2023-04-12 7:27 ` Tian, Kevin
2023-04-12 15:05 ` Jason Gunthorpe
2023-04-12 17:01 ` Alex Williamson
2023-04-13 2:57 ` Tian, Kevin
2023-04-12 10:09 ` Liu, Yi L
2023-04-12 16:54 ` Alex Williamson
2023-04-12 16:50 ` Alex Williamson
2023-04-12 20:06 ` Jason Gunthorpe
2023-04-13 8:25 ` Tian, Kevin
2023-04-13 11:50 ` Jason Gunthorpe
2023-04-13 14:35 ` Liu, Yi L
2023-04-13 14:41 ` Jason Gunthorpe
2023-04-13 18:07 ` Alex Williamson
2023-04-14 9:11 ` Tian, Kevin
2023-04-14 11:38 ` Liu, Yi L
2023-04-14 17:10 ` Alex Williamson
2023-04-17 4:20 ` Liu, Yi L
2023-04-17 19:01 ` Alex Williamson
2023-04-17 19:31 ` Jason Gunthorpe
2023-04-17 20:06 ` Alex Williamson
2023-04-18 3:24 ` Tian, Kevin
2023-04-18 4:10 ` Alex Williamson
2023-04-18 5:02 ` Tian, Kevin
2023-04-18 12:59 ` Jason Gunthorpe
2023-04-18 16:44 ` Alex Williamson
2023-04-18 10:34 ` Liu, Yi L
2023-04-18 16:49 ` Alex Williamson
2023-04-18 12:57 ` Jason Gunthorpe
2023-04-18 18:39 ` Alex Williamson
2023-04-20 12:10 ` Liu, Yi L
2023-04-20 14:08 ` Alex Williamson
2023-04-21 22:35 ` Jason Gunthorpe
2023-04-23 14:46 ` Liu, Yi L
2023-04-26 7:22 ` Liu, Yi L
2023-04-26 13:20 ` Alex Williamson
2023-04-26 15:08 ` Liu, Yi L
2023-04-14 16:34 ` Alex Williamson
2023-04-17 13:39 ` Jason Gunthorpe
2023-04-18 1:28 ` Tian, Kevin
2023-04-18 10:23 ` Liu, Yi L
2023-04-18 13:02 ` Jason Gunthorpe
2023-04-23 10:28 ` Liu, Yi L
2023-04-24 17:38 ` Jason Gunthorpe
2023-04-17 14:05 ` Jason Gunthorpe
2023-04-12 7:14 ` Tian, Kevin
2023-04-06 6:34 ` Liu, Yi L
2023-04-06 17:07 ` Alex Williamson
2023-04-05 17:58 ` Eric Auger
2023-04-06 5:31 ` Liu, Yi L
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230404152343.790d6e78.alex.williamson@redhat.com \
--to=alex.williamson@redhat.com \
--cc=chao.p.peng@linux.intel.com \
--cc=cohuck@redhat.com \
--cc=eric.auger@redhat.com \
--cc=intel-gfx@lists.freedesktop.org \
--cc=intel-gvt-dev@lists.freedesktop.org \
--cc=jasowang@redhat.com \
--cc=jgg@nvidia.com \
--cc=joro@8bytes.org \
--cc=kevin.tian@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-s390@vger.kernel.org \
--cc=lulu@redhat.com \
--cc=mjrosato@linux.ibm.com \
--cc=nicolinc@nvidia.com \
--cc=peterx@redhat.com \
--cc=robin.murphy@arm.com \
--cc=shameerali.kolothum.thodi@huawei.com \
--cc=suravee.suthikulpanit@amd.com \
--cc=terrence.xu@intel.com \
--cc=xudong.hao@intel.com \
--cc=yan.y.zhao@intel.com \
--cc=yanting.jiang@intel.com \
--cc=yi.l.liu@intel.com \
--cc=yi.y.sun@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox