From mboxrd@z Thu Jan  1 00:00:00 1970
From: Alex Williamson
Subject: Re: [PATCH v6 08/22] vfio: VFIO_IOMMU_CACHE_INVALIDATE
Date: Thu, 21 Mar 2019 16:43:00 -0600
Message-ID: <20190321164300.12dc2dcb@x1.home>
References: <20190317172232.1068-1-eric.auger@redhat.com>
	<20190317172232.1068-9-eric.auger@redhat.com>
Mime-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8BIT
Return-path:
In-Reply-To: <20190317172232.1068-9-eric.auger@redhat.com>
Sender: linux-kernel-owner@vger.kernel.org
To: Eric Auger
Cc: eric.auger.pro@gmail.com, iommu@lists.linux-foundation.org,
	linux-kernel@vger.kernel.org, kvm@vger.kernel.org,
	kvmarm@lists.cs.columbia.edu, joro@8bytes.org,
	jacob.jun.pan@linux.intel.com, yi.l.liu@linux.intel.com,
	jean-philippe.brucker@arm.com, will.deacon@arm.com,
	robin.murphy@arm.com, kevin.tian@intel.com, ashok.raj@intel.com,
	marc.zyngier@arm.com, christoffer.dall@arm.com,
	peter.maydell@linaro.org, vincent.stehle@arm.com
List-Id: iommu@lists.linux-foundation.org

On Sun, 17 Mar 2019 18:22:18 +0100
Eric Auger wrote:

> From: "Liu, Yi L"
> 
> When the guest "owns" the stage 1 translation structures, the host
> IOMMU driver has no knowledge of caching structure updates unless
> the guest invalidation requests are trapped and passed down to the
> host.
> 
> This patch adds the VFIO_IOMMU_CACHE_INVALIDATE ioctl, which aims
> at propagating guest stage 1 IOMMU cache invalidations to the host.
> 
> Signed-off-by: Liu, Yi L
> Signed-off-by: Eric Auger
> 
> ---
> 
> v2 -> v3:
> - introduce vfio_iommu_for_each_dev back in this patch
> 
> v1 -> v2:
> - s/TLB/CACHE
> - remove vfio_iommu_task usage
> - commit message rewording
> ---
>  drivers/vfio/vfio_iommu_type1.c | 47 +++++++++++++++++++++++++++++++++
>  include/uapi/linux/vfio.h       | 13 +++++++++
>  2 files changed, 60 insertions(+)
> 
> diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
> index 222e9199edbf..12a40b9db6aa 100644
> --- a/drivers/vfio/vfio_iommu_type1.c
> +++ b/drivers/vfio/vfio_iommu_type1.c
> @@ -113,6 +113,26 @@ struct vfio_regions {
>  #define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)	\
>  					(!list_empty(&iommu->domain_list))
> 

static struct foo { struct iommu_domain *domain; void *data; };

> +/* iommu->lock must be held */
> +static int
> +vfio_iommu_for_each_dev(struct vfio_iommu *iommu, void *data,
> +			int (*fn)(struct device *, void *))
> +{
> +	struct vfio_domain *d;
> +	struct vfio_group *g;
> +	int ret = 0;

	struct foo bar = { .data = data };

> +
> +	list_for_each_entry(d, &iommu->domain_list, next) {

		bar.domain = d->domain;

> +		list_for_each_entry(g, &d->group_list, next) {
> +			ret = iommu_group_for_each_dev(g->iommu_group,
> +						       data, fn);

s/data/&bar/

> +			if (ret)
> +				break;
> +		}
> +	}
> +	return ret;
> +}
> +
>  static int put_pfn(unsigned long pfn, int prot);
> 
>  /*
> @@ -1681,6 +1701,15 @@ vfio_attach_pasid_table(struct vfio_iommu *iommu,
>  	return ret;
>  }
> 
> +static int vfio_cache_inv_fn(struct device *dev, void *data)
> +{

	struct foo *bar = data;

> +	struct vfio_iommu_type1_cache_invalidate *ustruct =
> +		(struct vfio_iommu_type1_cache_invalidate *)data;

	... = bar->data;

> +	struct iommu_domain *d = iommu_get_domain_for_dev(dev);

	... = bar->domain;

¯\_(ツ)_/¯ seems more efficient than doing a lookup.
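
Pulled together, the rework suggested by the comments above would look
roughly like this (untested sketch, keeping the placeholder struct
foo/bar names from the comments; only the plumbing changes, the loop
structure stays as in the patch):

struct foo {
	struct iommu_domain *domain;
	void *data;
};

/* iommu->lock must be held */
static int
vfio_iommu_for_each_dev(struct vfio_iommu *iommu, void *data,
			int (*fn)(struct device *, void *))
{
	struct vfio_domain *d;
	struct vfio_group *g;
	struct foo bar = { .data = data };	/* carries caller data + current domain */
	int ret = 0;

	list_for_each_entry(d, &iommu->domain_list, next) {
		bar.domain = d->domain;
		list_for_each_entry(g, &d->group_list, next) {
			ret = iommu_group_for_each_dev(g->iommu_group,
						       &bar, fn);
			if (ret)
				break;
		}
	}
	return ret;
}

static int vfio_cache_inv_fn(struct device *dev, void *data)
{
	struct foo *bar = data;
	struct vfio_iommu_type1_cache_invalidate *ustruct = bar->data;

	/* use the domain we were called for, no per-device lookup */
	return iommu_cache_invalidate(bar->domain, dev, &ustruct->info);
}

That way the callback gets the domain it is being invoked for rather
than re-deriving it for every device.
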
> +
> +	return iommu_cache_invalidate(d, dev, &ustruct->info);
> +}
> +
>  static long vfio_iommu_type1_ioctl(void *iommu_data,
>  				   unsigned int cmd, unsigned long arg)
>  {
> @@ -1767,6 +1796,24 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
>  	} else if (cmd == VFIO_IOMMU_DETACH_PASID_TABLE) {
>  		vfio_detach_pasid_table(iommu);
>  		return 0;
> +	} else if (cmd == VFIO_IOMMU_CACHE_INVALIDATE) {
> +		struct vfio_iommu_type1_cache_invalidate ustruct;
> +		int ret;
> +
> +		minsz = offsetofend(struct vfio_iommu_type1_cache_invalidate,
> +				    info);
> +
> +		if (copy_from_user(&ustruct, (void __user *)arg, minsz))
> +			return -EFAULT;
> +
> +		if (ustruct.argsz < minsz || ustruct.flags)
> +			return -EINVAL;
> +
> +		mutex_lock(&iommu->lock);
> +		ret = vfio_iommu_for_each_dev(iommu, &ustruct,
> +					      vfio_cache_inv_fn);

Guess what has a version field that never gets checked ;)  Thanks,

Alex

> +		mutex_unlock(&iommu->lock);
> +		return ret;
>  	}
> 
>  	return -ENOTTY;
> diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
> index 329d378565d9..29f0ef2d805d 100644
> --- a/include/uapi/linux/vfio.h
> +++ b/include/uapi/linux/vfio.h
> @@ -776,6 +776,19 @@ struct vfio_iommu_type1_attach_pasid_table {
>  #define VFIO_IOMMU_ATTACH_PASID_TABLE	_IO(VFIO_TYPE, VFIO_BASE + 22)
>  #define VFIO_IOMMU_DETACH_PASID_TABLE	_IO(VFIO_TYPE, VFIO_BASE + 23)
> 
> +/**
> + * VFIO_IOMMU_CACHE_INVALIDATE - _IOWR(VFIO_TYPE, VFIO_BASE + 24,
> + *			struct vfio_iommu_type1_cache_invalidate)
> + *
> + * Propagate guest IOMMU cache invalidation to the host.
> + */
> +struct vfio_iommu_type1_cache_invalidate {
> +	__u32   argsz;
> +	__u32   flags;
> +	struct	iommu_cache_invalidate_info info;
> +};
> +#define VFIO_IOMMU_CACHE_INVALIDATE _IO(VFIO_TYPE, VFIO_BASE + 24)
> +
>  /* -------- Additional API for SPAPR TCE (Server POWERPC) IOMMU -------- */
> 
>  /*
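
To spell out the version remark above: the embedded
iommu_cache_invalidate_info has a version field the handler never looks
at, so the argsz/flags check would grow something like the line below
(untested sketch; the constant name, written here as
IOMMU_CACHE_INVALIDATE_INFO_VERSION_1, is assumed from the companion
iommu uapi patch rather than taken from this one):

		/* reject unknown uapi versions, not just bad argsz/flags */
		if (ustruct.argsz < minsz || ustruct.flags ||
		    ustruct.info.version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
			return -EINVAL;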