* [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation
@ 2022-06-05 16:11 Tina Zhang
2022-07-15 7:17 ` Joerg Roedel
2022-07-15 7:45 ` Jean-Philippe Brucker
0 siblings, 2 replies; 4+ messages in thread
From: Tina Zhang @ 2022-06-05 16:11 UTC (permalink / raw)
To: iommu; +Cc: joro, will, kevin.tian, jean-philippe, baolu.lu, Tina Zhang
Map/unmap_pages() allow mapping and unmapping multiple pages of the same
size in one call, which can improve performance by reducing the number
of vmexits. With map/unmap_pages() implemented, the prior map/unmap()
callbacks are deprecated.
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
---
drivers/iommu/virtio-iommu.c | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 25be4b822aa0..3c943dbd9fd0 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
return 0;
}
-static int viommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t pgsize, size_t pgcount,
+ int prot, gfp_t gfp, size_t *mapped)
{
int ret;
u32 flags;
+ size_t size = pgsize * pgcount;
u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
viommu_del_mappings(vdomain, iova, end);
+ else if (mapped)
+ *mapped = size;
return ret;
}
-static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
- size_t size, struct iommu_iotlb_gather *gather)
+static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
+ size_t pgsize, size_t pgcount,
+ struct iommu_iotlb_gather *gather)
{
int ret = 0;
size_t unmapped;
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
+ size_t size = pgsize * pgcount;
unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
@@ -1018,8 +1024,8 @@ static struct iommu_ops viommu_ops = {
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
- .map = viommu_map,
- .unmap = viommu_unmap,
+ .map_pages = viommu_map_pages,
+ .unmap_pages = viommu_unmap_pages,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread* Re: [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation
2022-06-05 16:11 [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation Tina Zhang
@ 2022-07-15 7:17 ` Joerg Roedel
2022-07-15 7:45 ` Jean-Philippe Brucker
1 sibling, 0 replies; 4+ messages in thread
From: Joerg Roedel @ 2022-07-15 7:17 UTC (permalink / raw)
To: Tina Zhang, jean-philippe
Cc: iommu, will, kevin.tian, jean-philippe, baolu.lu
Hey Jean-Philippe,
can you have a look at this please?
Thanks,
Joerg
On Mon, Jun 06, 2022 at 12:11:52AM +0800, Tina Zhang wrote:
> Map/unmap_pages() allow mapping and unmapping multiple pages of the same
> size in one call, which can improve performance by reducing the number
> of vmexits. With map/unmap_pages() implemented, the prior map/unmap()
> callbacks are deprecated.
>
> Signed-off-by: Tina Zhang <tina.zhang@intel.com>
> ---
> drivers/iommu/virtio-iommu.c | 18 ++++++++++++------
> 1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 25be4b822aa0..3c943dbd9fd0 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> return 0;
> }
>
> -static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> - phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> +static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
> + phys_addr_t paddr, size_t pgsize, size_t pgcount,
> + int prot, gfp_t gfp, size_t *mapped)
> {
> int ret;
> u32 flags;
> + size_t size = pgsize * pgcount;
> u64 end = iova + size - 1;
> struct virtio_iommu_req_map map;
> struct viommu_domain *vdomain = to_viommu_domain(domain);
> @@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> if (ret)
> viommu_del_mappings(vdomain, iova, end);
> + else if (mapped)
> + *mapped = size;
>
> return ret;
> }
>
> -static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> - size_t size, struct iommu_iotlb_gather *gather)
> +static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
> + size_t pgsize, size_t pgcount,
> + struct iommu_iotlb_gather *gather)
> {
> int ret = 0;
> size_t unmapped;
> struct virtio_iommu_req_unmap unmap;
> struct viommu_domain *vdomain = to_viommu_domain(domain);
> + size_t size = pgsize * pgcount;
>
> unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
> if (unmapped < size)
> @@ -1018,8 +1024,8 @@ static struct iommu_ops viommu_ops = {
> .owner = THIS_MODULE,
> .default_domain_ops = &(const struct iommu_domain_ops) {
> .attach_dev = viommu_attach_dev,
> - .map = viommu_map,
> - .unmap = viommu_unmap,
> + .map_pages = viommu_map_pages,
> + .unmap_pages = viommu_unmap_pages,
> .iova_to_phys = viommu_iova_to_phys,
> .iotlb_sync = viommu_iotlb_sync,
> .free = viommu_domain_free,
> --
> 2.34.1
^ permalink raw reply [flat|nested] 4+ messages in thread* Re: [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation
2022-06-05 16:11 [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation Tina Zhang
2022-07-15 7:17 ` Joerg Roedel
@ 2022-07-15 7:45 ` Jean-Philippe Brucker
2022-07-15 8:10 ` Joerg Roedel
1 sibling, 1 reply; 4+ messages in thread
From: Jean-Philippe Brucker @ 2022-07-15 7:45 UTC (permalink / raw)
To: Tina Zhang; +Cc: iommu, joro, will, kevin.tian, baolu.lu
On Mon, Jun 06, 2022 at 12:11:52AM +0800, Tina Zhang wrote:
> Map/unmap_pages() allow mapping and unmapping multiple pages of the same
> size in one call, which can improve performance by reducing the number
> of vmexits. With map/unmap_pages() implemented, the prior map/unmap()
> callbacks are deprecated.
>
> Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
> ---
> drivers/iommu/virtio-iommu.c | 18 ++++++++++++------
> 1 file changed, 12 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 25be4b822aa0..3c943dbd9fd0 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -788,11 +788,13 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
> return 0;
> }
>
> -static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> - phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
> +static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
> + phys_addr_t paddr, size_t pgsize, size_t pgcount,
> + int prot, gfp_t gfp, size_t *mapped)
> {
> int ret;
> u32 flags;
> + size_t size = pgsize * pgcount;
> u64 end = iova + size - 1;
> struct virtio_iommu_req_map map;
> struct viommu_domain *vdomain = to_viommu_domain(domain);
> @@ -823,17 +825,21 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
> ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
> if (ret)
> viommu_del_mappings(vdomain, iova, end);
> + else if (mapped)
> + *mapped = size;
>
> return ret;
> }
>
> -static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
> - size_t size, struct iommu_iotlb_gather *gather)
> +static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
> + size_t pgsize, size_t pgcount,
> + struct iommu_iotlb_gather *gather)
> {
> int ret = 0;
> size_t unmapped;
> struct virtio_iommu_req_unmap unmap;
> struct viommu_domain *vdomain = to_viommu_domain(domain);
> + size_t size = pgsize * pgcount;
>
> unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
> if (unmapped < size)
> @@ -1018,8 +1024,8 @@ static struct iommu_ops viommu_ops = {
> .owner = THIS_MODULE,
> .default_domain_ops = &(const struct iommu_domain_ops) {
> .attach_dev = viommu_attach_dev,
> - .map = viommu_map,
> - .unmap = viommu_unmap,
> + .map_pages = viommu_map_pages,
> + .unmap_pages = viommu_unmap_pages,
> .iova_to_phys = viommu_iova_to_phys,
> .iotlb_sync = viommu_iotlb_sync,
> .free = viommu_domain_free,
> --
> 2.34.1
>
^ permalink raw reply [flat|nested] 4+ messages in thread* Re: [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation
2022-07-15 7:45 ` Jean-Philippe Brucker
@ 2022-07-15 8:10 ` Joerg Roedel
0 siblings, 0 replies; 4+ messages in thread
From: Joerg Roedel @ 2022-07-15 8:10 UTC (permalink / raw)
To: Jean-Philippe Brucker; +Cc: Tina Zhang, iommu, will, kevin.tian, baolu.lu
On Fri, Jul 15, 2022 at 08:45:13AM +0100, Jean-Philippe Brucker wrote:
> On Mon, Jun 06, 2022 at 12:11:52AM +0800, Tina Zhang wrote:
> > Map/unmap_pages() allow mapping and unmapping multiple pages of the same
> > size in one call, which can improve performance by reducing the number
> > of vmexits. With map/unmap_pages() implemented, the prior map/unmap()
> > callbacks are deprecated.
> >
> > Signed-off-by: Tina Zhang <tina.zhang@intel.com>
>
> Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Patch is now applied, Thanks Tina and Jean-Philippe.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2022-07-15 8:10 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2022-06-05 16:11 [PATCH v2] iommu/virtio: Add map/unmap_pages() callbacks implementation Tina Zhang
2022-07-15 7:17 ` Joerg Roedel
2022-07-15 7:45 ` Jean-Philippe Brucker
2022-07-15 8:10 ` Joerg Roedel
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox