From: Jacob Pan <jacob.pan@linux.microsoft.com>
To: Jason Gunthorpe <jgg@nvidia.com>
Cc: iommu@lists.linux.dev,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
Joerg Roedel <joro@8bytes.org>,
Robin Murphy <robin.murphy@arm.com>,
virtualization@lists.linux.dev, Will Deacon <will@kernel.org>,
Eric Auger <eric.auger@redhat.com>,
patches@lists.linux.dev, jacob.pan@linux.microsoft.com
Subject: Re: [PATCH 3/5] iommu/virtio: Move to domain_alloc_paging()
Date: Wed, 12 Feb 2025 11:22:35 -0800 [thread overview]
Message-ID: <20250212112235.714b0a14@DESKTOP-0403QTC.> (raw)
In-Reply-To: <3-v1-91eed9c8014a+53a37-iommu_virtio_domains_jgg@nvidia.com>
Hi Jason,
On Fri, 7 Feb 2025 10:46:03 -0400
Jason Gunthorpe <jgg@nvidia.com> wrote:
> virtio has the complication that it sometimes wants to return a paging
> domain for IDENTITY which makes this conversion a little different
> than other drivers.
>
> Add a viommu_domain_alloc_paging() that combines
> viommu_domain_alloc() and viommu_domain_finalise() to always return a
> fully initialized and finalized paging domain.
>
Slightly off topic, but still related to paging domains:
the virtio spec extension for page tables was introduced a while ago[1];
it adds support for guest-owned page tables for paging domains.
Do you foresee that the implementation could leverage your generic iommu_pt
work, i.e. for building guest IO page tables? I think it would add a new
flavor (page-table based, in addition to map/unmap) to
viommu_domain_alloc_paging().
[1]
https://lore.kernel.org/all/20231006160947.2227396-8-jean-philippe@linaro.org/T/
> Use viommu_domain_alloc_identity() to implement the special non-bypass
> IDENTITY flow by calling viommu_domain_alloc_paging() then
> viommu_domain_map_identity().
>
> Remove support for deferred finalize and the vdomain->mutex.
>
> Remove core support for domain_alloc() IDENTITY as virtio was the last
> driver using it.
>
> Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
> ---
> drivers/iommu/iommu.c | 6 --
> drivers/iommu/virtio-iommu.c | 114
> ++++++++++++++++------------------- 2 files changed, 53
> insertions(+), 67 deletions(-)
>
> diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
> index ee33d26dfcd40d..73a05b34de4768 100644
> --- a/drivers/iommu/iommu.c
> +++ b/drivers/iommu/iommu.c
> @@ -1599,12 +1599,6 @@ static struct iommu_domain
> *__iommu_alloc_identity_domain(struct device *dev) domain =
> ops->domain_alloc_identity(dev); if (IS_ERR(domain))
> return domain;
> - } else if (ops->domain_alloc) {
> - domain = ops->domain_alloc(IOMMU_DOMAIN_IDENTITY);
> - if (!domain)
> - return ERR_PTR(-ENOMEM);
> - if (IS_ERR(domain))
> - return domain;
> } else {
> return ERR_PTR(-EOPNOTSUPP);
> }
> diff --git a/drivers/iommu/virtio-iommu.c
> b/drivers/iommu/virtio-iommu.c index c71a996760bddb..79b471c03b6ee4
> 100644 --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -63,7 +63,6 @@ struct viommu_mapping {
> struct viommu_domain {
> struct iommu_domain domain;
> struct viommu_dev *viommu;
> - struct mutex mutex; /* protects
> viommu pointer */ unsigned int id;
> u32 map_flags;
>
> @@ -97,6 +96,8 @@ struct viommu_event {
> };
> };
>
> +static struct viommu_domain viommu_identity_domain;
> +
> #define to_viommu_domain(domain) \
> container_of(domain, struct viommu_domain, domain)
>
> @@ -653,65 +654,45 @@ static void viommu_event_handler(struct
> virtqueue *vq)
> /* IOMMU API */
>
> -static struct iommu_domain *viommu_domain_alloc(unsigned type)
> +static struct iommu_domain *viommu_domain_alloc_paging(struct device
> *dev) {
> - struct viommu_domain *vdomain;
> -
> - if (type != IOMMU_DOMAIN_UNMANAGED &&
> - type != IOMMU_DOMAIN_DMA &&
> - type != IOMMU_DOMAIN_IDENTITY)
> - return NULL;
> -
> - vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> - if (!vdomain)
> - return NULL;
> -
> - mutex_init(&vdomain->mutex);
> - spin_lock_init(&vdomain->mappings_lock);
> - vdomain->mappings = RB_ROOT_CACHED;
> -
> - return &vdomain->domain;
> -}
> -
> -static int viommu_domain_finalise(struct viommu_endpoint *vdev,
> - struct iommu_domain *domain)
> -{
> - int ret;
> - unsigned long viommu_page_size;
> + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
> struct viommu_dev *viommu = vdev->viommu;
> - struct viommu_domain *vdomain = to_viommu_domain(domain);
> + unsigned long viommu_page_size;
> + struct viommu_domain *vdomain;
> + int ret;
>
> viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
> if (viommu_page_size > PAGE_SIZE) {
> dev_err(vdev->dev,
> "granule 0x%lx larger than system page size
> 0x%lx\n", viommu_page_size, PAGE_SIZE);
> - return -ENODEV;
> + return ERR_PTR(-ENODEV);
> }
>
> + vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
> + if (!vdomain)
> + return ERR_PTR(-ENOMEM);
> +
> + spin_lock_init(&vdomain->mappings_lock);
> + vdomain->mappings = RB_ROOT_CACHED;
> +
> ret = ida_alloc_range(&viommu->domain_ids,
> viommu->first_domain, viommu->last_domain, GFP_KERNEL);
> - if (ret < 0)
> - return ret;
> -
> - vdomain->id = (unsigned int)ret;
> -
> - domain->pgsize_bitmap = viommu->pgsize_bitmap;
> - domain->geometry = viommu->geometry;
> -
> - vdomain->map_flags = viommu->map_flags;
> - vdomain->viommu = viommu;
> -
> - if (domain->type == IOMMU_DOMAIN_IDENTITY) {
> - ret = viommu_domain_map_identity(vdev, vdomain);
> - if (ret) {
> - ida_free(&viommu->domain_ids, vdomain->id);
> - vdomain->viommu = NULL;
> - return ret;
> - }
> + if (ret < 0) {
> + kfree(vdomain);
> + return ERR_PTR(ret);
> }
>
> - return 0;
> + vdomain->id = (unsigned int)ret;
> +
> + vdomain->domain.pgsize_bitmap = viommu->pgsize_bitmap;
> + vdomain->domain.geometry = viommu->geometry;
> +
> + vdomain->map_flags = viommu->map_flags;
> + vdomain->viommu = viommu;
> +
> + return &vdomain->domain;
> }
>
> static void viommu_domain_free(struct iommu_domain *domain)
> @@ -727,6 +708,28 @@ static void viommu_domain_free(struct
> iommu_domain *domain) kfree(vdomain);
> }
>
> +static struct iommu_domain *viommu_domain_alloc_identity(struct
> device *dev) +{
> + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
> + struct iommu_domain *domain;
> + int ret;
> +
> + if (virtio_has_feature(vdev->viommu->vdev,
> + VIRTIO_IOMMU_F_BYPASS_CONFIG))
> + return &viommu_identity_domain.domain;
> +
> + domain = viommu_domain_alloc_paging(dev);
> + if (IS_ERR(domain))
> + return domain;
> +
> + ret = viommu_domain_map_identity(vdev,
> to_viommu_domain(domain));
> + if (ret) {
> + viommu_domain_free(domain);
> + return ERR_PTR(ret);
> + }
> + return domain;
> +}
> +
> static int viommu_attach_dev(struct iommu_domain *domain, struct
> device *dev) {
> int ret = 0;
> @@ -734,20 +737,8 @@ static int viommu_attach_dev(struct iommu_domain
> *domain, struct device *dev) struct viommu_endpoint *vdev =
> dev_iommu_priv_get(dev); struct viommu_domain *vdomain =
> to_viommu_domain(domain);
> - mutex_lock(&vdomain->mutex);
> - if (!vdomain->viommu) {
> - /*
> - * Properly initialize the domain now that we know
> which viommu
> - * owns it.
> - */
> - ret = viommu_domain_finalise(vdev, domain);
> - } else if (vdomain->viommu != vdev->viommu) {
> - ret = -EINVAL;
> - }
> - mutex_unlock(&vdomain->mutex);
> -
> - if (ret)
> - return ret;
> + if (vdomain->viommu != vdev->viommu)
> + return -EINVAL;
>
> /*
> * In the virtio-iommu device, when attaching the endpoint
> to a new @@ -1098,7 +1089,8 @@ static bool viommu_capable(struct
> device *dev, enum iommu_cap cap) static struct iommu_ops viommu_ops =
> { .identity_domain = &viommu_identity_domain.domain,
> .capable = viommu_capable,
> - .domain_alloc = viommu_domain_alloc,
> + .domain_alloc_identity = viommu_domain_alloc_identity,
> + .domain_alloc_paging = viommu_domain_alloc_paging,
> .probe_device = viommu_probe_device,
> .release_device = viommu_release_device,
> .device_group = viommu_device_group,
next prev parent reply other threads:[~2025-02-12 19:22 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-02-07 14:46 [PATCH 0/5] Convert virtio-iommu to domain_alloc_paging() Jason Gunthorpe
2025-02-07 14:46 ` [PATCH 1/5] iommu/virtio: Break out bypass identity support into a global static Jason Gunthorpe
2025-02-12 0:43 ` Jacob Pan
2025-02-18 18:21 ` Jason Gunthorpe
2025-02-21 11:35 ` Jean-Philippe Brucker
2025-02-24 19:37 ` Jason Gunthorpe
2025-02-07 14:46 ` [PATCH 2/5] iommu: Add domain_alloc_identity() Jason Gunthorpe
2025-02-12 13:56 ` Robin Murphy
2025-02-12 14:03 ` Jason Gunthorpe
2025-02-12 14:16 ` Robin Murphy
2025-02-12 14:45 ` Jason Gunthorpe
2025-02-07 14:46 ` [PATCH 3/5] iommu/virtio: Move to domain_alloc_paging() Jason Gunthorpe
2025-02-12 19:22 ` Jacob Pan [this message]
2025-02-12 23:30 ` Jason Gunthorpe
2025-02-13 5:47 ` Jacob Pan
2025-02-18 20:01 ` Jason Gunthorpe
[not found] ` <67ad876d.170a0220.3c21dc.85ceSMTPIN_ADDED_BROKEN@mx.google.com>
2025-02-13 9:46 ` Jean-Philippe Brucker
2025-02-13 17:03 ` Yu Zhang
2025-02-13 18:09 ` Jean-Philippe Brucker
2025-02-19 9:39 ` Yu Zhang
2025-02-19 10:35 ` Jean-Philippe Brucker
2025-02-19 11:11 ` Yu Zhang
2025-02-19 11:57 ` Jean-Philippe Brucker
2025-02-19 13:10 ` Yi Liu
2025-02-20 2:58 ` Baolu Lu
2025-02-20 3:44 ` Yu Zhang
2025-02-07 14:46 ` [PATCH 4/5] iommu: Do not call domain_alloc() in iommu_sva_domain_alloc() Jason Gunthorpe
2025-02-07 14:46 ` [PATCH 5/5] iommu: Hide ops.domain_alloc behind CONFIG_FSL_PAMU Jason Gunthorpe
2025-02-12 0:41 ` [PATCH 0/5] Convert virtio-iommu to domain_alloc_paging() Jacob Pan
2025-02-12 12:50 ` Jason Gunthorpe
2025-02-12 18:50 ` Jacob Pan
2025-02-12 20:10 ` Robin Murphy
2025-02-21 11:42 ` Jean-Philippe Brucker
2025-02-24 19:39 ` Jason Gunthorpe
[not found] ` <67abee53.170a0220.154671.ae28SMTPIN_ADDED_BROKEN@mx.google.com>
2025-02-12 11:58 ` Jean-Philippe Brucker
2025-02-12 17:05 ` Jacob Pan
[not found] ` <67acd4e2.630a0220.365aab.e098SMTPIN_ADDED_BROKEN@mx.google.com>
2025-02-12 19:16 ` Jean-Philippe Brucker
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250212112235.714b0a14@DESKTOP-0403QTC. \
--to=jacob.pan@linux.microsoft.com \
--cc=eric.auger@redhat.com \
--cc=iommu@lists.linux.dev \
--cc=jean-philippe@linaro.org \
--cc=jgg@nvidia.com \
--cc=joro@8bytes.org \
--cc=patches@lists.linux.dev \
--cc=robin.murphy@arm.com \
--cc=virtualization@lists.linux.dev \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox