public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Samiullah Khawaja <skhawaja@google.com>
To: Jacob Pan <jacob.pan@linux.microsoft.com>
Cc: linux-kernel@vger.kernel.org,
	 "iommu@lists.linux.dev" <iommu@lists.linux.dev>,
	Jason Gunthorpe <jgg@nvidia.com>,
	 Alex Williamson <alex@shazbot.org>,
	Joerg Roedel <joro@8bytes.org>,
	 David Matlack <dmatlack@google.com>,
	Robin Murphy <robin.murphy@arm.com>,
	 Nicolin Chen <nicolinc@nvidia.com>,
	"Tian, Kevin" <kevin.tian@intel.com>,
	 Yi Liu <yi.l.liu@intel.com>,
	pasha.tatashin@soleen.com, Will Deacon <will@kernel.org>,
	 Baolu Lu <baolu.lu@linux.intel.com>
Subject: Re: [PATCH V2 01/11] iommufd: Support a HWPT without an iommu driver for noiommu
Date: Wed, 18 Mar 2026 18:38:14 +0000	[thread overview]
Message-ID: <abrvYSZ425p6a17D@google.com> (raw)
In-Reply-To: <20260312155637.376854-2-jacob.pan@linux.microsoft.com>

On Thu, Mar 12, 2026 at 08:56:27AM -0700, Jacob Pan wrote:
>From: Jason Gunthorpe <jgg@nvidia.com>
>
>Create just a little part of a real iommu driver, enough to
>slot in under the dev_iommu_ops() and allow iommufd to call
>domain_alloc_paging_flags() and fail everything else.
>
>This allows explicitly creating a HWPT under an IOAS.
>
>Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
>Signed-off-by: Jacob Pan <jacob.pan@linux.microsoft.com>
>---
> drivers/iommu/iommufd/Makefile          |  1 +
> drivers/iommu/iommufd/hw_pagetable.c    | 11 ++-
> drivers/iommu/iommufd/hwpt_noiommu.c    | 91 +++++++++++++++++++++++++
> drivers/iommu/iommufd/iommufd_private.h |  2 +
> 4 files changed, 103 insertions(+), 2 deletions(-)
> create mode 100644 drivers/iommu/iommufd/hwpt_noiommu.c
>
>diff --git a/drivers/iommu/iommufd/Makefile b/drivers/iommu/iommufd/Makefile
>index 71d692c9a8f4..2b1a020b14a6 100644
>--- a/drivers/iommu/iommufd/Makefile
>+++ b/drivers/iommu/iommufd/Makefile
>@@ -10,6 +10,7 @@ iommufd-y := \
> 	vfio_compat.o \
> 	viommu.o
>
>+iommufd-$(CONFIG_VFIO_NOIOMMU) += hwpt_noiommu.o
> iommufd-$(CONFIG_IOMMUFD_TEST) += selftest.o
>
> obj-$(CONFIG_IOMMUFD) += iommufd.o
>diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
>index fe789c2dc0c9..37316d77277d 100644
>--- a/drivers/iommu/iommufd/hw_pagetable.c
>+++ b/drivers/iommu/iommufd/hw_pagetable.c
>@@ -8,6 +8,13 @@
> #include "../iommu-priv.h"
> #include "iommufd_private.h"
>
>+static const struct iommu_ops *get_iommu_ops(struct iommufd_device *idev)
>+{
>+	if (IS_ENABLED(CONFIG_VFIO_NOIOMMU) && !idev->igroup->group)
>+		return &iommufd_noiommu_ops;
>+	return dev_iommu_ops(idev->dev);
>+}
>+
> static void __iommufd_hwpt_destroy(struct iommufd_hw_pagetable *hwpt)
> {
> 	if (hwpt->domain)
>@@ -114,7 +121,7 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
> 				IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
> 				IOMMU_HWPT_FAULT_ID_VALID |
> 				IOMMU_HWPT_ALLOC_PASID;
>-	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
>+	const struct iommu_ops *ops = get_iommu_ops(idev);
> 	struct iommufd_hwpt_paging *hwpt_paging;
> 	struct iommufd_hw_pagetable *hwpt;
> 	int rc;
>@@ -229,7 +236,7 @@ iommufd_hwpt_nested_alloc(struct iommufd_ctx *ictx,
> 			  struct iommufd_device *idev, u32 flags,
> 			  const struct iommu_user_data *user_data)
> {
>-	const struct iommu_ops *ops = dev_iommu_ops(idev->dev);
>+	const struct iommu_ops *ops = get_iommu_ops(idev);
> 	struct iommufd_hwpt_nested *hwpt_nested;
> 	struct iommufd_hw_pagetable *hwpt;
> 	int rc;
>diff --git a/drivers/iommu/iommufd/hwpt_noiommu.c b/drivers/iommu/iommufd/hwpt_noiommu.c
>new file mode 100644
>index 000000000000..0aa99f581ca3
>--- /dev/null
>+++ b/drivers/iommu/iommufd/hwpt_noiommu.c
>@@ -0,0 +1,91 @@
>+// SPDX-License-Identifier: GPL-2.0-only
>+/*
>+ * Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES
>+ */
>+#include <linux/iommu.h>
>+#include <linux/generic_pt/iommu.h>
>+#include "iommufd_private.h"
>+
>+static const struct iommu_domain_ops noiommu_amdv1_ops;
>+
>+struct noiommu_domain {
>+	union {
>+		struct iommu_domain domain;
>+		struct pt_iommu_amdv1 amdv1;
>+	};
>+	spinlock_t lock;
>+};
>+PT_IOMMU_CHECK_DOMAIN(struct noiommu_domain, amdv1.iommu, domain);
>+
>+static void noiommu_change_top(struct pt_iommu *iommu_table,
>+			       phys_addr_t top_paddr, unsigned int top_level)
>+{
>+}
>+
>+static spinlock_t *noiommu_get_top_lock(struct pt_iommu *iommupt)
>+{
>+	struct noiommu_domain *domain =
>+		container_of(iommupt, struct noiommu_domain, amdv1.iommu);
>+
>+	return &domain->lock;
>+}
>+
>+static const struct pt_iommu_driver_ops noiommu_driver_ops = {
>+	.get_top_lock = noiommu_get_top_lock,
>+	.change_top = noiommu_change_top,
>+};
>+
>+static struct iommu_domain *
>+noiommu_alloc_paging_flags(struct device *dev, u32 flags,
>+			   const struct iommu_user_data *user_data)
>+{
>+	struct pt_iommu_amdv1_cfg cfg = {};
>+	struct noiommu_domain *dom;
>+	int rc;
>+
>+	if (flags || user_data)
>+		return ERR_PTR(-EOPNOTSUPP);
>+
>+	cfg.common.hw_max_vasz_lg2 = 64;
>+	cfg.common.hw_max_oasz_lg2 = 52;
>+	cfg.starting_level = 2;
>+	cfg.common.features =
>+		(BIT(PT_FEAT_DYNAMIC_TOP) | BIT(PT_FEAT_AMDV1_ENCRYPT_TABLES) |
>+		 BIT(PT_FEAT_AMDV1_FORCE_COHERENCE));
>+
>+	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
>+	if (!dom)
>+		return ERR_PTR(-ENOMEM);
>+
>+	spin_lock_init(&dom->lock);
>+	dom->amdv1.iommu.nid = NUMA_NO_NODE;
>+	dom->amdv1.iommu.driver_ops = &noiommu_driver_ops;
>+	dom->domain.ops = &noiommu_amdv1_ops;
>+
>+	/* Use mock page table which is based on AMDV1 */
>+	rc = pt_iommu_amdv1_init(&dom->amdv1, &cfg, GFP_KERNEL);
>+	if (rc) {
>+		kfree(dom);
>+		return ERR_PTR(rc);
>+	}
>+
>+	return &dom->domain;
>+}
>+
>+static void noiommu_domain_free(struct iommu_domain *iommu_domain)
>+{
>+	struct noiommu_domain *domain =
>+		container_of(iommu_domain, struct noiommu_domain, domain);
>+
>+	pt_iommu_deinit(&domain->amdv1.iommu);
>+	kfree(domain);
>+}
>+
>+static const struct iommu_domain_ops noiommu_amdv1_ops = {
>+	IOMMU_PT_DOMAIN_OPS(amdv1),

I understand that this fits really well into the iommufd/hwpt
construction, but do we need page tables for this at all, given that the
iova-to-phys information should already be available in the IOPT of the
IOAS? Since the get_pa() function introduced in a later patch is only
used for noiommu use-cases, could it use the IOPT to look up the
physical addresses instead?
>+	.free = noiommu_domain_free,
>+};
>+
>+struct iommu_ops iommufd_noiommu_ops = {
>+	.domain_alloc_paging_flags = noiommu_alloc_paging_flags,
>+};
>diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
>index 6ac1965199e9..9c18c5eb1899 100644
>--- a/drivers/iommu/iommufd/iommufd_private.h
>+++ b/drivers/iommu/iommufd/iommufd_private.h
>@@ -464,6 +464,8 @@ static inline void iommufd_hw_pagetable_put(struct iommufd_ctx *ictx,
> 	refcount_dec(&hwpt->obj.users);
> }
>
>+extern struct iommu_ops iommufd_noiommu_ops;
>+
> struct iommufd_attach;
>
> struct iommufd_group {
>-- 
>2.34.1
>

  reply	other threads:[~2026-03-18 18:38 UTC|newest]

Thread overview: 31+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-03-12 15:56 [PATCH V2 00/11] iommufd: Enable noiommu mode for cdev Jacob Pan
2026-03-12 15:56 ` [PATCH V2 01/11] iommufd: Support a HWPT without an iommu driver for noiommu Jacob Pan
2026-03-18 18:38   ` Samiullah Khawaja [this message]
2026-03-23 13:17     ` Jason Gunthorpe
2026-03-24 17:42       ` Samiullah Khawaja
2026-03-22  9:24   ` Mostafa Saleh
2026-03-23 21:11     ` Jacob Pan
2026-03-23 22:10       ` Jason Gunthorpe
2026-03-12 15:56 ` [PATCH V2 02/11] iommufd: Move igroup allocation to a function Jacob Pan
2026-03-18 18:39   ` Samiullah Khawaja
2026-03-22  9:41   ` Mostafa Saleh
2026-03-23 22:51     ` Jacob Pan
2026-03-23 16:46   ` Samiullah Khawaja
2026-03-12 15:56 ` [PATCH V2 03/11] iommufd: Allow binding to a noiommu device Jacob Pan
2026-03-22  9:54   ` Mostafa Saleh
2026-03-23 13:20     ` Jason Gunthorpe
2026-03-24 19:13     ` Jacob Pan
2026-03-12 15:56 ` [PATCH V2 04/11] iommufd: Add an ioctl IOMMU_IOAS_GET_PA to query PA from IOVA Jacob Pan
2026-03-12 15:56 ` [PATCH V2 05/11] vfio: Allow null group for noiommu without containers Jacob Pan
2026-03-22  9:59   ` Mostafa Saleh
2026-03-23 13:21     ` Jason Gunthorpe
2026-03-12 15:56 ` [PATCH V2 06/11] vfio: Introduce and set noiommu flag on vfio_device Jacob Pan
2026-03-22 10:02   ` Mostafa Saleh
2026-03-12 15:56 ` [PATCH V2 07/11] vfio: Update noiommu device detection logic for cdev Jacob Pan
2026-03-22 10:04   ` Mostafa Saleh
2026-03-12 15:56 ` [PATCH V2 08/11] vfio: Enable cdev noiommu mode under iommufd Jacob Pan
2026-03-14  8:09   ` kernel test robot
2026-03-12 15:56 ` [PATCH V2 09/11] vfio:selftest: Handle VFIO noiommu cdev Jacob Pan
2026-03-12 15:56 ` [PATCH V2 10/11] selftests/vfio: Add iommufd noiommu mode selftest for cdev Jacob Pan
2026-03-12 15:56 ` [PATCH V2 11/11] Doc: Update VFIO NOIOMMU mode Jacob Pan
2026-03-13 17:48   ` kernel test robot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=abrvYSZ425p6a17D@google.com \
    --to=skhawaja@google.com \
    --cc=alex@shazbot.org \
    --cc=baolu.lu@linux.intel.com \
    --cc=dmatlack@google.com \
    --cc=iommu@lists.linux.dev \
    --cc=jacob.pan@linux.microsoft.com \
    --cc=jgg@nvidia.com \
    --cc=joro@8bytes.org \
    --cc=kevin.tian@intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=nicolinc@nvidia.com \
    --cc=pasha.tatashin@soleen.com \
    --cc=robin.murphy@arm.com \
    --cc=will@kernel.org \
    --cc=yi.l.liu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox