From: Lu Baolu <baolu.lu@linux.intel.com>
To: Jason Gunthorpe <jgg@ziepe.ca>, Kevin Tian <kevin.tian@intel.com>,
Joerg Roedel <joro@8bytes.org>, Will Deacon <will@kernel.org>,
Robin Murphy <robin.murphy@arm.com>,
Jean-Philippe Brucker <jean-philippe@linaro.org>,
Nicolin Chen <nicolinc@nvidia.com>, Yi Liu <yi.l.liu@intel.com>,
Jacob Pan <jacob.jun.pan@linux.intel.com>
Cc: iommu@lists.linux.dev, linux-kselftest@vger.kernel.org,
virtualization@lists.linux-foundation.org,
linux-kernel@vger.kernel.org, Lu Baolu <baolu.lu@linux.intel.com>
Subject: [RFC PATCHES 13/17] iommufd: Add a timer for each iommufd fault data
Date: Tue, 30 May 2023 13:37:20 +0800 [thread overview]
Message-ID: <20230530053724.232765-14-baolu.lu@linux.intel.com> (raw)
In-Reply-To: <20230530053724.232765-1-baolu.lu@linux.intel.com>
Add a timer for each iommufd fault data so that a pending fault can be
drained in case user space fails to read or respond to it. As the
per-fault iommufd data may be accessed in two different contexts (user
reading/responding and the timer expiring), add a reference counter to
each iommufd fault data and free the data only after all the reference
counters are released.
The page fault response timeout value is device-specific and indicates how
long the bus/device will wait for a response to a page fault request. The
timeout value is added to the per-device fault cookie. Ideally, it should
be calculated according to the platform configuration (PCI, ACPI, device
tree, etc.). This defines a default value of 1 second in case no
platform opt-in is available. This default value is a rough estimate and
is subject to change according to real use cases.
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
drivers/iommu/iommufd/iommufd_private.h | 8 +++
drivers/iommu/iommufd/device.c | 3 +
drivers/iommu/iommufd/hw_pagetable.c | 80 +++++++++++++++++++++++--
3 files changed, 87 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 0985e83a611f..f5b8a53044c4 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -249,9 +249,12 @@ struct hw_pgtable_fault {
struct iommufd_fault {
struct device *dev;
ioasid_t pasid;
+ struct iommufd_hw_pagetable *hwpt;
struct iommu_hwpt_pgfault fault;
/* List head at hw_pgtable_fault:deliver or response */
struct list_head item;
+ struct timer_list timer;
+ refcount_t users;
};
/*
@@ -336,6 +339,11 @@ struct iommufd_device {
struct iommufd_fault_cookie {
struct iommufd_device *idev;
+ /*
+ * The maximum number of milliseconds that a device will wait for a
+ * response to a page fault request.
+ */
+ unsigned long timeout;
};
static inline struct iommufd_device *
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 3408f1fc3e9f..6ad46638f4e1 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -374,6 +374,8 @@ static int iommufd_group_setup_msi(struct iommufd_group *igroup,
return 0;
}
+#define IOMMUFD_DEFAULT_IOPF_TIMEOUT 1000
+
static int iommufd_device_set_fault_cookie(struct iommufd_hw_pagetable *hwpt,
struct iommufd_device *idev,
ioasid_t pasid)
@@ -387,6 +389,7 @@ static int iommufd_device_set_fault_cookie(struct iommufd_hw_pagetable *hwpt,
if (!fcookie)
return -ENOMEM;
fcookie->idev = idev;
+ fcookie->timeout = IOMMUFD_DEFAULT_IOPF_TIMEOUT;
curr = iommu_set_device_fault_cookie(idev->dev, pasid, fcookie);
if (IS_ERR(curr)) {
diff --git a/drivers/iommu/iommufd/hw_pagetable.c b/drivers/iommu/iommufd/hw_pagetable.c
index c1f3ebdce796..8c441fd72e1f 100644
--- a/drivers/iommu/iommufd/hw_pagetable.c
+++ b/drivers/iommu/iommufd/hw_pagetable.c
@@ -6,6 +6,7 @@
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
+#include <linux/timer.h>
#include <uapi/linux/iommufd.h>
#include "../iommu-priv.h"
@@ -396,6 +397,60 @@ static void iommufd_compose_fault_message(struct iommu_fault *fault,
hwpt_fault->private_data[1] = fault->prm.private_data[1];
}
+static void drain_iopf_fault(struct iommufd_fault *ifault)
+{
+ struct iommu_page_response resp = {
+ .version = IOMMU_PAGE_RESP_VERSION_1,
+ .pasid = ifault->fault.pasid,
+ .grpid = ifault->fault.grpid,
+ .code = IOMMU_PAGE_RESP_FAILURE,
+ };
+
+ if (!(ifault->fault.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+ return;
+
+ if ((ifault->fault.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
+ (ifault->fault.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
+ resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+ iommu_page_response(ifault->dev, &resp);
+}
+
+static void iommufd_put_fault(struct iommufd_fault *ifault)
+{
+ if (!ifault)
+ return;
+
+ if (refcount_dec_and_test(&ifault->users))
+ kfree(ifault);
+}
+
+static int iommufd_fault_timer_teardown(struct iommufd_fault *ifault)
+{
+ int rc;
+
+ rc = timer_delete(&ifault->timer);
+ if (rc)
+ iommufd_put_fault(ifault);
+
+ return rc;
+}
+
+static void iopf_timer_func(struct timer_list *t)
+{
+ struct iommufd_fault *ifault = from_timer(ifault, t, timer);
+ struct hw_pgtable_fault *fault = ifault->hwpt->fault;
+
+ mutex_lock(&fault->mutex);
+ if (!list_empty(&ifault->item)) {
+ list_del_init(&ifault->item);
+ drain_iopf_fault(ifault);
+ }
+ mutex_unlock(&fault->mutex);
+
+ iommufd_put_fault(ifault);
+}
+
static enum iommu_page_response_code
iommufd_hw_pagetable_iopf_handler(struct iommu_fault *fault,
struct device *dev, void *data)
@@ -416,6 +471,10 @@ iommufd_hw_pagetable_iopf_handler(struct iommu_fault *fault,
iommufd_compose_fault_message(fault, &ifault->fault, cookie->idev->obj.id);
ifault->dev = dev;
ifault->pasid = fault->prm.pasid;
+ ifault->hwpt = hwpt;
+ refcount_set(&ifault->users, 2);
+ timer_setup(&ifault->timer, iopf_timer_func, 0);
+ mod_timer(&ifault->timer, jiffies + msecs_to_jiffies(cookie->timeout));
mutex_lock(&hwpt->fault->mutex);
list_add_tail(&ifault->item, &hwpt->fault->deliver);
@@ -443,10 +502,12 @@ static ssize_t hwpt_fault_fops_read(struct file *filep, char __user *buf,
break;
done += fault_size;
list_del_init(&ifault->item);
- if (ifault->fault.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)
+ if (ifault->fault.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE) {
list_add_tail(&ifault->item, &fault->response);
- else
- kfree(ifault);
+ } else {
+ iommufd_fault_timer_teardown(ifault);
+ iommufd_put_fault(ifault);
+ }
}
mutex_unlock(&fault->mutex);
@@ -526,6 +587,7 @@ int iommufd_hwpt_page_response(struct iommufd_ucmd *ucmd)
{
struct iommu_hwpt_page_response *cmd = ucmd->cmd;
struct iommu_page_response resp = {};
+ struct iommufd_fault *ifault = NULL;
struct iommufd_fault *curr, *next;
struct iommufd_hw_pagetable *hwpt;
struct iommufd_device *idev;
@@ -547,6 +609,7 @@ int iommufd_hwpt_page_response(struct iommufd_ucmd *ucmd)
if (curr->dev != idev->dev || curr->fault.grpid != cmd->grpid)
continue;
+ ifault = curr;
if ((cmd->flags & IOMMU_PGFAULT_FLAGS_PASID_VALID) &&
cmd->pasid != curr->fault.pasid)
break;
@@ -555,6 +618,15 @@ int iommufd_hwpt_page_response(struct iommufd_ucmd *ucmd)
!(cmd->flags & IOMMU_PGFAULT_FLAGS_PASID_VALID))
break;
+ /*
+ * The timer has expired if it was not pending. Leave the
+ * response to the timer function.
+ */
+ if (!iommufd_fault_timer_teardown(curr)) {
+ rc = -ETIMEDOUT;
+ break;
+ }
+
resp.version = IOMMU_PAGE_RESP_VERSION_1;
resp.pasid = cmd->pasid;
resp.grpid = cmd->grpid;
@@ -564,11 +636,11 @@ int iommufd_hwpt_page_response(struct iommufd_ucmd *ucmd)
rc = iommu_page_response(idev->dev, &resp);
list_del_init(&curr->item);
- kfree(curr);
break;
}
mutex_unlock(&hwpt->fault->mutex);
+ iommufd_put_fault(ifault);
iommufd_put_object(&idev->obj);
out_put_hwpt:
iommufd_put_object(&hwpt->obj);
--
2.34.1
next prev parent reply other threads:[~2023-05-30 5:41 UTC|newest]
Thread overview: 37+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-30 5:37 [RFC PATCHES 00/17] IOMMUFD: Deliver IO page faults to user space Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 01/17] iommu: Move iommu fault data to linux/iommu.h Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 02/17] iommu: Support asynchronous I/O page fault response Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 03/17] iommu: Add helper to set iopf handler for domain Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 04/17] iommu: Pass device parameter to iopf handler Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 05/17] iommu: Split IO page fault handling from SVA Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 06/17] iommu: Add iommu page fault cookie helpers Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 07/17] iommufd: Add iommu page fault data Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 08/17] iommufd: IO page fault delivery initialization and release Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 09/17] iommufd: Add iommufd hwpt iopf handler Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 10/17] iommufd: Add IOMMU_HWPT_ALLOC_FLAGS_USER_PASID_TABLE for hwpt_alloc Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 11/17] iommufd: Deliver fault messages to user space Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 12/17] iommufd: Add io page fault response support Lu Baolu
2023-05-30 5:37 ` Lu Baolu [this message]
2023-05-30 5:37 ` [RFC PATCHES 14/17] iommufd: Drain all pending faults when destroying hwpt Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 15/17] iommufd: Allow new hwpt_alloc flags Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 16/17] iommufd/selftest: Add IOPF feature for mock devices Lu Baolu
2023-05-30 5:37 ` [RFC PATCHES 17/17] iommufd/selftest: Cover iopf-capable nested hwpt Lu Baolu
2023-05-30 18:50 ` [RFC PATCHES 00/17] IOMMUFD: Deliver IO page faults to user space Nicolin Chen
2023-05-31 2:10 ` Baolu Lu
2023-05-31 4:12 ` Nicolin Chen
2023-06-25 6:30 ` Baolu Lu
2023-06-25 19:21 ` Nicolin Chen
2023-06-26 3:10 ` Baolu Lu
2023-06-26 18:02 ` Nicolin Chen
2023-06-26 18:33 ` Jason Gunthorpe
2023-06-28 2:00 ` Baolu Lu
2023-06-28 12:49 ` Jason Gunthorpe
2023-06-29 1:07 ` Baolu Lu
2023-05-31 0:33 ` Jason Gunthorpe
2023-05-31 3:17 ` Baolu Lu
2023-06-23 6:18 ` Baolu Lu
2023-06-23 13:50 ` Jason Gunthorpe
2023-06-16 11:32 ` Jean-Philippe Brucker
2023-06-19 3:35 ` Baolu Lu
2023-06-26 9:51 ` Jean-Philippe Brucker
2023-06-19 12:58 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230530053724.232765-14-baolu.lu@linux.intel.com \
--to=baolu.lu@linux.intel.com \
--cc=iommu@lists.linux.dev \
--cc=jacob.jun.pan@linux.intel.com \
--cc=jean-philippe@linaro.org \
--cc=jgg@ziepe.ca \
--cc=joro@8bytes.org \
--cc=kevin.tian@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-kselftest@vger.kernel.org \
--cc=nicolinc@nvidia.com \
--cc=robin.murphy@arm.com \
--cc=virtualization@lists.linux-foundation.org \
--cc=will@kernel.org \
--cc=yi.l.liu@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox