From: Lu Baolu <baolu.lu@linux.intel.com>
To: Joerg Roedel <joro@8bytes.org>
Cc: kevin.tian@intel.com, ashok.raj@intel.com,
linux-kernel@vger.kernel.org, iommu@lists.linux-foundation.org
Subject: [PATCH v2 5/7] iommu/vt-d: Save prq descriptors in an internal list
Date: Wed, 15 Apr 2020 13:25:40 +0800
Message-ID: <20200415052542.30421-6-baolu.lu@linux.intel.com>
In-Reply-To: <20200415052542.30421-1-baolu.lu@linux.intel.com>
Currently, the page request interrupt thread handles the page
requests in the queue in this way (sketched below):
- Clear the PPR bit to ensure that a new interrupt can come in;
- Read and record the head and tail registers;
- Handle all descriptors between head and tail;
- Write the tail value to the head register.
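A condensed sketch of that flow, simplified from the pre-patch code
visible in the diff below (error handling omitted):

  writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);   /* clear PPR bit */
  tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
  head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
  while (head != tail) {                /* handle queued descriptors */
          process_single_prq(iommu, &iommu->prq[head / sizeof(struct page_req_dsc)]);
          head = (head + sizeof(struct page_req_dsc)) & PRQ_RING_MASK;
  }
  dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);     /* head = tail */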
This might cause some descriptors to be handled multiple times.
An example sequence:
- Thread A gets scheduled with PRQ_1 and PRQ_2 in the queue;
- Thread A clears the PPR bit and records the head and tail;
- A new PRQ_3 arrives and Thread B gets scheduled;
- Thread B records the head and tail, whose range still covers
  PRQ_1 and PRQ_2 (plus the new PRQ_3).
As a result, PRQ_1 and PRQ_2 are handled twice, once in Thread_A
and once in Thread_B:
      Thread_A                 Thread_B
     .--------.               .--------.
     |        |               |        |
     .--------.               .--------.
head | PRQ_1  |          head | PRQ_1  |
     .--------.               .--------.
     | PRQ_2  |               | PRQ_2  |
     .--------.               .--------.
tail |        |               | PRQ_3  |
     .--------.               .--------.
     |        |          tail |        |
     '--------'               '--------'
To avoid this, we could hold a spin lock to ensure that PRQs are
handled in a serialized way. But that would mean calling
intel_svm_process_prq() with the spin lock held, which adds extra
complexity to intel_svm_process_prq().

Instead, this patch serializes the handling of PRQ descriptors while
removing the requirement of holding the spin lock in
intel_svm_process_prq(), by saving the descriptors in an internal
list.
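Because the head register is now advanced under prq_lock before the
lock is dropped, a later thread never copies the same descriptors off
the hardware ring again; and because each list entry carries
processing/completed flags, two threads walking the list never handle
the same entry twice. A condensed view of the consumer loop body in
intel_svm_process_prq(), as added by the diff below (comments are
editorial):

  req = list_first_entry(&iommu->prq_list, struct page_req, list);
  if (!req->processing) {           /* unowned: claim and handle it  */
          req->processing = true;
          /* drop prq_lock, call process_single_prq(), retake lock */
          req->completed = true;
  } else if (req->completed) {      /* owner finished: reclaim entry */
          list_del(&req->list);
          kfree(req);
  } else {                          /* owner still working: back off */
          break;
  }

In the example above, Thread_B would either queue only PRQ_3 or back
off from an entry that Thread_A is still processing, so PRQ_1 and
PRQ_2 are no longer handled twice.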
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
drivers/iommu/intel-svm.c | 58 ++++++++++++++++++++++++++++++-------
include/linux/intel-iommu.h | 2 ++
2 files changed, 49 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index a1921b462783..05aeb8ea51c4 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -50,6 +50,8 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 		return ret;
 	}
 	iommu->pr_irq = irq;
+	INIT_LIST_HEAD(&iommu->prq_list);
+	spin_lock_init(&iommu->prq_lock);
 	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);
@@ -698,6 +700,14 @@ struct page_req_dsc {
 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
+struct page_req {
+	struct list_head list;
+	struct page_req_dsc desc;
+	unsigned int processing:1;
+	unsigned int drained:1;
+	unsigned int completed:1;
+};
+
 static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
 {
 	unsigned long requested = 0;
@@ -842,34 +852,60 @@ static void process_single_prq(struct intel_iommu *iommu,
 	}
 }
-static void intel_svm_process_prq(struct intel_iommu *iommu,
-				  struct page_req_dsc *prq,
-				  int head, int tail)
+static void intel_svm_process_prq(struct intel_iommu *iommu)
 {
-	struct page_req_dsc *req;
-
-	while (head != tail) {
-		req = &iommu->prq[head / sizeof(*req)];
-		process_single_prq(iommu, req);
-		head = (head + sizeof(*req)) & PRQ_RING_MASK;
+	struct page_req *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&iommu->prq_lock, flags);
+	while (!list_empty(&iommu->prq_list)) {
+		req = list_first_entry(&iommu->prq_list, struct page_req, list);
+		if (!req->processing) {
+			req->processing = true;
+			spin_unlock_irqrestore(&iommu->prq_lock, flags);
+			process_single_prq(iommu, &req->desc);
+			spin_lock_irqsave(&iommu->prq_lock, flags);
+			req->completed = true;
+		} else if (req->completed) {
+			list_del(&req->list);
+			kfree(req);
+		} else {
+			break;
+		}
 	}
+	spin_unlock_irqrestore(&iommu->prq_lock, flags);
 }
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
+	unsigned long flags;
 	int head, tail;
+	spin_lock_irqsave(&iommu->prq_lock, flags);
 	/*
 	 * Clear PPR bit before reading head/tail registers, to
 	 * ensure that we get a new interrupt if needed.
 	 */
 	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
-
 	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
 	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
-	intel_svm_process_prq(iommu, iommu->prq, head, tail);
+	while (head != tail) {
+		struct page_req_dsc *dsc;
+		struct page_req *req;
+
+		dsc = &iommu->prq[head / sizeof(*dsc)];
+		req = kzalloc(sizeof(*req), GFP_ATOMIC);
+		if (!req)
+			break;
+		req->desc = *dsc;
+		list_add_tail(&req->list, &iommu->prq_list);
+		head = (head + sizeof(*dsc)) & PRQ_RING_MASK;
+	}
 	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
+	spin_unlock_irqrestore(&iommu->prq_lock, flags);
+
+	intel_svm_process_prq(iommu);
 	return IRQ_RETVAL(1);
 }
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index cca1e5f9aeaa..80715a59491c 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -590,6 +590,8 @@ struct intel_iommu {
 #ifdef CONFIG_INTEL_IOMMU_SVM
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16]; /* Name for PRQ interrupt */
+	struct list_head prq_list; /* pending page request list */
+	spinlock_t prq_lock; /* protect above pending list */
 	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
 #endif
 	struct q_inval *qi; /* Queued invalidation info */
--
2.17.1