From: Quan Xu <quan.xu@intel.com>
To: xen-devel@lists.xen.org
Cc: Quan Xu <quan.xu@intel.com>,
kevin.tian@intel.com, feng.wu@intel.com,
dario.faggioli@citrix.com, jbeulich@suse.com
Subject: [PATCH v10 2/3] vt-d: synchronize for Device-TLB flush one by one
Date: Fri, 22 Apr 2016 18:54:12 +0800 [thread overview]
Message-ID: <1461322453-29216-3-git-send-email-quan.xu@intel.com> (raw)
In-Reply-To: <1461322453-29216-1-git-send-email-quan.xu@intel.com>
Today we do Device-TLB flush synchronization after issuing flush
requests for all ATS devices belonging to a VM. Doing so however
imposes a limitation, i.e. that we cannot figure out which flush
request is blocked in the flush queue list, based on the VT-d spec.
To prepare correct Device-TLB flush timeout handling in next patch,
we change the behavior to synchronize for every Device-TLB flush
request. So the Device-TLB flush interface is changed a little bit,
by checking the timeout within the function instead of outside of it.
Accordingly we also do a similar change for flush interfaces of
IOTLB/IEC/Context, i.e. moving synchronization into the function.
Since there is no user of a non-synced interface, we just rename
existing ones with _sync suffix.
Signed-off-by: Quan Xu <quan.xu@intel.com>
---
xen/drivers/passthrough/vtd/extern.h | 5 +--
xen/drivers/passthrough/vtd/qinval.c | 61 +++++++++++++++++++++--------------
xen/drivers/passthrough/vtd/x86/ats.c | 8 ++---
3 files changed, 43 insertions(+), 31 deletions(-)
diff --git a/xen/drivers/passthrough/vtd/extern.h b/xen/drivers/passthrough/vtd/extern.h
index d4d37c3..ab7ecad 100644
--- a/xen/drivers/passthrough/vtd/extern.h
+++ b/xen/drivers/passthrough/vtd/extern.h
@@ -59,8 +59,9 @@ int ats_device(const struct pci_dev *, const struct acpi_drhd_unit *);
int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
u64 addr, unsigned int size_order, u64 type);
-int qinval_device_iotlb(struct iommu *iommu,
- u32 max_invs_pend, u16 sid, u16 size, u64 addr);
+int __must_check qinval_device_iotlb_sync(struct iommu *iommu,
+ u32 max_invs_pend,
+ u16 sid, u16 size, u64 addr);
unsigned int get_cache_line_size(void);
void cacheline_flush(char *);
diff --git a/xen/drivers/passthrough/vtd/qinval.c b/xen/drivers/passthrough/vtd/qinval.c
index 52ba2c2..69cc6bf 100644
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -33,6 +33,8 @@ integer_param("vtd_qi_timeout", vtd_qi_timeout);
#define IOMMU_QI_TIMEOUT (vtd_qi_timeout * MILLISECS(1))
+static int invalidate_sync(struct iommu *iommu);
+
static void print_qi_regs(struct iommu *iommu)
{
u64 val;
@@ -72,8 +74,10 @@ static void qinval_update_qtail(struct iommu *iommu, unsigned int index)
dmar_writeq(iommu->reg, DMAR_IQT_REG, (val << QINVAL_INDEX_SHIFT));
}
-static void queue_invalidate_context(struct iommu *iommu,
- u16 did, u16 source_id, u8 function_mask, u8 granu)
+static int __must_check queue_invalidate_context_sync(struct iommu *iommu,
+ u16 did, u16 source_id,
+ u8 function_mask,
+ u8 granu)
{
unsigned long flags;
unsigned int index;
@@ -100,10 +104,14 @@ static void queue_invalidate_context(struct iommu *iommu,
spin_unlock_irqrestore(&iommu->register_lock, flags);
unmap_vtd_domain_page(qinval_entries);
+
+ return invalidate_sync(iommu);
}
-static void queue_invalidate_iotlb(struct iommu *iommu,
- u8 granu, u8 dr, u8 dw, u16 did, u8 am, u8 ih, u64 addr)
+static int __must_check queue_invalidate_iotlb_sync(struct iommu *iommu,
+ u8 granu, u8 dr, u8 dw,
+ u16 did, u8 am, u8 ih,
+ u64 addr)
{
unsigned long flags;
unsigned int index;
@@ -133,10 +141,12 @@ static void queue_invalidate_iotlb(struct iommu *iommu,
unmap_vtd_domain_page(qinval_entries);
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
+
+ return invalidate_sync(iommu);
}
static int __must_check queue_invalidate_wait(struct iommu *iommu,
- u8 iflag, u8 sw, u8 fn)
+ u8 iflag, u8 sw, u8 fn)
{
s_time_t timeout;
volatile u32 poll_slot = QINVAL_STAT_INIT;
@@ -196,8 +206,10 @@ static int invalidate_sync(struct iommu *iommu)
return 0;
}
-int qinval_device_iotlb(struct iommu *iommu,
- u32 max_invs_pend, u16 sid, u16 size, u64 addr)
+int __must_check qinval_device_iotlb_sync(struct iommu *iommu,
+ u32 max_invs_pend,
+ u16 sid, u16 size,
+ u64 addr)
{
unsigned long flags;
unsigned int index;
@@ -226,15 +238,17 @@ int qinval_device_iotlb(struct iommu *iommu,
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
- return 0;
+ return invalidate_sync(iommu);
}
-static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
+static int __must_check queue_invalidate_iec_sync(struct iommu *iommu,
+ u8 granu, u8 im, u16 iidx)
{
unsigned long flags;
unsigned int index;
u64 entry_base;
struct qinval_entry *qinval_entry, *qinval_entries;
+ int ret;
spin_lock_irqsave(&iommu->register_lock, flags);
index = qinval_next_index(iommu);
@@ -254,14 +268,9 @@ static void queue_invalidate_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
unmap_vtd_domain_page(qinval_entries);
qinval_update_qtail(iommu, index);
spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
-{
- int ret;
-
- queue_invalidate_iec(iommu, granu, im, iidx);
ret = invalidate_sync(iommu);
+
/*
* reading vt-d architecture register will ensure
* draining happens in implementation independent way.
@@ -273,12 +282,12 @@ static int __iommu_flush_iec(struct iommu *iommu, u8 granu, u8 im, u16 iidx)
int iommu_flush_iec_global(struct iommu *iommu)
{
- return __iommu_flush_iec(iommu, IEC_GLOBAL_INVL, 0, 0);
+ return queue_invalidate_iec_sync(iommu, IEC_GLOBAL_INVL, 0, 0);
}
int iommu_flush_iec_index(struct iommu *iommu, u8 im, u16 iidx)
{
- return __iommu_flush_iec(iommu, IEC_INDEX_INVL, im, iidx);
+ return queue_invalidate_iec_sync(iommu, IEC_INDEX_INVL, im, iidx);
}
static int flush_context_qi(
@@ -304,11 +313,9 @@ static int flush_context_qi(
}
if ( qi_ctrl->qinval_maddr != 0 )
- {
- queue_invalidate_context(iommu, did, sid, fm,
- type >> DMA_CCMD_INVL_GRANU_OFFSET);
- ret = invalidate_sync(iommu);
- }
+ ret = queue_invalidate_context_sync(iommu, did, sid, fm,
+ type >> DMA_CCMD_INVL_GRANU_OFFSET);
+
return ret;
}
@@ -346,9 +353,13 @@ static int flush_iotlb_qi(
if (cap_read_drain(iommu->cap))
dr = 1;
/* Need to conside the ih bit later */
- queue_invalidate_iotlb(iommu,
- type >> DMA_TLB_FLUSH_GRANU_OFFSET, dr,
- dw, did, size_order, 0, addr);
+ ret = queue_invalidate_iotlb_sync(iommu,
+ type >> DMA_TLB_FLUSH_GRANU_OFFSET,
+ dr, dw, did, size_order, 0, addr);
+
+ if ( ret )
+ return ret;
+
if ( flush_dev_iotlb )
ret = dev_invalidate_iotlb(iommu, did, addr, size_order, type);
rc = invalidate_sync(iommu);
diff --git a/xen/drivers/passthrough/vtd/x86/ats.c b/xen/drivers/passthrough/vtd/x86/ats.c
index 334b9c1..dfa4d30 100644
--- a/xen/drivers/passthrough/vtd/x86/ats.c
+++ b/xen/drivers/passthrough/vtd/x86/ats.c
@@ -134,8 +134,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
/* invalidate all translations: sbit=1,bit_63=0,bit[62:12]=1 */
sbit = 1;
addr = (~0UL << PAGE_SHIFT_4K) & 0x7FFFFFFFFFFFFFFF;
- rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
- sid, sbit, addr);
+ rc = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
break;
case DMA_TLB_PSI_FLUSH:
if ( !device_in_domain(iommu, pdev, did) )
@@ -154,8 +154,8 @@ int dev_invalidate_iotlb(struct iommu *iommu, u16 did,
addr |= (((u64)1 << (size_order - 1)) - 1) << PAGE_SHIFT_4K;
}
- rc = qinval_device_iotlb(iommu, pdev->ats_queue_depth,
- sid, sbit, addr);
+ rc = qinval_device_iotlb_sync(iommu, pdev->ats_queue_depth,
+ sid, sbit, addr);
break;
default:
dprintk(XENLOG_WARNING VTDPREFIX, "invalid vt-d flush type\n");
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel
next prev parent reply other threads:[~2016-04-22 10:54 UTC|newest]
Thread overview: 25+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-04-22 10:54 [PATCH v10 0/3] VT-d Device-TLB flush issue Quan Xu
2016-04-22 10:54 ` [PATCH v10 1/3] vt-d: add a timeout parameter for Queued Invalidation Quan Xu
2016-05-13 15:27 ` Jan Beulich
2016-05-16 15:25 ` Xu, Quan
2016-05-17 3:19 ` Tian, Kevin
2016-05-17 7:47 ` Jan Beulich
2016-05-18 12:53 ` Xu, Quan
2016-05-18 15:05 ` Jan Beulich
2016-05-19 0:32 ` Tian, Kevin
2016-05-19 1:35 ` Xu, Quan
2016-05-19 6:13 ` Jan Beulich
2016-05-19 11:26 ` Xu, Quan
2016-05-19 11:35 ` Jan Beulich
2016-05-19 15:14 ` Xu, Quan
2016-04-22 10:54 ` Quan Xu [this message]
2016-05-17 12:36 ` [PATCH v10 2/3] vt-d: synchronize for Device-TLB flush one by one Jan Beulich
2016-05-18 8:53 ` Xu, Quan
2016-05-18 9:29 ` Jan Beulich
2016-05-18 12:02 ` Xu, Quan
2016-04-22 10:54 ` [PATCH v10 3/3] vt-d: fix vt-d Device-TLB flush timeout issue Quan Xu
2016-05-17 14:00 ` Jan Beulich
2016-05-18 13:11 ` Xu, Quan
2016-05-20 7:15 ` Xu, Quan
2016-05-20 9:58 ` Jan Beulich
2016-05-23 14:00 ` Xu, Quan
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1461322453-29216-3-git-send-email-quan.xu@intel.com \
--to=quan.xu@intel.com \
--cc=dario.faggioli@citrix.com \
--cc=feng.wu@intel.com \
--cc=jbeulich@suse.com \
--cc=kevin.tian@intel.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).