* [Qemu-devel] [PATCH 2/5] intel_iommu: convert invalid traces into error reports
From: Peter Xu @ 2018-12-17 7:31 UTC
To: qemu-devel; +Cc: Michael S . Tsirkin, peterx, Jason Wang
Convert the *_invalid() tracepoints into error_report_once() calls so
that we can detect issues even without tracing enabled, then drop those
tracepoints.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
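A note on why this is safe to have always on: error_report_once() only
prints once per call site, so a guest that keeps feeding us invalid
entries cannot flood the log. Conceptually it boils down to something
like the sketch below (illustrative only; the real macro in
include/qemu/error-report.h differs in detail and also returns whether
it printed):

    /* Illustrative sketch only -- assumes "qemu/error-report.h" for
     * error_report(); one static latch per expansion (call site). */
    #define report_once_sketch(fmt, ...)                \
        do {                                            \
            static bool reported;                       \
            if (!reported) {                            \
                reported = true;                        \
                error_report(fmt, ## __VA_ARGS__);      \
            }                                           \
        } while (0)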
hw/i386/intel_iommu.c | 58 ++++++++++++++++++++++++++++++++-----------
hw/i386/trace-events | 6 -----
2 files changed, 43 insertions(+), 21 deletions(-)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index f21988f396..4806d7edb4 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -524,7 +524,6 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
addr = s->root + index * sizeof(*re);
if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
- trace_vtd_re_invalid(re->rsvd, re->val);
re->val = 0;
return -VTD_FR_ROOT_TABLE_INV;
}
@@ -545,7 +544,6 @@ static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
/* we have checked that root entry is present */
addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
- trace_vtd_re_invalid(root->rsvd, root->val);
return -VTD_FR_CONTEXT_TABLE_INV;
}
ce->lo = le64_to_cpu(ce->lo);
@@ -630,16 +628,20 @@ static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
break;
case VTD_CONTEXT_TT_DEV_IOTLB:
if (!x86_iommu->dt_supported) {
+ error_report_once("%s: DT specified but not supported", __func__);
return false;
}
break;
case VTD_CONTEXT_TT_PASS_THROUGH:
if (!x86_iommu->pt_supported) {
+ error_report_once("%s: PT specified but not supported", __func__);
return false;
}
break;
default:
/* Unknwon type */
+ error_report_once("%s: unknown ce type: %"PRIu32, __func__,
+ vtd_ce_get_type(ce));
return false;
}
return true;
@@ -1003,7 +1005,9 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
}
if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(s->aw_bits))) {
- trace_vtd_re_invalid(re.rsvd, re.val);
+ error_report_once("%s: invalid root entry: rsvd=0x%"PRIx64
+ ", val=0x%"PRIx64" (reserved nonzero)",
+ __func__, re.rsvd, re.val);
return -VTD_FR_ROOT_ENTRY_RSVD;
}
@@ -1020,19 +1024,23 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
(ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) {
- trace_vtd_ce_invalid(ce->hi, ce->lo);
+ error_report_once("%s: invalid context entry: hi=%"PRIx64
+ ", lo=%"PRIx64" (reserved nonzero)",
+ __func__, ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_RSVD;
}
/* Check if the programming of context-entry is valid */
if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
- trace_vtd_ce_invalid(ce->hi, ce->lo);
+ error_report_once("%s: invalid context entry: hi=%"PRIx64
+ ", lo=%"PRIx64" (level %d not supported)",
+ __func__, ce->hi, ce->lo, vtd_ce_get_level(ce));
return -VTD_FR_CONTEXT_ENTRY_INV;
}
/* Do translation type check */
if (!vtd_ce_type_check(x86_iommu, ce)) {
- trace_vtd_ce_invalid(ce->hi, ce->lo);
+ /* Errors dumped in vtd_ce_type_check() */
return -VTD_FR_CONTEXT_ENTRY_INV;
}
@@ -1878,7 +1886,9 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
(inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
- trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
+ " (reserved nonzero)", __func__, inv_desc->hi,
+ inv_desc->lo);
return false;
}
if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
@@ -1901,7 +1911,9 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
/* Interrupt flag */
vtd_generate_completion_event(s);
} else {
- trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64
+ " (unknown type)", __func__, inv_desc->hi,
+ inv_desc->lo);
return false;
}
return true;
@@ -1913,7 +1925,9 @@ static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
uint16_t sid, fmask;
if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
- trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
+ " (reserved nonzero)", __func__, inv_desc->hi,
+ inv_desc->lo);
return false;
}
switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
@@ -1932,7 +1946,9 @@ static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
break;
default:
- trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64
+ " (invalid type)", __func__, inv_desc->hi,
+ inv_desc->lo);
return false;
}
return true;
@@ -1946,7 +1962,9 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
(inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
- trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+ ", lo=0x%"PRIx64" (reserved bits unzero)\n",
+ __func__, inv_desc->hi, inv_desc->lo);
return false;
}
@@ -1965,14 +1983,20 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
if (am > VTD_MAMV) {
- trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+ ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)\n",
+ __func__, inv_desc->hi, inv_desc->lo,
+ am, (unsigned)VTD_MAMV);
return false;
}
vtd_iotlb_page_invalidate(s, domain_id, addr, am);
break;
default:
- trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
+ ", lo=0x%"PRIx64" (type mismatch: 0x%llx)\n",
+ __func__, inv_desc->hi, inv_desc->lo,
+ inv_desc->lo & VTD_INV_DESC_IOTLB_G);
return false;
}
return true;
@@ -2012,7 +2036,9 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
(inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
- trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
+ error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
+ ", lo=%"PRIx64" (reserved nonzero)", __func__,
+ inv_desc->hi, inv_desc->lo);
return false;
}
@@ -2103,7 +2129,9 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s)
break;
default:
- trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
+ error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64
+ " (unknown type)", __func__, inv_desc.hi,
+ inv_desc.lo);
return false;
}
s->iq_head++;
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 6ac347d18c..77244fc384 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -5,19 +5,15 @@ x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC inv
# hw/i386/intel_iommu.c
vtd_inv_desc(const char *type, uint64_t hi, uint64_t lo) "invalidate desc type %s high 0x%"PRIx64" low 0x%"PRIx64
-vtd_inv_desc_invalid(uint64_t hi, uint64_t lo) "invalid inv desc hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_inv_desc_cc_domain(uint16_t domain) "context invalidate domain 0x%"PRIx16
vtd_inv_desc_cc_global(void) "context invalidate globally"
vtd_inv_desc_cc_device(uint8_t bus, uint8_t dev, uint8_t fn) "context invalidate device %02"PRIx8":%02"PRIx8".%02"PRIx8
vtd_inv_desc_cc_devices(uint16_t sid, uint16_t fmask) "context invalidate devices sid 0x%"PRIx16" fmask 0x%"PRIx16
-vtd_inv_desc_cc_invalid(uint64_t hi, uint64_t lo) "invalid context-cache desc hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_inv_desc_iotlb_global(void) "iotlb invalidate global"
vtd_inv_desc_iotlb_domain(uint16_t domain) "iotlb invalidate whole domain 0x%"PRIx16
vtd_inv_desc_iotlb_pages(uint16_t domain, uint64_t addr, uint8_t mask) "iotlb invalidate domain 0x%"PRIx16" addr 0x%"PRIx64" mask 0x%"PRIx8
-vtd_inv_desc_iotlb_invalid(uint64_t hi, uint64_t lo) "invalid iotlb desc hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_inv_desc_wait_sw(uint64_t addr, uint32_t data) "wait invalidate status write addr 0x%"PRIx64" data 0x%"PRIx32
vtd_inv_desc_wait_irq(const char *msg) "%s"
-vtd_inv_desc_wait_invalid(uint64_t hi, uint64_t lo) "invalid wait desc hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_inv_desc_wait_write_fail(uint64_t hi, uint64_t lo) "write fail for wait desc hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_inv_desc_iec(uint32_t granularity, uint32_t index, uint32_t mask) "granularity 0x%"PRIx32" index 0x%"PRIx32" mask 0x%"PRIx32
vtd_inv_qi_enable(bool enable) "enabled %d"
@@ -27,9 +23,7 @@ vtd_inv_qi_tail(uint16_t head) "write tail %d"
vtd_inv_qi_fetch(void) ""
vtd_context_cache_reset(void) ""
vtd_re_not_present(uint8_t bus) "Root entry bus %"PRIu8" not present"
-vtd_re_invalid(uint64_t hi, uint64_t lo) "invalid root entry hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_ce_not_present(uint8_t bus, uint8_t devfn) "Context entry bus %"PRIu8" devfn %"PRIu8" not present"
-vtd_ce_invalid(uint64_t hi, uint64_t lo) "invalid context entry hi 0x%"PRIx64" lo 0x%"PRIx64
vtd_iotlb_page_hit(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page hit sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16
vtd_iotlb_page_update(uint16_t sid, uint64_t addr, uint64_t slpte, uint16_t domain) "IOTLB page update sid 0x%"PRIx16" iova 0x%"PRIx64" slpte 0x%"PRIx64" domain 0x%"PRIx16
vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen) "IOTLB context hit bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32
--
2.17.1
* [Qemu-devel] [PATCH 3/5] intel_iommu: dma read/write draining support
From: Peter Xu @ 2018-12-17 7:31 UTC
To: qemu-devel; +Cc: Michael S . Tsirkin, peterx, Jason Wang, Yu Wang
Supporting DMA read/write draining is easy for the existing VT-d
emulation: the emulation does not keep any request queue, so there is
nothing to flush for un-committed requests. All we need to do is
declare the support.
These capabilities are required to pass the Windows SVVP test program.
It is verified that with the parameters "x-aw-bits=48,caching-mode=off"
we can pass the Windows SVVP test with this patch applied. Otherwise
we fail with:
IOMMU[0] - DWD (DMA write draining) not supported
IOMMU[0] - DRD (DMA read draining) not supported
Segment 0 has no DMA remapping capable IOMMU units
However, since these bits were not declared as supported for QEMU<=3.1,
we need a compatibility property for them, and the feature is turned on
by default only for QEMU>=4.0.
Please refer to VT-d spec 6.5.4 for more information.
CC: Yu Wang <wyu@redhat.com>
Bug: https://bugzilla.redhat.com/show_bug.cgi?id=1654550
Signed-off-by: Peter Xu <peterx@redhat.com>
---
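For context, the guest side only ever reads these two bits from the
capability register before relying on draining; a minimal sketch of
such a check (hypothetical helper name, bit positions per VT-d spec
6.5.4 and the VTD_CAP_DRAIN_* definitions below):

    #include <stdbool.h>
    #include <stdint.h>

    #define CAP_DWD (1ULL << 54)    /* DMA write draining supported */
    #define CAP_DRD (1ULL << 55)    /* DMA read draining supported */

    /* Hypothetical guest-side check against the VT-d capability register. */
    static bool cap_supports_drain(uint64_t cap)
    {
        return (cap & CAP_DWD) && (cap & CAP_DRD);
    }

With dma-drain=on (the new default) both bits are set and checks like
the SVVP one above succeed; with the 3.1 compat value dma-drain=off
they stay clear.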
hw/i386/intel_iommu.c | 4 ++++
hw/i386/intel_iommu_internal.h | 3 +++
include/hw/i386/intel_iommu.h | 1 +
include/hw/i386/pc.h | 5 +++++
4 files changed, 13 insertions(+)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 4806d7edb4..26cc731c7b 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -2659,6 +2659,7 @@ static Property vtd_properties[] = {
DEFINE_PROP_UINT8("x-aw-bits", IntelIOMMUState, aw_bits,
VTD_HOST_ADDRESS_WIDTH),
DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
+ DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -3147,6 +3148,9 @@ static void vtd_init(IntelIOMMUState *s)
s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits);
+ if (s->dma_drain) {
+ s->cap |= VTD_CAP_DRAIN;
+ }
if (s->aw_bits == VTD_HOST_AW_48BIT) {
s->cap |= VTD_CAP_SAGAW_48bit;
}
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index d084099ed9..00e9edbc66 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -203,6 +203,9 @@
#define VTD_CAP_MAMV (VTD_MAMV << 48)
#define VTD_CAP_PSI (1ULL << 39)
#define VTD_CAP_SLLPS ((1ULL << 34) | (1ULL << 35))
+#define VTD_CAP_DRAIN_WRITE (1ULL << 54)
+#define VTD_CAP_DRAIN_READ (1ULL << 55)
+#define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE)
#define VTD_CAP_CM (1ULL << 7)
/* Supported Adjusted Guest Address Widths */
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index ed4e758273..a321cc9691 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -245,6 +245,7 @@ struct IntelIOMMUState {
OnOffAuto intr_eim; /* Toggle for EIM cabability */
bool buggy_eim; /* Force buggy EIM unless eim=off */
uint8_t aw_bits; /* Host/IOVA address width (in bits) */
+ bool dma_drain; /* Whether DMA r/w draining enabled */
/*
* Protects IOMMU states in general. Currently it protects the
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index 9d29c4b1df..c7c0c944e8 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -296,6 +296,11 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
#define PC_COMPAT_3_1 \
HW_COMPAT_3_1 \
+ {\
+ .driver = "intel-iommu",\
+ .property = "dma-drain",\
+ .value = "off",\
+ },
#define PC_COMPAT_3_0 \
HW_COMPAT_3_0 \
--
2.17.1
* [Qemu-devel] [PATCH 5/5] intel_iommu: turn on IR by default
From: Peter Xu @ 2018-12-17 7:31 UTC
To: qemu-devel; +Cc: Michael S . Tsirkin, peterx, Jason Wang
IR (interrupt remapping) has been there for a long time and no bugs
have been reported for quite a while. Let's turn it on by default to
match real hardware. Provide a compatibility bit for QEMU<=3.1.
Signed-off-by: Peter Xu <peterx@redhat.com>
---
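One usage note: with intremap now defaulting to on, an intel-iommu
device still needs the split (or disabled) kernel irqchip to realize,
e.g. something like:

    qemu-system-x86_64 -machine q35,kernel-irqchip=split -device intel-iommu ...

and the previous behaviour remains available explicitly via
"-device intel-iommu,intremap=off".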
hw/i386/x86-iommu.c | 2 +-
include/hw/i386/pc.h | 4 ++++
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c
index abc3c03158..0150ceda14 100644
--- a/hw/i386/x86-iommu.c
+++ b/hw/i386/x86-iommu.c
@@ -135,7 +135,7 @@ static void x86_iommu_realize(DeviceState *dev, Error **errp)
}
static Property x86_iommu_properties[] = {
- DEFINE_PROP_BOOL("intremap", X86IOMMUState, intr_supported, false),
+ DEFINE_PROP_BOOL("intremap", X86IOMMUState, intr_supported, true),
DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false),
DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true),
DEFINE_PROP_END_OF_LIST(),
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index c7c0c944e8..ed958b9af1 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -300,6 +300,10 @@ bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
.driver = "intel-iommu",\
.property = "dma-drain",\
.value = "off",\
+ },{\
+ .driver = "x86-iommu",\
+ .property = "intremap",\
+ .value = "off",\
},
#define PC_COMPAT_3_0 \
--
2.17.1