qemu-devel.nongnu.org archive mirror
 help / color / mirror / Atom feed
From: "Liu, Yi L" <yi.l.liu@linux.intel.com>
To: qemu-devel@nongnu.org, alex.williamson@redhat.com, peterx@redhat.com
Cc: kvm@vger.kernel.org, jasowang@redhat.com,
	iommu@lists.linux-foundation.org, kevin.tian@intel.com,
	ashok.raj@intel.com, jacob.jun.pan@intel.com,
	tianyu.lan@intel.com, yi.l.liu@intel.com,
	jean-philippe.brucker@arm.com, "Liu,
	Yi L" <yi.l.liu@linux.intel.com>
Subject: [Qemu-devel] [RFC PATCH 02/20] intel_iommu: exposed extended-context mode to guest
Date: Wed, 26 Apr 2017 18:06:32 +0800	[thread overview]
Message-ID: <1493201210-14357-3-git-send-email-yi.l.liu@linux.intel.com> (raw)
In-Reply-To: <1493201210-14357-1-git-send-email-yi.l.liu@linux.intel.com>

VT-d implementations reporting the PASID or PRS fields as "Set" must also
report ecap.ECS as "Set". Extended-Context mode is required for SVM.

When ECS is reported, the guest Intel IOMMU driver will initialize extended
root entries and extended context entries, and also a PASID table if there
is any SVM-capable device.

Signed-off-by: Liu, Yi L <yi.l.liu@linux.intel.com>
---
 hw/i386/intel_iommu.c          | 131 +++++++++++++++++++++++++++--------------
 hw/i386/intel_iommu_internal.h |   9 +++
 include/hw/i386/intel_iommu.h  |   2 +-
 3 files changed, 97 insertions(+), 45 deletions(-)

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 400d0d1..bf98fa5 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -497,6 +497,11 @@ static inline bool vtd_root_entry_present(VTDRootEntry *root)
     return root->val & VTD_ROOT_ENTRY_P;
 }
 
+static inline bool vtd_root_entry_upper_present(VTDRootEntry *root)
+{
+    return root->rsvd & VTD_ROOT_ENTRY_P;
+}
+
 static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                               VTDRootEntry *re)
 {
@@ -509,6 +514,9 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
         return -VTD_FR_ROOT_TABLE_INV;
     }
     re->val = le64_to_cpu(re->val);
+    if (s->ecs) {
+        re->rsvd = le64_to_cpu(re->rsvd);
+    }
     return 0;
 }
 
@@ -517,19 +525,30 @@ static inline bool vtd_context_entry_present(VTDContextEntry *context)
     return context->lo & VTD_CONTEXT_ENTRY_P;
 }
 
-static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
-                                           VTDContextEntry *ce)
+static int vtd_get_context_entry_from_root(IntelIOMMUState *s,
+                 VTDRootEntry *root, uint8_t index, VTDContextEntry *ce)
 {
-    dma_addr_t addr;
+    dma_addr_t addr, ce_size;
 
     /* we have checked that root entry is present */
-    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
-    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
+    ce_size = (s->ecs) ? (2 * sizeof(*ce)) : (sizeof(*ce));
+    addr = (s->ecs && (index > 0x7f)) ?
+           ((root->rsvd & VTD_ROOT_ENTRY_CTP) + (index - 0x80) * ce_size) :
+           ((root->val & VTD_ROOT_ENTRY_CTP) + index * ce_size);
+
+    if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) {
         trace_vtd_re_invalid(root->rsvd, root->val);
         return -VTD_FR_CONTEXT_TABLE_INV;
     }
-    ce->lo = le64_to_cpu(ce->lo);
-    ce->hi = le64_to_cpu(ce->hi);
+
+    ce[0].lo = le64_to_cpu(ce[0].lo);
+    ce[0].hi = le64_to_cpu(ce[0].hi);
+
+    if (s->ecs) {
+        ce[1].lo = le64_to_cpu(ce[1].lo);
+        ce[1].hi = le64_to_cpu(ce[1].hi);
+    }
+
     return 0;
 }
 
@@ -595,9 +614,11 @@ static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
     return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
 }
 
-static inline uint32_t vtd_ce_get_type(IntelIOMMUState *s,
+static inline uint32_t vtd_ce_get_type(IntelIOMMUState *s,
+                                       VTDContextEntry *ce)
 {
-    return ce->lo & VTD_CONTEXT_ENTRY_TT;
+    return s->ecs ? (ce->lo & VTD_EXT_CONTEXT_ENTRY_TT) :
+                    (ce->lo & VTD_CONTEXT_ENTRY_TT);
 }
 
 static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
@@ -842,16 +863,20 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
         return ret_fr;
     }
 
-    if (!vtd_root_entry_present(&re)) {
+    if (!vtd_root_entry_present(&re) ||
+        (s->ecs && (devfn > 0x7f) && (!vtd_root_entry_upper_present(&re)))) {
         /* Not error - it's okay we don't have root entry. */
         trace_vtd_re_not_present(bus_num);
         return -VTD_FR_ROOT_ENTRY_P;
-    } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
-        trace_vtd_re_invalid(re.rsvd, re.val);
-        return -VTD_FR_ROOT_ENTRY_RSVD;
+    }
+    if ((s->ecs && (devfn > 0x7f) && (re.rsvd & VTD_ROOT_ENTRY_RSVD)) ||
+        (s->ecs && (devfn < 0x80) && (re.val & VTD_ROOT_ENTRY_RSVD)) ||
+        ((!s->ecs) && (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)))) {
+            trace_vtd_re_invalid(re.rsvd, re.val);
+            return -VTD_FR_ROOT_ENTRY_RSVD;
     }
 
-    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
+    ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce);
     if (ret_fr) {
         return ret_fr;
     }
@@ -860,21 +885,36 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
         /* Not error - it's okay we don't have context entry. */
         trace_vtd_ce_not_present(bus_num, devfn);
         return -VTD_FR_CONTEXT_ENTRY_P;
-    } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
-               (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
+    }
+
+    /* Check Reserved bits in context-entry */
+    if ((!s->ecs && (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI)) ||
+        (!s->ecs && (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) ||
+        (s->ecs && (ce[0].lo & VTD_EXT_CONTEXT_ENTRY_RSVD_LOW0)) ||
+        (s->ecs && (ce[0].hi & VTD_EXT_CONTEXT_ENTRY_RSVD_HIGH0)) ||
+        (s->ecs && (ce[1].lo & VTD_EXT_CONTEXT_ENTRY_RSVD_LOW1))) {
         trace_vtd_ce_invalid(ce->hi, ce->lo);
         return -VTD_FR_CONTEXT_ENTRY_RSVD;
     }
+
     /* Check if the programming of context-entry is valid */
     if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
         trace_vtd_ce_invalid(ce->hi, ce->lo);
         return -VTD_FR_CONTEXT_ENTRY_INV;
     } else {
-        switch (vtd_ce_get_type(ce)) {
+        switch (vtd_ce_get_type(s, ce)) {
         case VTD_CONTEXT_TT_MULTI_LEVEL:
             /* fall through */
         case VTD_CONTEXT_TT_DEV_IOTLB:
             break;
+        case VTD_EXT_CONTEXT_TT_NO_DEV_IOTLB:
+        case VTD_EXT_CONTEXT_TT_DEV_IOTLB:
+            if (s->ecs) {
+                break;
+            } else {
+                trace_vtd_ce_invalid(ce->hi, ce->lo);
+                return -VTD_FR_CONTEXT_ENTRY_INV;
+            }
         case VTD_CONTEXT_TT_PASS_THROUGH:
             if (s->ecap & VTD_ECAP_PT) {
                 break;
@@ -894,18 +934,18 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
 static int vtd_dev_get_trans_type(VTDAddressSpace *as)
 {
     IntelIOMMUState *s;
-    VTDContextEntry ce;
+    VTDContextEntry ce[2];
     int ret;
 
     s = as->iommu_state;
 
     ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
-                                   as->devfn, &ce);
+                                   as->devfn, &ce[0]);
     if (ret) {
         return ret;
     }
 
-    return vtd_ce_get_type(&ce);
+    return vtd_ce_get_type(s, &ce[0]);
 }
 
 static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
@@ -1008,7 +1048,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                    IOMMUTLBEntry *entry)
 {
     IntelIOMMUState *s = vtd_as->iommu_state;
-    VTDContextEntry ce;
+    VTDContextEntry ce[2];
     uint8_t bus_num = pci_bus_num(bus);
     VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
     uint64_t slpte, page_mask;
@@ -1039,14 +1079,16 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     }
     /* Try to fetch context-entry from cache first */
     if (cc_entry->context_cache_gen == s->context_cache_gen) {
-        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
-                               cc_entry->context_entry.lo,
+        trace_vtd_iotlb_cc_hit(bus_num, devfn,
+                               cc_entry->context_entry[0].hi,
+                               cc_entry->context_entry[0].lo,
                                cc_entry->context_cache_gen);
-        ce = cc_entry->context_entry;
-        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+        ce[0] = cc_entry->context_entry[0];
+        ce[1] = cc_entry->context_entry[1];
+        is_fpd_set = ce[0].lo & VTD_CONTEXT_ENTRY_FPD;
     } else {
-        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
-        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
+        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce[0]);
+        is_fpd_set = ce[0].lo & VTD_CONTEXT_ENTRY_FPD;
         if (ret_fr) {
             ret_fr = -ret_fr;
             if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
@@ -1057,10 +1099,11 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
             return;
         }
         /* Update context-cache */
-        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
+        trace_vtd_iotlb_cc_update(bus_num, devfn, ce[0].hi, ce[0].lo,
                                   cc_entry->context_cache_gen,
                                   s->context_cache_gen);
-        cc_entry->context_entry = ce;
+        cc_entry->context_entry[0] = ce[0];
+        cc_entry->context_entry[1] = ce[1];
         cc_entry->context_cache_gen = s->context_cache_gen;
     }
 
@@ -1068,7 +1111,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
      * We don't need to translate for pass-through context entries.
      * Also, let's ignore IOTLB caching as well for PT devices.
      */
-    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+    if (vtd_ce_get_type(s, &ce[0]) == VTD_CONTEXT_TT_PASS_THROUGH) {
         entry->translated_addr = entry->iova;
         entry->addr_mask = VTD_PAGE_SIZE - 1;
         entry->perm = IOMMU_RW;
@@ -1076,7 +1119,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
         return;
     }
 
-    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
+    ret_fr = vtd_iova_to_slpte(&ce[0], addr, is_write, &slpte, &level,
                                &reads, &writes);
     if (ret_fr) {
         ret_fr = -ret_fr;
@@ -1089,7 +1132,7 @@ static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
     }
 
     page_mask = vtd_slpt_level_page_mask(level);
-    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
+    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce[0].hi), addr, slpte,
                      reads, writes, level);
 out:
     entry->iova = addr & page_mask;
@@ -1283,7 +1326,7 @@ static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
 static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
 {
     IntelIOMMUNotifierNode *node;
-    VTDContextEntry ce;
+    VTDContextEntry ce[2];
     VTDAddressSpace *vtd_as;
 
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
@@ -1292,8 +1335,8 @@ static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
     QLIST_FOREACH(node, &s->notifiers_list, next) {
         vtd_as = node->vtd_as;
         if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
-                                      vtd_as->devfn, &ce) &&
-            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
+                                      vtd_as->devfn, &ce[0]) &&
+            domain_id == VTD_CONTEXT_ENTRY_DID(ce[0].hi)) {
             memory_region_iommu_replay_all(&vtd_as->iommu);
         }
     }
@@ -1311,15 +1354,15 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                            uint8_t am)
 {
     IntelIOMMUNotifierNode *node;
-    VTDContextEntry ce;
+    VTDContextEntry ce[2];
     int ret;
 
     QLIST_FOREACH(node, &(s->notifiers_list), next) {
         VTDAddressSpace *vtd_as = node->vtd_as;
         ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
-                                       vtd_as->devfn, &ce);
-        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
-            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
+                                       vtd_as->devfn, &ce[0]);
+        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce[0].hi)) {
+            vtd_page_walk(&ce[0], addr, addr + (1 << am) * VTD_PAGE_SIZE,
                           vtd_page_invalidate_notify_hook,
                           (void *)&vtd_as->iommu, true);
         }
@@ -2858,7 +2901,7 @@ static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
     VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
     IntelIOMMUState *s = vtd_as->iommu_state;
     uint8_t bus_n = pci_bus_num(vtd_as->bus);
-    VTDContextEntry ce;
+    VTDContextEntry ce[2];
 
     /*
      * The replay can be triggered by either a invalidation or a newly
@@ -2867,12 +2910,12 @@ static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
      */
     vtd_address_space_unmap(vtd_as, n);
 
-    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
+    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce[0]) == 0) {
         trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                   PCI_FUNC(vtd_as->devfn),
-                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
-                                  ce.hi, ce.lo);
-        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
+                                  VTD_CONTEXT_ENTRY_DID(ce[0].hi),
+                                  ce[0].hi, ce[0].lo);
+        vtd_page_walk(&ce[0], 0, ~0ULL, vtd_replay_hook, (void *)n, false);
     } else {
         trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                     PCI_FUNC(vtd_as->devfn));
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index ec1bd17..71a1c1e 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -425,6 +425,15 @@ typedef struct VTDRootEntry VTDRootEntry;
 
 #define VTD_CONTEXT_ENTRY_NR        (VTD_PAGE_SIZE / sizeof(VTDContextEntry))
 
+/* Definition for Extended Context */
+#define VTD_EXT_CONTEXT_ENTRY_RSVD_LOW0   (~(VTD_HAW_MASK))
+#define VTD_EXT_CONTEXT_ENTRY_RSVD_HIGH0  0xF0000000ULL
+#define VTD_EXT_CONTEXT_ENTRY_RSVD_LOW1   ((~(VTD_HAW_MASK)) | 0xFF0ULL)
+#define VTD_EXT_CONTEXT_ENTRY_RSVD_HIGH1  ((~(VTD_HAW_MASK)) | 0xFFFULL)
+#define VTD_EXT_CONTEXT_ENTRY_TT          (7ULL << 2)
+#define VTD_EXT_CONTEXT_TT_NO_DEV_IOTLB   (4ULL << 2)
+#define VTD_EXT_CONTEXT_TT_DEV_IOTLB      (5ULL << 2)
+
 /* Paging Structure common */
 #define VTD_SL_PT_PAGE_SIZE_MASK    (1ULL << 7)
 /* Bits to decide the offset for each level */
diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h
index fa5963e..ae21fe5 100644
--- a/include/hw/i386/intel_iommu.h
+++ b/include/hw/i386/intel_iommu.h
@@ -76,7 +76,7 @@ struct VTDContextCacheEntry {
      * context_cache_gen!=IntelIOMMUState.context_cache_gen
      */
     uint32_t context_cache_gen;
-    struct VTDContextEntry context_entry;
+    struct VTDContextEntry context_entry[2];
 };
 
 struct VTDAddressSpace {
-- 
1.9.1

  parent reply	other threads:[~2017-04-26 10:23 UTC|newest]

Thread overview: 42+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-04-26 10:06 [Qemu-devel] [RFC PATCH 00/20] Qemu: Extend intel_iommu emulator to support Shared Virtual Memory Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 01/20] intel_iommu: add "ecs" option Liu, Yi L
2017-04-26 10:06 ` Liu, Yi L [this message]
2017-04-27 10:32   ` [Qemu-devel] [RFC PATCH 02/20] intel_iommu: exposed extended-context mode to guest Peter Xu
2017-04-28  6:00     ` Lan Tianyu
2017-04-28  9:56       ` Liu, Yi L
2017-04-28  9:55     ` Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 03/20] intel_iommu: add "svm" option Liu, Yi L
2017-04-27 10:53   ` Peter Xu
2017-05-04 20:28     ` Alex Williamson
2017-05-04 20:37       ` Raj, Ashok
2017-05-08 10:38     ` Liu, Yi L
2017-05-08 11:20       ` Peter Xu
2017-05-08  8:15         ` Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 04/20] Memory: modify parameter in IOMMUNotifier func Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 05/20] VFIO: add new IOCTL for svm bind tasks Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 06/20] VFIO: add new notifier for binding PASID table Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 07/20] VFIO: check notifier flag in region_del() Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 08/20] Memory: add notifier flag check in memory_replay() Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 09/20] Memory: introduce iommu_ops->record_device Liu, Yi L
2017-04-28  6:46   ` Lan Tianyu
2017-05-19  5:23     ` Liu, Yi L
2017-05-19  9:07       ` Tian, Kevin
2017-05-19  9:35         ` Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 10/20] VFIO: notify vIOMMU emulator when device is assigned Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 11/20] intel_iommu: provide iommu_ops->record_device Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 12/20] Memory: Add func to fire pasidt_bind notifier Liu, Yi L
2017-04-26 13:50   ` Paolo Bonzini
2017-04-27  2:37     ` Liu, Yi L
2017-04-27  6:14       ` Peter Xu
2017-04-27 10:09         ` Peter Xu
2017-04-27 10:25         ` Liu, Yi L
2017-04-27 10:51           ` Peter Xu
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 13/20] IOMMU: add pasid_table_info for guest pasid table Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 14/20] intel_iommu: add FOR_EACH_ASSIGN_DEVICE macro Liu, Yi L
2017-04-28  7:33   ` Lan Tianyu
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 15/20] intel_iommu: link whole guest pasid table to host Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 16/20] VFIO: Add notifier for propagating IOMMU TLB invalidate Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 17/20] Memory: Add func to fire TLB invalidate notifier Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 18/20] intel_iommu: propagate Extended-IOTLB invalidate to host Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 19/20] intel_iommu: propagate PASID-Cache " Liu, Yi L
2017-04-26 10:06 ` [Qemu-devel] [RFC PATCH 20/20] intel_iommu: propagate Ext-Device-TLB " Liu, Yi L

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1493201210-14357-3-git-send-email-yi.l.liu@linux.intel.com \
    --to=yi.l.liu@linux.intel.com \
    --cc=alex.williamson@redhat.com \
    --cc=ashok.raj@intel.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=jacob.jun.pan@intel.com \
    --cc=jasowang@redhat.com \
    --cc=jean-philippe.brucker@arm.com \
    --cc=kevin.tian@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=peterx@redhat.com \
    --cc=qemu-devel@nongnu.org \
    --cc=tianyu.lan@intel.com \
    --cc=yi.l.liu@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).