From: Chao Gao <chao.gao@intel.com>
To: xen-devel@lists.xen.org
Cc: "Lan Tianyu" <tianyu.lan@intel.com>,
"Kevin Tian" <kevin.tian@intel.com>,
"Stefano Stabellini" <sstabellini@kernel.org>,
"Wei Liu" <wei.liu2@citrix.com>,
"Konrad Rzeszutek Wilk" <konrad.wilk@oracle.com>,
"George Dunlap" <george.dunlap@eu.citrix.com>,
"Ian Jackson" <ian.jackson@eu.citrix.com>,
"Tim Deegan" <tim@xen.org>, "Jan Beulich" <jbeulich@suse.com>,
"Andrew Cooper" <andrew.cooper3@citrix.com>,
"Chao Gao" <chao.gao@intel.com>,
"Roger Pau Monné" <roger.pau@citrix.com>
Subject: [PATCH v4 14/28] x86/vvtd: Handle interrupt translation faults
Date: Fri, 17 Nov 2017 14:22:21 +0800
Message-ID: <1510899755-40237-15-git-send-email-chao.gao@intel.com>
In-Reply-To: <1510899755-40237-1-git-send-email-chao.gao@intel.com>
Interrupt translation faults are non-recoverable faults. When such a fault
is triggered, the fault information needs to be populated into the Fault
Recording Registers and an MSI injected to notify the guest IOMMU driver
to deal with the fault.
This patch emulates hardware's handling of interrupt translation
faults (more information about the process can be found in the VT-d spec,
chapter "Translation Faults", section "Non-Recoverable Fault
Reporting" and section "Non-Recoverable Logging").
Specifically, vvtd_record_fault() records the fault information and
vvtd_report_non_recoverable_fault() reports faults to software.
Currently, only Primary Fault Logging is supported and the Number of
Fault-recording Registers is 1.
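The guest-visible servicing flow this emulation targets is: receive the
fault event MSI, read FSTS, walk the Fault Recording Registers starting at
the FRI field, release each faulted record by writing 1 to its F field
(RW1C), then acknowledge the status bits. A minimal sketch of such a
guest-side handler, assuming hypothetical mmio_read32()/mmio_write32()
accessors and reusing the constants this patch adds to iommu.h:

static void guest_service_faults(volatile uint8_t *vtd, unsigned int frcd_base,
                                 unsigned int nr_frcd)
{
    uint32_t fsts = mmio_read32(vtd + DMAR_FSTS_REG);
    unsigned int fri = dma_fsts_fault_record_index(fsts);
    unsigned int i;

    for ( i = 0; i < nr_frcd; i++, fri = (fri + 1) % nr_frcd )
    {
        unsigned int frcd3 = frcd_base + fri * DMA_FRCD_LEN + DMA_FRCD3_OFFSET;

        if ( !(mmio_read32(vtd + frcd3) & DMA_FRCD_F) )
            break;

        /* ... decode source id and fault reason from the record ... */

        /* F is RW1C: writing 1 releases the record. */
        mmio_write32(vtd + frcd3, (uint32_t)DMA_FRCD_F);
    }

    /* Acknowledge the RW1C status bits that were observed set. */
    mmio_write32(vtd + DMAR_FSTS_REG, fsts & DMA_FSTS_RW1CS);
}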
Signed-off-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
---
v4:
- introduce a lock to protect fault-event related regs
---
xen/drivers/passthrough/vtd/iommu.h | 51 ++++++-
xen/drivers/passthrough/vtd/vvtd.c | 288 +++++++++++++++++++++++++++++++++++-
2 files changed, 333 insertions(+), 6 deletions(-)
diff --git a/xen/drivers/passthrough/vtd/iommu.h b/xen/drivers/passthrough/vtd/iommu.h
index 82edd2a..dc2df75 100644
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -196,26 +196,67 @@
#define DMA_CCMD_CAIG_MASK(x) (((u64)x) & ((u64) 0x3 << 59))
/* FECTL_REG */
-#define DMA_FECTL_IM ((uint32_t)1 << 31)
+#define DMA_FECTL_IM_SHIFT 31
+#define DMA_FECTL_IP_SHIFT 30
+#define DMA_FECTL_IM ((uint32_t)1 << DMA_FECTL_IM_SHIFT)
+#define DMA_FECTL_IP ((uint32_t)1 << DMA_FECTL_IP_SHIFT)
/* FSTS_REG */
-#define DMA_FSTS_PFO ((uint32_t)1 << 0)
-#define DMA_FSTS_PPF ((uint32_t)1 << 1)
+#define DMA_FSTS_PFO_SHIFT 0
+#define DMA_FSTS_PPF_SHIFT 1
+#define DMA_FSTS_PRO_SHIFT 7
+
+#define DMA_FSTS_PFO ((uint32_t)1 << DMA_FSTS_PFO_SHIFT)
+#define DMA_FSTS_PPF ((uint32_t)1 << DMA_FSTS_PPF_SHIFT)
#define DMA_FSTS_AFO ((uint32_t)1 << 2)
#define DMA_FSTS_APF ((uint32_t)1 << 3)
#define DMA_FSTS_IQE ((uint32_t)1 << 4)
#define DMA_FSTS_ICE ((uint32_t)1 << 5)
#define DMA_FSTS_ITE ((uint32_t)1 << 6)
-#define DMA_FSTS_FAULTS DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE
+#define DMA_FSTS_PRO ((uint32_t)1 << DMA_FSTS_PRO_SHIFT)
+#define DMA_FSTS_FAULTS (DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_AFO | \
+ DMA_FSTS_APF | DMA_FSTS_IQE | DMA_FSTS_ICE | \
+ DMA_FSTS_ITE | DMA_FSTS_PRO)
+#define DMA_FSTS_RW1CS (DMA_FSTS_PFO | DMA_FSTS_AFO | DMA_FSTS_APF | \
+ DMA_FSTS_IQE | DMA_FSTS_ICE | DMA_FSTS_ITE | \
+ DMA_FSTS_PRO)
#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
/* FRCD_REG, 32 bits access */
-#define DMA_FRCD_F (((u64)1) << 31)
+#define DMA_FRCD_LEN 0x10
+#define DMA_FRCD2_OFFSET 0x8
+#define DMA_FRCD3_OFFSET 0xc
+#define DMA_FRCD_F_SHIFT 31
+#define DMA_FRCD_F ((u64)1 << DMA_FRCD_F_SHIFT)
#define dma_frcd_type(d) ((d >> 30) & 1)
#define dma_frcd_fault_reason(c) (c & 0xff)
#define dma_frcd_source_id(c) (c & 0xffff)
#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
+struct vtd_fault_record_register
+{
+ union {
+ struct {
+ uint64_t lo;
+ uint64_t hi;
+ } bits;
+ struct {
+ uint64_t rsvd0 :12,
+ fault_info :52;
+ uint64_t source_id :16,
+ rsvd1 :13,
+ pmr :1, /* Privilege Mode Requested */
+ exe :1, /* Execute Permission Requested */
+ pasid_p :1, /* PASID Present */
+ fault_reason :8, /* Fault Reason */
+ pasid_val :20, /* PASID Value */
+ addr_type :2, /* Address Type */
+ type :1, /* Type. (0) Write (1) Read/AtomicOp */
+ fault :1; /* Fault */
+ } fields;
+ };
+};
+
/* Interrupt remapping transition faults */
#define VTD_FR_IR_REQ_RSVD 0x20
#define VTD_FR_IR_INDEX_OVER 0x21
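For illustration (not part of the patch): filling a fault record for an
interrupt translation fault with the layout above; irt_index and source_id
are placeholder variables.

struct vtd_fault_record_register frcd;

memset(&frcd, 0, sizeof(frcd));
frcd.fields.fault_info = irt_index;              /* index of the failing IRTE */
frcd.fields.source_id = source_id;               /* requester id (BDF) */
frcd.fields.fault_reason = VTD_FR_IR_INDEX_OVER; /* example fault reason */
frcd.fields.fault = 1;                           /* F, i.e. bit 127 of the FRCD */

/* frcd.bits.lo maps to FRCD bytes 0-7 and frcd.bits.hi to bytes 8-15. */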
diff --git a/xen/drivers/passthrough/vtd/vvtd.c b/xen/drivers/passthrough/vtd/vvtd.c
index d3dec01..83805d1 100644
--- a/xen/drivers/passthrough/vtd/vvtd.c
+++ b/xen/drivers/passthrough/vtd/vvtd.c
@@ -43,6 +43,7 @@
struct hvm_hw_vvtd {
bool eim_enabled;
bool intremap_enabled;
+ uint32_t fault_index;
/* Interrupt remapping table base gfn and the max of entries */
uint16_t irt_max_entry;
@@ -58,6 +59,12 @@ struct vvtd {
struct domain *domain;
/* # of in-flight interrupts */
atomic_t inflight_intr;
+ /*
+ * This lock protects fault-event related registers (DMAR_FEXXX_REG).
+ * It's used for draining in-flight fault events before responding to
+ * the guest's programming of those registers.
+ */
+ spinlock_t fe_lock;
struct hvm_hw_vvtd hw;
void *irt_base;
@@ -87,6 +94,21 @@ boolean_runtime_param("viommu_verbose", viommu_verbose);
#endif
#define VVTD_REG_POS(vvtd, offset) &(vvtd->hw.regs[offset/sizeof(uint32_t)])
+static inline int vvtd_test_and_set_bit(struct vvtd *vvtd, uint32_t reg, int nr)
+{
+ return test_and_set_bit(nr, VVTD_REG_POS(vvtd, reg));
+}
+
+static inline int vvtd_test_and_clear_bit(struct vvtd *vvtd, uint32_t reg,
+ int nr)
+{
+ return test_and_clear_bit(nr, VVTD_REG_POS(vvtd, reg));
+}
+
+static inline int vvtd_test_bit(struct vvtd *vvtd, uint32_t reg, int nr)
+{
+ return test_bit(nr, VVTD_REG_POS(vvtd, reg));
+}
static inline void vvtd_set_bit(struct vvtd *vvtd, uint32_t reg, int nr)
{
@@ -238,6 +260,30 @@ static int vvtd_delivery(struct domain *d, uint8_t vector,
return 0;
}
+static void vvtd_generate_interrupt(const struct vvtd *vvtd, uint64_t addr,
+ uint32_t data)
+{
+ bool dm = addr & MSI_ADDR_DESTMODE_MASK;
+ uint32_t dest = MASK_EXTR(addr, MSI_ADDR_DEST_ID_MASK);
+ uint8_t dlm = MASK_EXTR(data, MSI_DATA_DELIVERY_MODE_MASK);
+ uint8_t tm = MASK_EXTR(data, MSI_DATA_TRIGGER_MASK);
+ uint8_t vector = data & MSI_DATA_VECTOR_MASK;
+
+ vvtd_debug("d%d: generating msi %lx %x\n", vvtd->domain->domain_id, addr,
+ data);
+
+ if ( vvtd->hw.eim_enabled )
+ dest |= (addr >> 40) << 8;
+
+ vvtd_delivery(vvtd->domain, vector, dest, dm, dlm, tm);
+}
+
+static void vvtd_notify_fault(const struct vvtd *vvtd)
+{
+ vvtd_generate_interrupt(vvtd, vvtd_get_reg_quad(vvtd, DMAR_FEADDR_REG),
+ vvtd_get_reg(vvtd, DMAR_FEDATA_REG));
+}
+
/* Computing the IRTE index for a given interrupt request. When success, return
* 0 and set index to reference the corresponding IRTE. Otherwise, return < 0,
* i.e. -1 when the irq request isn't an remapping format.
@@ -290,6 +336,198 @@ static inline uint32_t irte_dest(struct vvtd *vvtd, uint32_t dest)
: MASK_EXTR(dest, IRTE_xAPIC_DEST_MASK);
}
+static void vvtd_report_non_recoverable_fault(struct vvtd *vvtd, int reason)
+{
+ uint32_t fsts = vvtd_get_reg(vvtd, DMAR_FSTS_REG);
+
+ vvtd_set_bit(vvtd, DMAR_FSTS_REG, reason);
+
+ /*
+ * According to the VT-d spec "Non-Recoverable Fault Event" chapter, if
+ * there are any previously reported interrupt conditions that are yet to
+ * be serviced by software, the Fault Event interrupt is not generated.
+ */
+ if ( fsts & DMA_FSTS_FAULTS )
+ return;
+
+ vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_SHIFT);
+ if ( !vvtd_test_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_SHIFT) )
+ {
+ vvtd_notify_fault(vvtd);
+ vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_SHIFT);
+ }
+}
+
+static void vvtd_update_ppf(struct vvtd *vvtd)
+{
+ int i;
+ uint64_t cap = vvtd_get_reg_quad(vvtd, DMAR_CAP_REG);
+ unsigned int base = cap_fault_reg_offset(cap);
+
+ for ( i = 0; i < cap_num_fault_regs(cap); i++ )
+ {
+ if ( vvtd_test_bit(vvtd, base + i * DMA_FRCD_LEN + DMA_FRCD3_OFFSET,
+ DMA_FRCD_F_SHIFT) )
+ {
+ vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PPF_SHIFT);
+ return;
+ }
+ }
+ /*
+ * No primary fault is pending in the Fault Recording Registers, thus clear
+ * the PPF bit in FSTS.
+ */
+ vvtd_clear_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PPF_SHIFT);
+
+ /* If no fault is in FSTS, clear pending bit in FECTL. */
+ if ( !(vvtd_get_reg(vvtd, DMAR_FSTS_REG) & DMA_FSTS_FAULTS) )
+ vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_SHIFT);
+}
+
+/*
+ * Commit a fault to emulated Fault Record Registers.
+ */
+static void vvtd_commit_frcd(struct vvtd *vvtd, int idx,
+ const struct vtd_fault_record_register *frcd)
+{
+ unsigned int base = cap_fault_reg_offset(
+ vvtd_get_reg_quad(vvtd, DMAR_CAP_REG));
+
+ vvtd_set_reg_quad(vvtd, base + idx * DMA_FRCD_LEN, frcd->bits.lo);
+ vvtd_set_reg_quad(vvtd, base + idx * DMA_FRCD_LEN + DMA_FRCD2_OFFSET, frcd->bits.hi);
+ vvtd_update_ppf(vvtd);
+}
+
+/*
+ * Allocate a FRCD for the caller. On success, return the allocated FRI.
+ * Otherwise, return -ENOMEM.
+ */
+static int vvtd_alloc_frcd(struct vvtd *vvtd)
+{
+ int prev;
+ uint64_t cap = vvtd_get_reg_quad(vvtd, DMAR_CAP_REG);
+ unsigned int base = cap_fault_reg_offset(cap);
+
+ /* Set the F bit to indicate the FRCD is in use. */
+ if ( !vvtd_test_and_set_bit(vvtd,
+ base + vvtd->hw.fault_index * DMA_FRCD_LEN +
+ DMA_FRCD3_OFFSET, DMA_FRCD_F_SHIFT) )
+ {
+ prev = vvtd->hw.fault_index;
+ vvtd->hw.fault_index = (prev + 1) % cap_num_fault_regs(cap);
+ return prev;
+ }
+ return -ENOMEM;
+}
+
+static void vvtd_free_frcd(struct vvtd *vvtd, int i)
+{
+ unsigned int base = cap_fault_reg_offset(
+ vvtd_get_reg_quad(vvtd, DMAR_CAP_REG));
+
+ vvtd_clear_bit(vvtd, base + i * DMA_FRCD_LEN + DMA_FRCD3_OFFSET,
+ DMA_FRCD_F_SHIFT);
+}
+
+static int vvtd_record_fault(struct vvtd *vvtd,
+ const struct arch_irq_remapping_request *request,
+ int reason)
+{
+ struct vtd_fault_record_register frcd;
+ int fault_index;
+ uint32_t irt_index;
+
+ spin_lock(&vvtd->fe_lock);
+ switch ( reason )
+ {
+ case VTD_FR_IR_REQ_RSVD:
+ case VTD_FR_IR_INDEX_OVER:
+ case VTD_FR_IR_ENTRY_P:
+ case VTD_FR_IR_ROOT_INVAL:
+ case VTD_FR_IR_IRTE_RSVD:
+ case VTD_FR_IR_REQ_COMPAT:
+ case VTD_FR_IR_SID_ERR:
+ if ( vvtd_test_bit(vvtd, DMAR_FSTS_REG, DMA_FSTS_PFO_SHIFT) )
+ goto out;
+
+ if ( irq_remapping_request_index(request, &irt_index) )
+ goto out;
+
+ /* No available fault record means fault overflow. */
+ fault_index = vvtd_alloc_frcd(vvtd);
+ if ( fault_index < 0 )
+ {
+ vvtd_report_non_recoverable_fault(vvtd, DMA_FSTS_PFO_SHIFT);
+ goto out;
+ }
+ memset(&frcd, 0, sizeof(frcd));
+ frcd.fields.fault_reason = reason;
+ frcd.fields.fault_info = irt_index;
+ frcd.fields.source_id = request->source_id;
+ frcd.fields.fault = 1;
+ vvtd_commit_frcd(vvtd, fault_index, &frcd);
+ break;
+
+ default:
+ vvtd_debug("d%d: can't handle vvtd fault (reason 0x%x)",
+ vvtd->domain->domain_id, reason);
+ break;
+ }
+
+ out:
+ spin_unlock(&vvtd->fe_lock);
+ return X86EMUL_OKAY;
+}
+
+static int vvtd_write_frcd3(struct vvtd *vvtd, uint32_t val)
+{
+ /* Writing 1 to the F field clears the fault (RW1C). */
+ if ( val & DMA_FRCD_F )
+ {
+ vvtd_free_frcd(vvtd, 0);
+ vvtd_update_ppf(vvtd);
+ }
+ return X86EMUL_OKAY;
+}
+
+static void vvtd_write_fectl(struct vvtd *vvtd, uint32_t val)
+{
+ /*
+ * Only the DMA_FECTL_IM bit is writable. Generate the pending fault event
+ * when the interrupt is unmasked.
+ */
+ if ( !(val & DMA_FECTL_IM) )
+ {
+ /* Clear IM */
+ vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_SHIFT);
+ if ( vvtd_test_and_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_SHIFT) )
+ vvtd_notify_fault(vvtd);
+ }
+ else
+ vvtd_set_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IM_SHIFT);
+}
+
+static void vvtd_write_fsts(struct vvtd *vvtd, uint32_t val)
+{
+ int i, max_fault_index = DMA_FSTS_PRO_SHIFT;
+ uint64_t bits_to_clear = val & DMA_FSTS_RW1CS;
+
+ if ( bits_to_clear )
+ {
+ i = find_first_bit(&bits_to_clear, max_fault_index + 1);
+ while ( i <= max_fault_index )
+ {
+ vvtd_clear_bit(vvtd, DMAR_FSTS_REG, i);
+ i = find_next_bit(&bits_to_clear, max_fault_index + 1, i + 1);
+ }
+ }
+
+ /*
+ * Clear the IP field when all status fields in the Fault Status Register
+ * are clear.
+ */
+ if ( !(vvtd_get_reg(vvtd, DMAR_FSTS_REG) & DMA_FSTS_FAULTS) )
+ vvtd_clear_bit(vvtd, DMAR_FECTL_REG, DMA_FECTL_IP_SHIFT);
+}
+
static void write_gcmd_ire(struct vvtd *vvtd, uint32_t val)
{
bool set = val & DMA_GCMD_IRE;
@@ -391,11 +629,47 @@ static int vvtd_read(struct vcpu *v, unsigned long addr,
return X86EMUL_OKAY;
}
+static void vvtd_write_fault_regs(struct vvtd *vvtd, unsigned long val,
+ unsigned int offset, unsigned int len)
+{
+ unsigned int fault_offset = cap_fault_reg_offset(
+ vvtd_get_reg_quad(vvtd, DMAR_CAP_REG));
+
+ spin_lock(&vvtd->fe_lock);
+ for ( ; len; len -= 4, offset += 4, val >>= 32 )
+ {
+ switch ( offset )
+ {
+ case DMAR_FSTS_REG:
+ vvtd_write_fsts(vvtd, val);
+ break;
+
+ case DMAR_FECTL_REG:
+ vvtd_write_fectl(vvtd, val);
+ break;
+
+ case DMAR_FEDATA_REG:
+ case DMAR_FEADDR_REG:
+ case DMAR_FEUADDR_REG:
+ vvtd_set_reg(vvtd, offset, val);
+ break;
+
+ default:
+ if ( offset == (fault_offset + DMA_FRCD3_OFFSET) )
+ vvtd_write_frcd3(vvtd, val);
+ break;
+ }
+ }
+ spin_unlock(&vvtd->fe_lock);
+}
+
static int vvtd_write(struct vcpu *v, unsigned long addr,
unsigned int len, unsigned long val)
{
struct vvtd *vvtd = domain_vvtd(v->domain);
unsigned int offset = addr - vvtd->base_addr;
+ unsigned int fault_offset = cap_fault_reg_offset(
+ vvtd_get_reg_quad(vvtd, DMAR_CAP_REG));
vvtd_info("Write offset %x len %d val %lx\n", offset, len, val);
@@ -419,7 +693,18 @@ static int vvtd_write(struct vcpu *v, unsigned long addr,
vvtd_set_reg(vvtd, offset, val);
break;
+ case DMAR_FSTS_REG:
+ case DMAR_FECTL_REG:
+ case DMAR_FEDATA_REG:
+ case DMAR_FEADDR_REG:
+ case DMAR_FEUADDR_REG:
+ vvtd_write_fault_regs(vvtd, val, offset, len);
+ break;
+
default:
+ if ( (offset == (fault_offset + DMA_FRCD2_OFFSET)) ||
+ (offset == (fault_offset + DMA_FRCD3_OFFSET)) )
+ vvtd_write_fault_regs(vvtd, val, offset, len);
break;
}
@@ -448,7 +733,7 @@ static void vvtd_handle_fault(struct vvtd *vvtd,
case VTD_FR_IR_REQ_RSVD:
case VTD_FR_IR_INDEX_OVER:
case VTD_FR_IR_ROOT_INVAL:
- /* TODO: handle fault (e.g. record and report this fault to VM */
+ vvtd_record_fault(vvtd, irq, fault);
break;
default:
@@ -607,6 +892,7 @@ static int vvtd_create(struct domain *d, struct viommu *viommu)
vvtd->base_addr = viommu->base_address;
vvtd->domain = d;
register_mmio_handler(d, &vvtd_mmio_ops);
+ spin_lock_init(&vvtd->fe_lock);
viommu->priv = vvtd;
--
1.8.3.1
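For context, a sketch of how a guest could program the fault event registers
that vvtd_generate_interrupt() decodes, using the standard x86 MSI encoding;
dest_apic_id, vector, vtd and mmio_write32() are assumptions for illustration:

/* Fixed delivery mode, edge trigger, physical destination (all-zero bits). */
uint64_t addr = 0xfee00000UL | ((uint64_t)dest_apic_id << 12);
uint32_t data = vector;

mmio_write32(vtd + DMAR_FEDATA_REG, data);
mmio_write32(vtd + DMAR_FEADDR_REG, (uint32_t)addr);
mmio_write32(vtd + DMAR_FEUADDR_REG, (uint32_t)(addr >> 32));

/* Writing IM = 0 unmasks fault events; a pending fault (IP) fires at once. */
mmio_write32(vtd + DMAR_FECTL_REG, 0);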
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel