From: Lan Tianyu <tianyu.lan@intel.com>
To: xen-devel@lists.xen.org
Cc: Lan Tianyu <tianyu.lan@intel.com>,
kevin.tian@intel.com, wei.liu2@citrix.com,
andrew.cooper3@citrix.com, ian.jackson@eu.citrix.com,
jbeulich@suse.com, Chao Gao <chao.gao@intel.com>
Subject: [PATCH 21/25] Tools/libxc: Add a new interface to bind remapping format msi with pirq
Date: Thu, 29 Jun 2017 01:50:53 -0400 [thread overview]
Message-ID: <1498715457-16565-22-git-send-email-tianyu.lan@intel.com> (raw)
In-Reply-To: <1498715457-16565-1-git-send-email-tianyu.lan@intel.com>
From: Chao Gao <chao.gao@intel.com>
Introduce a new binding relationship between a remapping-format MSI and a
pirq, and provide a new interface to manage this relationship.
Signed-off-by: Chao Gao <chao.gao@intel.com>
Signed-off-by: Lan Tianyu <tianyu.lan@intel.com>
---
tools/libxc/include/xenctrl.h | 17 ++++++
tools/libxc/xc_domain.c | 53 +++++++++++++++++
xen/drivers/passthrough/io.c | 135 +++++++++++++++++++++++++++++++++++-------
xen/include/public/domctl.h | 7 +++
xen/include/xen/hvm/irq.h | 7 +++
5 files changed, 198 insertions(+), 21 deletions(-)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 51ceeb9..75aaa9c 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1710,6 +1710,15 @@ int xc_domain_ioport_mapping(xc_interface *xch,
uint32_t nr_ports,
uint32_t add_mapping);
+int xc_domain_update_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr,
+ uint64_t gtable);
+
int xc_domain_update_msi_irq(
xc_interface *xch,
uint32_t domid,
@@ -1724,6 +1733,14 @@ int xc_domain_unbind_msi_irq(xc_interface *xch,
uint32_t pirq,
uint32_t gflags);
+int xc_domain_unbind_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr);
+
int xc_domain_bind_pt_irq(xc_interface *xch,
uint32_t domid,
uint8_t machine_irq,
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 5d192ea..58623af 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1657,8 +1657,34 @@ int xc_deassign_dt_device(
return rc;
}
+int xc_domain_update_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr,
+ uint64_t gtable)
+{
+ int rc;
+ xen_domctl_bind_pt_irq_t *bind;
+
+ DECLARE_DOMCTL;
+ domctl.cmd = XEN_DOMCTL_bind_pt_irq;
+ domctl.domain = (domid_t)domid;
+ bind = &(domctl.u.bind_pt_irq);
+ bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+ bind->machine_irq = pirq;
+ bind->u.msi_ir.source_id = source_id;
+ bind->u.msi_ir.data = data;
+ bind->u.msi_ir.addr = addr;
+ bind->u.msi_ir.gtable = gtable;
+
+ rc = do_domctl(xch, &domctl);
+ return rc;
+}
int xc_domain_update_msi_irq(
xc_interface *xch,
@@ -1687,6 +1713,33 @@ int xc_domain_update_msi_irq(
return rc;
}
+int xc_domain_unbind_msi_irq_remapping(
+ xc_interface *xch,
+ uint32_t domid,
+ uint32_t pirq,
+ uint32_t source_id,
+ uint32_t data,
+ uint64_t addr)
+{
+ int rc;
+ xen_domctl_bind_pt_irq_t *bind;
+
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
+ domctl.domain = (domid_t)domid;
+
+ bind = &(domctl.u.bind_pt_irq);
+ bind->irq_type = PT_IRQ_TYPE_MSI_IR;
+ bind->machine_irq = pirq;
+ bind->u.msi_ir.source_id = source_id;
+ bind->u.msi_ir.data = data;
+ bind->u.msi_ir.addr = addr;
+
+ rc = do_domctl(xch, &domctl);
+ return rc;
+}
+
int xc_domain_unbind_msi_irq(
xc_interface *xch,
uint32_t domid,
diff --git a/xen/drivers/passthrough/io.c b/xen/drivers/passthrough/io.c
index 2158a11..599f481 100644
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -259,6 +259,92 @@ static struct vcpu *vector_hashing_dest(const struct domain *d,
return dest;
}
+static inline void set_hvm_gmsi_info(struct hvm_gmsi_info *msi,
+ xen_domctl_bind_pt_irq_t *pt_irq_bind)
+{
+ if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
+ {
+ msi->legacy.gvec = pt_irq_bind->u.msi.gvec;
+ msi->legacy.gflags = pt_irq_bind->u.msi.gflags;
+ }
+ else if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_IR )
+ {
+ msi->intremap.source_id = pt_irq_bind->u.msi_ir.source_id;
+ msi->intremap.data = pt_irq_bind->u.msi_ir.data;
+ msi->intremap.addr = pt_irq_bind->u.msi_ir.addr;
+ }
+ else
+ BUG();
+}
+
+static inline void clear_hvm_gmsi_info(struct hvm_gmsi_info *msi, int irq_type)
+{
+ if ( irq_type == PT_IRQ_TYPE_MSI )
+ {
+ msi->legacy.gvec = 0;
+ msi->legacy.gflags = 0;
+ }
+ else if ( irq_type == PT_IRQ_TYPE_MSI_IR )
+ {
+ msi->intremap.source_id = 0;
+ msi->intremap.data = 0;
+ msi->intremap.addr = 0;
+ }
+ else
+ BUG();
+}
+
+static inline bool hvm_gmsi_info_need_update(struct hvm_gmsi_info *msi,
+ xen_domctl_bind_pt_irq_t *pt_irq_bind)
+{
+ if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI )
+ return ((msi->legacy.gvec != pt_irq_bind->u.msi.gvec) ||
+ (msi->legacy.gflags != pt_irq_bind->u.msi.gflags));
+ else if ( pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_IR )
+ return ((msi->intremap.source_id != pt_irq_bind->u.msi_ir.source_id) ||
+ (msi->intremap.data != pt_irq_bind->u.msi_ir.data) ||
+ (msi->intremap.addr != pt_irq_bind->u.msi_ir.addr));
+ BUG();
+ return 0;
+}
+
+static int pirq_dpci_2_msi_attr(struct domain *d,
+ struct hvm_pirq_dpci *pirq_dpci, uint8_t *gvec,
+ uint8_t *dest, uint8_t *dm, uint8_t *dlm)
+{
+ int rc = 0;
+
+ if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI )
+ {
+ *gvec = pirq_dpci->gmsi.legacy.gvec;
+ *dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
+ *dm = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
+ *dlm = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
+ GFLAGS_SHIFT_DELIV_MODE;
+ }
+ else if ( pirq_dpci->flags & HVM_IRQ_DPCI_GUEST_MSI_IR )
+ {
+ struct irq_remapping_request request;
+ struct irq_remapping_info irq_info;
+
+ irq_request_msi_fill(&request, pirq_dpci->gmsi.intremap.source_id,
+ pirq_dpci->gmsi.intremap.addr,
+ pirq_dpci->gmsi.intremap.data);
+ /* Currently, only viommu 0 is supported */
+ rc = viommu_get_irq_info(d, 0, &request, &irq_info);
+ if ( !rc )
+ {
+ *gvec = irq_info.vector;
+ *dest = irq_info.dest;
+ *dm = irq_info.dest_mode;
+ *dlm = irq_info.delivery_mode;
+ }
+ }
+ else
+ BUG();
+ return rc;
+}
+
int pt_irq_create_bind(
struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
{
@@ -316,17 +402,21 @@ int pt_irq_create_bind(
switch ( pt_irq_bind->irq_type )
{
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
{
- uint8_t dest, dest_mode, delivery_mode;
+ uint8_t dest = 0, dest_mode = 0, delivery_mode = 0, gvec;
int dest_vcpu_id;
const struct vcpu *vcpu;
+ bool ir = (pt_irq_bind->irq_type == PT_IRQ_TYPE_MSI_IR);
+ uint64_t gtable = ir ? pt_irq_bind->u.msi_ir.gtable :
+ pt_irq_bind->u.msi.gtable;
if ( !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
{
pirq_dpci->flags = HVM_IRQ_DPCI_MAPPED | HVM_IRQ_DPCI_MACH_MSI |
- HVM_IRQ_DPCI_GUEST_MSI;
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
+ (ir ? HVM_IRQ_DPCI_GUEST_MSI_IR :
+ HVM_IRQ_DPCI_GUEST_MSI);
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind);
/*
* 'pt_irq_create_bind' can be called after 'pt_irq_destroy_bind'.
* The 'pirq_cleanup_check' which would free the structure is only
@@ -341,9 +431,9 @@ int pt_irq_create_bind(
pirq_dpci->dom = d;
/* bind after hvm_irq_dpci is setup to avoid race with irq handler*/
rc = pirq_guest_bind(d->vcpu[0], info, 0);
- if ( rc == 0 && pt_irq_bind->u.msi.gtable )
+ if ( rc == 0 && gtable )
{
- rc = msixtbl_pt_register(d, info, pt_irq_bind->u.msi.gtable);
+ rc = msixtbl_pt_register(d, info, gtable);
if ( unlikely(rc) )
{
pirq_guest_unbind(d, info);
@@ -358,8 +448,7 @@ int pt_irq_create_bind(
}
if ( unlikely(rc) )
{
- pirq_dpci->gmsi.legacy.gflags = 0;
- pirq_dpci->gmsi.legacy.gvec = 0;
+ clear_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind->irq_type);
pirq_dpci->dom = NULL;
pirq_dpci->flags = 0;
pirq_cleanup_check(info, d);
@@ -369,7 +458,8 @@ int pt_irq_create_bind(
}
else
{
- uint32_t mask = HVM_IRQ_DPCI_MACH_MSI | HVM_IRQ_DPCI_GUEST_MSI;
+ uint32_t mask = HVM_IRQ_DPCI_MACH_MSI |
+ (ir ? HVM_IRQ_DPCI_GUEST_MSI_IR : HVM_IRQ_DPCI_GUEST_MSI);
if ( (pirq_dpci->flags & mask) != mask )
{
@@ -378,29 +468,31 @@ int pt_irq_create_bind(
}
/* If pirq is already mapped as vmsi, update guest data/addr. */
- if ( pirq_dpci->gmsi.legacy.gvec != pt_irq_bind->u.msi.gvec ||
- pirq_dpci->gmsi.legacy.gflags != pt_irq_bind->u.msi.gflags )
+ if ( hvm_gmsi_info_need_update(&pirq_dpci->gmsi, pt_irq_bind) )
{
/* Directly clear pending EOIs before enabling new MSI info. */
pirq_guest_eoi(info);
- pirq_dpci->gmsi.legacy.gvec = pt_irq_bind->u.msi.gvec;
- pirq_dpci->gmsi.legacy.gflags = pt_irq_bind->u.msi.gflags;
+ set_hvm_gmsi_info(&pirq_dpci->gmsi, pt_irq_bind);
}
}
/* Calculate dest_vcpu_id for MSI-type pirq migration. */
- dest = pirq_dpci->gmsi.legacy.gflags & VMSI_DEST_ID_MASK;
- dest_mode = !!(pirq_dpci->gmsi.legacy.gflags & VMSI_DM_MASK);
- delivery_mode = (pirq_dpci->gmsi.legacy.gflags & VMSI_DELIV_MASK) >>
- GFLAGS_SHIFT_DELIV_MODE;
-
- dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
+ rc = pirq_dpci_2_msi_attr(d, pirq_dpci, &gvec, &dest, &dest_mode,
+ &delivery_mode);
+ if ( unlikely(rc) )
+ {
+ spin_unlock(&d->event_lock);
+ return -EFAULT;
+ }
+ else
+ dest_vcpu_id = hvm_girq_dest_2_vcpu_id(d, dest, dest_mode);
pirq_dpci->gmsi.dest_vcpu_id = dest_vcpu_id;
spin_unlock(&d->event_lock);
pirq_dpci->gmsi.posted = false;
vcpu = (dest_vcpu_id >= 0) ? d->vcpu[dest_vcpu_id] : NULL;
- if ( iommu_intpost )
+ /* Currently, don't use interrupt posting for guest's remapping MSIs */
+ if ( iommu_intpost && !ir )
{
if ( delivery_mode == dest_LowestPrio )
vcpu = vector_hashing_dest(d, dest, dest_mode,
@@ -412,7 +504,7 @@ int pt_irq_create_bind(
hvm_migrate_pirqs(d->vcpu[dest_vcpu_id]);
/* Use interrupt posting if it is supported. */
- if ( iommu_intpost )
+ if ( iommu_intpost && !ir )
pi_update_irte(vcpu ? &vcpu->arch.hvm_vmx.pi_desc : NULL,
info, pirq_dpci->gmsi.legacy.gvec);
@@ -545,6 +637,7 @@ int pt_irq_destroy_bind(
}
break;
case PT_IRQ_TYPE_MSI:
+ case PT_IRQ_TYPE_MSI_IR:
break;
default:
return -EOPNOTSUPP;
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 7581df3..30535e0 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -555,6 +555,7 @@ typedef enum pt_irq_type_e {
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
+ PT_IRQ_TYPE_MSI_IR,
} pt_irq_type_t;
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
@@ -575,6 +576,12 @@ struct xen_domctl_bind_pt_irq {
uint64_aligned_t gtable;
} msi;
struct {
+ uint32_t source_id;
+ uint32_t data;
+ uint64_t addr;
+ uint64_aligned_t gtable;
+ } msi_ir;
+ struct {
uint16_t spi;
} spi;
} u;
diff --git a/xen/include/xen/hvm/irq.h b/xen/include/xen/hvm/irq.h
index 5f8e2f4..9e93459 100644
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -40,6 +40,7 @@ struct dev_intx_gsi_link {
#define _HVM_IRQ_DPCI_EOI_LATCH_SHIFT 3
#define _HVM_IRQ_DPCI_GUEST_PCI_SHIFT 4
#define _HVM_IRQ_DPCI_GUEST_MSI_SHIFT 5
+#define _HVM_IRQ_DPCI_GUEST_MSI_IR_SHIFT 6
#define _HVM_IRQ_DPCI_TRANSLATE_SHIFT 15
#define HVM_IRQ_DPCI_MACH_PCI (1 << _HVM_IRQ_DPCI_MACH_PCI_SHIFT)
#define HVM_IRQ_DPCI_MACH_MSI (1 << _HVM_IRQ_DPCI_MACH_MSI_SHIFT)
@@ -47,6 +48,7 @@ struct dev_intx_gsi_link {
#define HVM_IRQ_DPCI_EOI_LATCH (1 << _HVM_IRQ_DPCI_EOI_LATCH_SHIFT)
#define HVM_IRQ_DPCI_GUEST_PCI (1 << _HVM_IRQ_DPCI_GUEST_PCI_SHIFT)
#define HVM_IRQ_DPCI_GUEST_MSI (1 << _HVM_IRQ_DPCI_GUEST_MSI_SHIFT)
+#define HVM_IRQ_DPCI_GUEST_MSI_IR (1 << _HVM_IRQ_DPCI_GUEST_MSI_IR_SHIFT)
#define HVM_IRQ_DPCI_TRANSLATE (1 << _HVM_IRQ_DPCI_TRANSLATE_SHIFT)
#define VMSI_DEST_ID_MASK 0xff
@@ -65,6 +67,11 @@ struct hvm_gmsi_info {
uint32_t gvec;
uint32_t gflags;
} legacy;
+ struct {
+ uint32_t source_id;
+ uint32_t data;
+ uint64_t addr;
+ } intremap;
};
int dest_vcpu_id; /* -1 :multi-dest, non-negative: dest_vcpu_id */
bool posted; /* directly deliver to guest via VT-d PI? */
--
1.8.3.1
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-06-29 5:50 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-06-29 5:50 [PATCH 00/25] xen/vIOMMU: Add vIOMMU support with irq remapping function of virtual vtd Lan Tianyu
2017-06-29 5:50 ` [PATCH 1/25] VIOMMU: Add vIOMMU helper functions to create, destroy and query capabilities Lan Tianyu
2017-06-30 13:05 ` Wei Liu
2017-07-04 1:46 ` Lan Tianyu
2017-07-04 7:34 ` Julien Grall
2017-07-04 7:53 ` Lan Tianyu
2017-07-04 7:57 ` Jan Beulich
2017-07-04 10:16 ` Julien Grall
2017-07-04 10:18 ` Julien Grall
2017-07-04 7:55 ` Jan Beulich
2017-07-04 8:45 ` Lan Tianyu
2017-07-04 10:03 ` Jan Beulich
2017-06-29 5:50 ` [PATCH 2/25] DOMCTL: Introduce new DOMCTL commands for vIOMMU support Lan Tianyu
2017-06-30 13:07 ` Wei Liu
2017-06-29 5:50 ` [PATCH 3/25] VIOMMU: Add irq request callback to deal with irq remapping Lan Tianyu
2017-06-29 5:50 ` [PATCH 4/25] VIOMMU: Add get irq info callback to convert irq remapping request Lan Tianyu
2017-06-29 5:50 ` [PATCH 5/25] Xen/doc: Add Xen virtual IOMMU doc Lan Tianyu
2017-07-04 10:39 ` Julien Grall
2017-07-05 3:15 ` Lan Tianyu
2017-07-05 13:25 ` Julien Grall
2017-07-06 3:10 ` Lan Tianyu
2017-07-07 16:08 ` Julien Grall
2017-07-12 3:09 ` Lan Tianyu
2017-07-12 7:26 ` Julien Grall
2017-07-12 11:44 ` Lan Tianyu
2017-07-06 6:20 ` Lan Tianyu
2017-07-07 16:16 ` Julien Grall
2017-07-12 5:34 ` Lan Tianyu
2017-06-29 5:50 ` [PATCH 6/25] Tools/libxc: Add viommu operations in libxc Lan Tianyu
2017-06-30 13:44 ` Wei Liu
2017-06-29 5:50 ` [PATCH 7/25] Tools/libacpi: Add DMA remapping reporting (DMAR) ACPI table structures Lan Tianyu
2017-06-29 5:50 ` [PATCH 8/25] Tools/libacpi: Add new fields in acpi_config to build DMAR table Lan Tianyu
2017-06-29 5:50 ` [PATCH 9/25] Tools/libacpi: Add a user configurable parameter to control vIOMMU attributes Lan Tianyu
2017-06-30 13:44 ` Wei Liu
2017-06-29 5:50 ` [PATCH 10/25] libxl: create vIOMMU during domain construction Lan Tianyu
2017-06-30 13:45 ` Wei Liu
2017-07-04 10:46 ` Julien Grall
2017-07-04 11:03 ` Wei Liu
2017-07-05 10:53 ` Lan Tianyu
2017-07-05 11:19 ` Wei Liu
2017-07-05 11:32 ` Lan Tianyu
2017-07-05 11:39 ` Wei Liu
2017-06-29 5:50 ` [PATCH 11/25] x86/hvm: Introduce a emulated VTD for HVM Lan Tianyu
2017-06-29 5:50 ` [PATCH 12/25] X86/vvtd: Add MMIO handler for VVTD Lan Tianyu
2017-06-30 13:46 ` Wei Liu
2017-06-29 5:50 ` [PATCH 13/25] X86/vvtd: Set Interrupt Remapping Table Pointer through GCMD Lan Tianyu
2017-06-29 5:50 ` [PATCH 14/25] X86/vvtd: Process interrupt remapping request Lan Tianyu
2017-06-29 5:50 ` [PATCH 15/25] x86/vvtd: decode interrupt attribute from IRTE Lan Tianyu
2017-06-29 5:50 ` [PATCH 16/25] x86/vioapic: Hook interrupt delivery of vIOAPIC Lan Tianyu
2017-06-29 5:50 ` [PATCH 17/25] X86/vvtd: Enable Queued Invalidation through GCMD Lan Tianyu
2017-06-29 5:50 ` [PATCH 18/25] X86/vvtd: Enable Interrupt Remapping " Lan Tianyu
2017-06-29 5:50 ` [PATCH 19/25] x86/vioapic: introduce a function to get vector from pin Lan Tianyu
2017-06-29 5:50 ` [PATCH 20/25] passthrough: move some fields of hvm_gmsi_info to a sub-structure Lan Tianyu
2017-06-29 5:50 ` Lan Tianyu [this message]
2017-06-30 13:48 ` [PATCH 21/25] Tools/libxc: Add a new interface to bind remapping format msi with pirq Wei Liu
2017-06-29 5:50 ` [PATCH 22/25] x86/vmsi: Hook delivering remapping format msi to guest Lan Tianyu
2017-06-29 5:50 ` [PATCH 23/25] x86/vvtd: Handle interrupt translation faults Lan Tianyu
2017-06-29 5:50 ` [PATCH 24/25] x86/vvtd: Add queued invalidation (QI) support Lan Tianyu
2017-06-29 5:50 ` [PATCH 25/25] x86/vvtd: save and restore emulated VT-d Lan Tianyu
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1498715457-16565-22-git-send-email-tianyu.lan@intel.com \
--to=tianyu.lan@intel.com \
--cc=andrew.cooper3@citrix.com \
--cc=chao.gao@intel.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=kevin.tian@intel.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).