From: Zhi Wang <zhi.a.wang@intel.com>
To: intel-gfx@lists.freedesktop.org, igvt-g@lists.01.org
Cc: daniel.vetter@ffwll.ch, david.j.cowperthwaite@intel.com
Subject: [RFC 16/29] drm/i915: gvt: Generic MPT framework
Date: Thu, 28 Jan 2016 18:21:38 +0800 [thread overview]
Message-ID: <1453976511-27322-17-git-send-email-zhi.a.wang@intel.com> (raw)
In-Reply-To: <1453976511-27322-1-git-send-email-zhi.a.wang@intel.com>
GVT-g supports both Xen and KVM hypervisors and requires a couple of hypervisor
services to work. The MPT framework is an abstraction layer which provides
unified hypervisor APIs to the GVT-g core logic.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
drivers/gpu/drm/i915/gvt/gvt.c | 6 ++
drivers/gpu/drm/i915/gvt/gvt.h | 11 +-
drivers/gpu/drm/i915/gvt/hypercall.h | 26 +++++
drivers/gpu/drm/i915/gvt/mmio.c | 194 +++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/mmio.h | 4 +
drivers/gpu/drm/i915/gvt/mpt.h | 103 +++++++++++++++----
drivers/gpu/drm/i915/gvt/perf.h | 4 +
7 files changed, 326 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 13fecdf..a71873c 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -31,6 +31,11 @@ struct gvt_host gvt_host;
extern struct gvt_kernel_dm xengt_kdm;
extern struct gvt_kernel_dm kvmgt_kdm;
+static struct gvt_io_emulation_ops default_io_emulation_ops = {
+ .emulate_mmio_read = gvt_emulate_mmio_read,
+ .emulate_mmio_write = gvt_emulate_mmio_write,
+};
+
static const char *supported_hypervisors[] = {
[GVT_HYPERVISOR_TYPE_XEN] = "Xen Hypervisor",
[GVT_HYPERVISOR_TYPE_KVM] = "KVM",
@@ -72,6 +77,7 @@ static bool gvt_init_host(void)
gvt_info("Running with hypervisor %s in host mode",
supported_hypervisors[host->hypervisor_type]);
+ host->emulate_ops = &default_io_emulation_ops;
idr_init(&host->device_idr);
mutex_init(&host->device_idr_lock);
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 542f3e6..eb5fd47 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -31,7 +31,6 @@
#include "params.h"
#include "reg.h"
#include "hypercall.h"
-#include "mpt.h"
#include "fb_decoder.h"
#include "mmio.h"
#include "interrupt.h"
@@ -52,12 +51,20 @@ enum {
GVT_HYPERVISOR_TYPE_KVM,
};
+struct gvt_io_emulation_ops {
+ bool (*emulate_mmio_read)(struct vgt_device *, uint64_t, void *, int);
+ bool (*emulate_mmio_write)(struct vgt_device *, uint64_t, void *, int);
+ bool (*emulate_cfg_read)(struct vgt_device *, unsigned int, void *, int);
+ bool (*emulate_cfg_write)(struct vgt_device *, unsigned int, void *, int);
+};
+
struct gvt_host {
bool initialized;
int hypervisor_type;
struct mutex device_idr_lock;
struct idr device_idr;
struct gvt_kernel_dm *kdm;
+ struct gvt_io_emulation_ops *emulate_ops;
};
extern struct gvt_host gvt_host;
@@ -579,4 +586,6 @@ static inline u32 h2g_gtt_index(struct vgt_device *vgt, uint32_t h_index)
return (u32)(h2g_gm(vgt, h_addr) >> GTT_PAGE_SHIFT);
}
+#include "mpt.h"
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
index 0a41874..d30f5a7 100644
--- a/drivers/gpu/drm/i915/gvt/hypercall.h
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -24,7 +24,33 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_
+struct vgt_device;
+struct guest_page;
+
+enum map_type {
+ GVT_MAP_APERTURE,
+ GVT_MAP_OPREGION,
+};
+
struct gvt_kernel_dm {
+ const char *name;
+ unsigned long (*g2m_pfn)(int vm_id, unsigned long g_pfn);
+ int (*pause_domain)(int vm_id);
+ int (*shutdown_domain)(int vm_id);
+ int (*map_mfn_to_gpfn)(int vm_id, unsigned long gpfn,
+ unsigned long mfn, int nr, int map, enum map_type type);
+ int (*set_trap_area)(struct vgt_device *vgt, uint64_t start, uint64_t end, bool map);
+ bool (*set_wp_pages)(struct vgt_device *vgt, struct guest_page *p);
+ bool (*unset_wp_pages)(struct vgt_device *vgt, struct guest_page *p);
+ int (*detect_host)(void);
+ int (*from_virt_to_mfn)(void *addr);
+ void *(*from_mfn_to_virt)(int mfn);
+ int (*inject_msi)(int vm_id, u32 addr, u16 data);
+ int (*hvm_init)(struct vgt_device *vgt);
+ void (*hvm_exit)(struct vgt_device *vgt);
+ void *(*gpa_to_va)(struct vgt_device *vgt, unsigned long gap);
+ bool (*read_va)(struct vgt_device *vgt, void *va, void *val, int len, int atomic);
+ bool (*write_va)(struct vgt_device *vgt, void *va, void *val, int len, int atomic);
};
#endif /* _GVT_HYPERCALL_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 28e1393..3297d82 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -320,3 +320,197 @@ void gvt_init_shadow_mmio_register(struct vgt_device *vgt)
struct gvt_virtual_device_state *state = &vgt->state;
memcpy (state->mmio.sreg, vgt->pdev->initial_mmio_state, vgt->pdev->mmio_size);
}
+
+unsigned int pa_to_mmio_offset(struct vgt_device *vgt,
+ uint64_t pa)
+{
+#define PCI_BAR_ADDR_MASK (~0xFUL) /* 4 LSB bits are not address */
+ return pa - ((*(u64*)(vgt->state.cfg.space + GVT_REG_CFG_SPACE_BAR0))
+ & PCI_BAR_ADDR_MASK);
+}
+
+static inline bool valid_mmio_alignment(struct gvt_mmio_entry *e,
+ unsigned int offset, int bytes)
+{
+ if ((bytes >= e->align_bytes) && !(offset & (bytes - 1)))
+ return true;
+ gvt_err("invalid MMIO offset %08x len %d", offset, bytes);
+ return false;
+}
+
+bool gvt_default_mmio_read(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy(p_data, (char *)vgt->state.mmio.vreg + offset, bytes);
+ return true;
+}
+
+bool gvt_default_mmio_write(struct vgt_device *vgt, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ memcpy((char *)vgt->state.mmio.vreg + offset, p_data, bytes);
+ return true;
+}
+
+bool gvt_emulate_mmio_read(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes)
+{
+ struct pgt_device *pdev = vgt->pdev;
+ struct gvt_statistics *stat = &vgt->stat;
+ struct gvt_mmio_entry *mmio_entry;
+ unsigned int offset;
+ cycles_t t0, t1;
+ bool r;
+
+ t0 = get_cycles();
+
+ mutex_lock(&pdev->lock);
+
+ if (atomic_read(&vgt->gtt.n_write_protected_guest_page)) {
+ guest_page_t *gp;
+ gp = gvt_find_guest_page(vgt, pa >> PAGE_SHIFT);
+ if (gp) {
+ memcpy(p_data, gp->vaddr + (pa & ~PAGE_MASK), bytes);
+ mutex_unlock(&pdev->lock);
+ return true;
+ }
+ }
+
+ offset = pa_to_mmio_offset(vgt, pa);
+
+ if (bytes > 8 || (offset & (bytes - 1)))
+ goto err;
+
+ if (reg_is_gtt(pdev, offset)) {
+ r = gtt_emulate_read(vgt, offset, p_data, bytes);
+ mutex_unlock(&pdev->lock);
+ return r;
+ }
+
+ if (!reg_is_mmio(pdev, offset + bytes))
+ goto err;
+
+ mmio_entry = find_mmio_entry(pdev, offset);
+ if (mmio_entry && mmio_entry->read) {
+ if (!valid_mmio_alignment(mmio_entry, offset, bytes))
+ goto err;
+ if (!mmio_entry->read(vgt, offset, p_data, bytes))
+ goto err;
+ } else
+ if (!gvt_default_mmio_read(vgt, offset, p_data, bytes))
+ goto err;
+
+ if (!reg_is_tracked(pdev, offset) && vgt->warn_untrack) {
+ gvt_warn("[ vgt%d ] untracked MMIO read, offset %x len %d val 0x%x",
+ vgt->vm_id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ printk("------------------------------------------\n");
+ printk("VM(%d) likely triggers a gfx reset\n", vgt->vm_id);
+ printk("Disable untracked MMIO warning for VM(%d)\n", vgt->vm_id);
+ printk("------------------------------------------\n");
+ vgt->warn_untrack = 0;
+ }
+ }
+
+ reg_set_accessed(pdev, offset);
+ mutex_unlock(&pdev->lock);
+
+ t1 = get_cycles();
+ stat->mmio_rcnt++;
+ stat->mmio_rcycles += t1 - t0;
+ return true;
+err:
+ gvt_err("[ vgt%d ] fail to emulate MMIO read, offset %08x len %d",
+ vgt->id, offset, bytes);
+ mutex_unlock(&pdev->lock);
+ return false;
+}
+
+bool gvt_emulate_mmio_write(struct vgt_device *vgt, uint64_t pa,
+ void *p_data, int bytes)
+{
+ struct pgt_device *pdev = vgt->pdev;
+ struct gvt_mmio_entry *mmio_entry;
+ struct gvt_statistics *stat = &vgt->stat;
+ unsigned int offset;
+ u32 old_vreg = 0, old_sreg = 0;
+ cycles_t t0, t1;
+ bool r;
+
+ t0 = get_cycles();
+
+ mutex_lock(&pdev->lock);
+
+ if (atomic_read(&vgt->gtt.n_write_protected_guest_page)) {
+ guest_page_t *guest_page;
+ guest_page = gvt_find_guest_page(vgt, pa >> PAGE_SHIFT);
+ if (guest_page) {
+ r = guest_page->handler(guest_page, pa, p_data, bytes);
+ t1 = get_cycles();
+ stat->wp_cycles += t1 - t0;
+ stat->wp_cnt++;
+ mutex_unlock(&pdev->lock);
+ return r;
+ }
+ }
+
+ offset = pa_to_mmio_offset(vgt, pa);
+
+ /* FENCE registers / GTT entries(sometimes) are accessed in 8 bytes. */
+ if (bytes > 8 || (offset & (bytes - 1)))
+ goto err;
+
+ if (reg_is_gtt(pdev, offset)) {
+ r = gtt_emulate_write(vgt, offset, p_data, bytes);
+ mutex_unlock(&pdev->lock);
+ return r;
+ }
+
+ if (!reg_is_mmio(pdev, offset + bytes))
+ goto err;
+
+ if (reg_mode_ctl(pdev, offset)) {
+ old_vreg = __vreg(vgt, offset);
+ old_sreg = __sreg(vgt, offset);
+ }
+
+ if (!reg_is_tracked(pdev, offset) && vgt->warn_untrack) {
+ gvt_warn("[ vgt%d ] untracked MMIO write, offset %x len %d val 0x%x",
+ vgt->vm_id, offset, bytes, *(u32 *)p_data);
+ }
+
+ mmio_entry = find_mmio_entry(pdev, offset);
+ if (mmio_entry && mmio_entry->write ) {
+ if (!valid_mmio_alignment(mmio_entry, offset, bytes))
+ goto err;
+ if (!mmio_entry->write(vgt, offset, p_data, bytes))
+ goto err;
+ } else
+ if (!gvt_default_mmio_write(vgt, offset, p_data, bytes))
+ goto err;
+
+ /* higher 16bits of mode ctl regs are mask bits for change */
+ if (reg_mode_ctl(pdev, offset)) {
+ u32 mask = __vreg(vgt, offset) >> 16;
+ /*
+ * share the global mask among VMs, since having one VM touch a bit
+ * not changed by another VM should be still saved/restored later
+ */
+ reg_aux_mode_mask(pdev, offset) |= mask << 16;
+ __vreg(vgt, offset) = (old_vreg & ~mask) | (__vreg(vgt, offset) & mask);
+ __sreg(vgt, offset) = (old_sreg & ~mask) | (__sreg(vgt, offset) & mask);
+ }
+
+ reg_set_accessed(pdev, offset);
+ mutex_unlock(&pdev->lock);
+
+ t1 = get_cycles();
+ stat->mmio_wcycles += t1 - t0;
+ stat->mmio_wcnt++;
+ return true;
+err:
+ gvt_err("[ vgt%d ] fail to emulate MMIO write, offset %08x len %d",
+ vgt->id, offset, bytes);
+ mutex_unlock(&pdev->lock);
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index caca60f..4301655 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -84,4 +84,8 @@ struct gvt_reg_info {
extern struct gvt_reg_info gvt_general_reg_info[];
extern struct gvt_reg_info gvt_broadwell_reg_info[];
extern int gvt_get_reg_num(int type);
+
+bool gvt_emulate_mmio_read(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes);
+bool gvt_emulate_mmio_write(struct vgt_device *vgt, uint64_t pa, void *p_data,int bytes);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
index 99acf3d..f837dd1 100644
--- a/drivers/gpu/drm/i915/gvt/mpt.h
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -24,85 +24,146 @@
#ifndef _GVT_MPT_H_
#define _GVT_MPT_H_
-struct guest_page;
-struct vgt_device;
-
static inline unsigned long hypervisor_g2m_pfn(struct vgt_device *vgt,
unsigned long g_pfn)
{
- return 0;
+ return gvt_host.kdm->g2m_pfn(vgt->vm_id, g_pfn);
}
static inline int hypervisor_pause_domain(struct vgt_device *vgt)
{
- return 0;
+ return gvt_host.kdm->pause_domain(vgt->vm_id);
}
static inline int hypervisor_shutdown_domain(struct vgt_device *vgt)
{
+ return gvt_host.kdm->shutdown_domain(vgt->vm_id);
+}
+
+static inline int hypervisor_map_mfn_to_gpfn(struct vgt_device *vgt,
+ unsigned long gpfn, unsigned long mfn, int nr, int map, enum map_type type)
+{
+ if (gvt_host.kdm && gvt_host.kdm->map_mfn_to_gpfn)
+ return gvt_host.kdm->map_mfn_to_gpfn(vgt->vm_id, gpfn, mfn, nr, map, type);
+
return 0;
}
static inline int hypervisor_set_trap_area(struct vgt_device *vgt,
- uint64_t start, uint64_t end, bool map)
+ u64 start, u64 end, bool map)
{
- return 0;
+ return gvt_host.kdm->set_trap_area(vgt, start, end, map);
}
-static inline bool hypervisor_detect_host(void)
+static inline int hypervisor_set_wp_pages(struct vgt_device *vgt, guest_page_t *p)
{
- return false;
+ return gvt_host.kdm->set_wp_pages(vgt, p);
}
-static inline int hypervisor_virt_to_mfn(void *addr)
+static inline int hypervisor_unset_wp_pages(struct vgt_device *vgt, guest_page_t *p)
{
- return 0;
+ return gvt_host.kdm->unset_wp_pages(vgt, p);
}
-static inline void *hypervisor_mfn_to_virt(int mfn)
+static inline int hypervisor_detect_host(void)
{
- return NULL;
+ return gvt_host.kdm->detect_host();
}
-static inline int hypervisor_set_wp_pages(struct vgt_device *vgt, struct guest_page *p)
+static inline int hypervisor_virt_to_mfn(void *addr)
{
- return 0;
+ return gvt_host.kdm->from_virt_to_mfn(addr);
}
-static inline int hypervisor_unset_wp_pages(struct vgt_device *vgt, struct guest_page *p)
+static inline void *hypervisor_mfn_to_virt(int mfn)
{
- return 0;
+ return gvt_host.kdm->from_mfn_to_virt(mfn);
}
static inline void hypervisor_inject_msi(struct vgt_device *vgt)
{
- return;
+#define MSI_CAP_OFFSET 0x90 /* FIXME. need to get from cfg emulation */
+#define MSI_CAP_CONTROL (MSI_CAP_OFFSET + 2)
+#define MSI_CAP_ADDRESS (MSI_CAP_OFFSET + 4)
+#define MSI_CAP_DATA (MSI_CAP_OFFSET + 8)
+#define MSI_CAP_EN 0x1
+
+ char *cfg_space = &vgt->state.cfg.space[0];
+ u16 control = *(u16 *)(cfg_space + MSI_CAP_CONTROL);
+ u32 addr = *(u32 *)(cfg_space + MSI_CAP_ADDRESS);
+ u16 data = *(u16 *)(cfg_space + MSI_CAP_DATA);
+ int r;
+
+ /* Do not generate MSI if MSIEN is disable */
+ if (!(control & MSI_CAP_EN))
+ return;
+
+ /* FIXME: currently only handle one MSI format */
+ ASSERT_NUM(!(control & 0xfffe), control);
+
+ gvt_dbg(GVT_DBG_IRQ, "VM %d hvm injections. address (%x) data(%x)!",
+ vgt->vm_id, addr, data);
+ r = gvt_host.kdm->inject_msi(vgt->vm_id, addr, data);
+ if (r < 0)
+ gvt_err("VGT %d failed to inject vmsi", vgt->id);
}
static inline int hypervisor_hvm_init(struct vgt_device *vgt)
{
+ if (gvt_host.kdm && gvt_host.kdm->hvm_init)
+ return gvt_host.kdm->hvm_init(vgt);
+
return 0;
}
static inline void hypervisor_hvm_exit(struct vgt_device *vgt)
{
+ if (gvt_host.kdm && gvt_host.kdm->hvm_exit)
+ gvt_host.kdm->hvm_exit(vgt);
}
static inline void *hypervisor_gpa_to_va(struct vgt_device *vgt, unsigned long gpa)
{
- return NULL;
+ if (!vgt->vm_id)
+ return (char *)hypervisor_mfn_to_virt(gpa >> PAGE_SHIFT) + offset_in_page(gpa);
+
+ return gvt_host.kdm->gpa_to_va(vgt, gpa);
}
static inline bool hypervisor_read_va(struct vgt_device *vgt, void *va,
void *val, int len, int atomic)
{
- return false;
+ bool ret;
+
+ if (!vgt->vm_id) {
+ memcpy(val, va, len);
+ return true;
+ }
+
+ ret = gvt_host.kdm->read_va(vgt, va, val, len, atomic);
+ if (unlikely(!ret))
+ gvt_err("VM(%d): read va failed, va: 0x%p, atomic : %s\n", vgt->vm_id,
+ va, atomic ? "yes" : "no");
+
+ return ret;
}
static inline bool hypervisor_write_va(struct vgt_device *vgt, void *va,
void *val, int len, int atomic)
{
- return false;
+ bool ret;
+
+ if (!vgt->vm_id) {
+ memcpy(va, val, len);
+ return true;
+ }
+
+ ret = gvt_host.kdm->write_va(vgt, va, val, len, atomic);
+ if (unlikely(!ret))
+ gvt_err("VM(%d): write va failed, va: 0x%p, atomic : %s\n", vgt->vm_id,
+ va, atomic ? "yes" : "no");
+
+ return ret;
}
#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/perf.h b/drivers/gpu/drm/i915/gvt/perf.h
index 146a1cb..21b0637 100644
--- a/drivers/gpu/drm/i915/gvt/perf.h
+++ b/drivers/gpu/drm/i915/gvt/perf.h
@@ -28,6 +28,10 @@ struct gvt_statistics {
u64 irq_num;
u64 events[GVT_EVENT_MAX];
u64 last_injection;
+ u64 mmio_rcnt;
+ u64 mmio_wcnt;
+ u64 mmio_wcycles;
+ u64 mmio_rcycles;
u64 gtt_mmio_rcnt;
u64 gtt_mmio_wcnt;
u64 gtt_mmio_wcycles;
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2016-01-28 10:24 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-28 10:21 [RFC 00/29] iGVT-g implementation in i915 Zhi Wang
2016-01-28 10:21 ` [RFC 01/29] drm/i915/gvt: Introduce the basic architecture of GVT-g Zhi Wang
2016-01-29 13:57 ` Joonas Lahtinen
2016-01-29 16:48 ` Chris Wilson
2016-02-03 6:28 ` Zhi Wang
2016-02-05 7:02 ` Zhiyuan Lv
2016-02-03 6:01 ` Zhi Wang
2016-02-03 7:01 ` Zhiyuan Lv
2016-02-04 11:25 ` Joonas Lahtinen
2016-02-16 9:54 ` Zhi Wang
2016-02-16 12:44 ` Jani Nikula
2016-02-16 14:08 ` Joonas Lahtinen
2016-01-28 10:21 ` [RFC 02/29] drm/i915: Introduce host graphics memory balloon for gvt Zhi Wang
2016-02-04 11:27 ` Joonas Lahtinen
2016-02-05 10:03 ` Zhiyuan Lv
2016-02-05 13:40 ` Joonas Lahtinen
2016-02-05 14:16 ` Zhiyuan Lv
2016-02-08 11:52 ` Joonas Lahtinen
2016-02-10 8:08 ` Daniel Vetter
2016-01-28 10:21 ` [RFC 03/29] drm/i915: Introduce GVT context creation API Zhi Wang
2016-01-28 10:21 ` [RFC 04/29] drm/i915: Ondemand populate context addressing mode bit Zhi Wang
2016-01-28 10:21 ` [RFC 05/29] drm/i915: Do not populate PPGTT root pointers for GVT context Zhi Wang
2016-01-28 10:21 ` [RFC 06/29] drm/i915: Do not initialize the engine state of " Zhi Wang
2016-01-28 10:21 ` [RFC 07/29] drm/i915: GVT context scheduling Zhi Wang
2016-01-28 10:21 ` [RFC 08/29] drm/i915: Support vGPU guest framebuffer GEM object Zhi Wang
2016-01-28 10:21 ` [RFC 09/29] drm/i915: gvt: Resource allocator Zhi Wang
2016-01-28 10:21 ` [RFC 10/29] drm/i915: gvt: Basic mmio emulation state Zhi Wang
2016-01-28 10:21 ` [RFC 11/29] drm/i915: gvt: update PVINFO page definition in i915_vgpu.h Zhi Wang
2016-01-28 10:21 ` [RFC 12/29] drm/i915: gvt: vGPU life cycle management Zhi Wang
2016-01-28 10:21 ` [RFC 13/29] drm/i915: gvt: trace stub Zhi Wang
2016-01-28 10:21 ` [RFC 14/29] drm/i915: gvt: vGPU interrupt emulation framework Zhi Wang
2016-01-28 10:21 ` [RFC 15/29] drm/i915: gvt: vGPU graphics memory " Zhi Wang
2016-01-28 10:21 ` Zhi Wang [this message]
2016-01-28 10:21 ` [RFC 17/29] gvt: Xen hypervisor GVT-g MPT module Zhi Wang
2016-01-28 11:33 ` Joonas Lahtinen
2016-01-28 12:50 ` Zhiyuan Lv
2016-01-28 10:21 ` [RFC 18/29] drm/i915: gvt: vGPU configuration emulation Zhi Wang
2016-01-28 10:21 ` [RFC 19/29] drm/i915: gvt: vGPU OpRegion emulation Zhi Wang
2016-01-28 10:21 ` [RFC 20/29] drm/i915: gvt: vGPU framebuffer format decoder Zhi Wang
2016-01-28 10:21 ` [RFC 21/29] drm/i915: gvt: vGPU MMIO register emulation Zhi Wang
2016-01-28 10:21 ` [RFC 22/29] drm/i915: gvt: Full display virtualization Zhi Wang
2016-01-28 10:21 ` [RFC 23/29] drm/i915: gvt: Introduce GVT control interface Zhi Wang
2016-01-28 10:21 ` [RFC 24/29] drm/i915: gvt: Full execlist status emulation Zhi Wang
2016-01-28 10:21 ` [RFC 25/29] drm/i915: gvt: vGPU execlist workload submission Zhi Wang
2016-01-28 10:21 ` [RFC 26/29] drm/i915: gvt: workload scheduler Zhi Wang
2016-01-28 10:21 ` [RFC 27/29] drm/i915: gvt: vGPU schedule policy framework Zhi Wang
2016-01-28 10:21 ` [RFC 28/29] drm/i915: gvt: vGPU context switch Zhi Wang
2016-01-28 10:21 ` [RFC 29/29] drm/i915: gvt: vGPU command scanner Zhi Wang
2016-01-28 17:15 ` ✗ Fi.CI.BAT: failure for iGVT-g implementation in i915 Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1453976511-27322-17-git-send-email-zhi.a.wang@intel.com \
--to=zhi.a.wang@intel.com \
--cc=daniel.vetter@ffwll.ch \
--cc=david.j.cowperthwaite@intel.com \
--cc=igvt-g@lists.01.org \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).