From: Zhi Wang <zhi.a.wang@intel.com>
To: intel-gfx@lists.freedesktop.org, igvt-g@lists.01.org
Cc: daniel.vetter@ffwll.ch, david.j.cowperthwaite@intel.com
Subject: [RFC 27/29] drm/i915: gvt: vGPU schedule policy framework
Date: Thu, 28 Jan 2016 18:21:49 +0800 [thread overview]
Message-ID: <1453976511-27322-28-git-send-email-zhi.a.wang@intel.com> (raw)
In-Reply-To: <1453976511-27322-1-git-send-email-zhi.a.wang@intel.com>
This patch introduces a vGPU schedule policy framework, with a timer-based
schedule policy module as the initial implementation.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
---
drivers/gpu/drm/i915/gvt/Makefile | 3 +-
drivers/gpu/drm/i915/gvt/gvt.h | 2 +
drivers/gpu/drm/i915/gvt/handlers.c | 16 ++
drivers/gpu/drm/i915/gvt/instance.c | 16 ++
drivers/gpu/drm/i915/gvt/sched_policy.c | 295 ++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/sched_policy.h | 48 ++++++
drivers/gpu/drm/i915/gvt/scheduler.c | 5 +
drivers/gpu/drm/i915/gvt/scheduler.h | 3 +
8 files changed, 387 insertions(+), 1 deletion(-)
create mode 100644 drivers/gpu/drm/i915/gvt/sched_policy.c
create mode 100644 drivers/gpu/drm/i915/gvt/sched_policy.h
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 46f71db..dcaf715 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -1,6 +1,7 @@
GVT_SOURCE := gvt.o params.o aperture_gm.o mmio.o handlers.o instance.o \
trace_points.o interrupt.o gtt.o cfg_space.o opregion.o utility.o \
- fb_decoder.o display.o edid.o control.o execlist.o scheduler.o
+ fb_decoder.o display.o edid.o control.o execlist.o scheduler.o \
+ sched_policy.o
ccflags-y += -I$(src) -I$(src)/.. -Wall -Werror -Wno-unused-function
i915_gvt-y := $(GVT_SOURCE)
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 83f1017..5788bb7 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -44,6 +44,7 @@
#include "display.h"
#include "execlist.h"
#include "scheduler.h"
+#include "sched_policy.h"
#define GVT_MAX_VGPU 8
@@ -160,6 +161,7 @@ struct vgt_device {
unsigned long last_reset_time;
atomic_t crashing;
bool warn_untrack;
+ void *sched_data;
};
struct gvt_gm_allocator {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 356cfc4..a04d0cb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -259,6 +259,22 @@ static bool dpy_reg_mmio_read_3(struct vgt_device *vgt, unsigned int offset,
static bool ring_mode_write(struct vgt_device *vgt, unsigned int off,
void *p_data, unsigned int bytes)
{
+ u32 data = *(u32 *)p_data;
+ int ring_id = gvt_render_mmio_to_ring_id(off);
+ bool enable_execlist;
+
+ if (_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)
+ || _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE)) {
+ enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
+
+ gvt_info("EXECLIST %s on ring %d.",
+ (enable_execlist ? "enabling" : "disabling"),
+ ring_id);
+
+ if (enable_execlist)
+ gvt_start_schedule(vgt);
+ }
+
return true;
}
diff --git a/drivers/gpu/drm/i915/gvt/instance.c b/drivers/gpu/drm/i915/gvt/instance.c
index 959c8ee..0b7eb8f 100644
--- a/drivers/gpu/drm/i915/gvt/instance.c
+++ b/drivers/gpu/drm/i915/gvt/instance.c
@@ -193,9 +193,22 @@ void gvt_destroy_instance(struct vgt_device *vgt)
struct pgt_device *pdev = vgt->pdev;
mutex_lock(&pdev->lock);
+
+ gvt_stop_schedule(vgt);
+
+ mutex_unlock(&pdev->lock);
+
+ if (atomic_read(&vgt->running_workload_num))
+ gvt_wait_instance_idle(vgt);
+
+ mutex_lock(&pdev->lock);
+
+ gvt_clean_instance_sched_policy(vgt);
+
gvt_set_instance_offline(vgt);
if (vgt->id != -1)
idr_remove(&pdev->instance_idr, vgt->id);
+
mutex_unlock(&pdev->lock);
hypervisor_hvm_exit(vgt);
@@ -234,6 +247,9 @@ struct vgt_device *gvt_create_instance(struct pgt_device *pdev,
vgt->id = id;
vgt->pdev = pdev;
+ if (!gvt_init_instance_sched_policy(vgt))
+ goto err;
+
vgt->warn_untrack = true;
if (!create_virtual_device_state(vgt, info))
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
new file mode 100644
index 0000000..14f4301
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "gvt.h"
+
+static bool instance_has_pending_workload(struct vgt_device *vgt)
+{
+ struct gvt_virtual_execlist_info *info;
+ int i;
+
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ info = &vgt->virtual_execlist_info[i];
+ if (!list_empty(workload_q_head(vgt, i)))
+ return true;
+ }
+
+ return false;
+}
+
+static void try_to_schedule_next_instance(struct pgt_device *pdev)
+{
+ struct gvt_workload_scheduler *scheduler =
+ &pdev->workload_scheduler;
+ int i;
+
+ /* no target to schedule */
+ if (!scheduler->next_instance)
+ return;
+
+ gvt_dbg_sched("try to schedule next instance %d",
+ scheduler->next_instance->id);
+
+ /*
+ * after the flag is set, workload dispatch thread will
+ * stop dispatching workload for current instance
+ */
+ scheduler->need_reschedule = true;
+
+ /* still have uncompleted workload? */
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ if (scheduler->current_workload[i]) {
+ gvt_dbg_sched("still have running workload");
+ return;
+ }
+ }
+
+ gvt_dbg_sched("switch to next instance %d",
+ scheduler->next_instance->id);
+
+ /* switch current instance */
+ scheduler->current_instance = scheduler->next_instance;
+ scheduler->next_instance = NULL;
+
+ /* wake up workload dispatch thread */
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ wake_up(&scheduler->waitq[i]);
+
+ scheduler->need_reschedule = false;
+}
+
+struct tbs_instance_data {
+ struct list_head list;
+ struct vgt_device *vgt;
+ /* put some per-instance sched stats here */
+};
+
+struct tbs_sched_data {
+ struct pgt_device *pdev;
+ struct delayed_work work;
+ unsigned long period;
+ atomic_t runq_instance_num;
+ struct list_head runq_head;
+};
+
+#define GVT_DEFAULT_TIME_SLICE (16 * HZ / 1000)
+
+static void tbs_sched_func(struct work_struct *work)
+{
+ struct tbs_sched_data *sched_data = container_of(work,
+ struct tbs_sched_data, work.work);
+ struct tbs_instance_data *instance_data;
+
+ struct pgt_device *pdev = sched_data->pdev;
+ struct gvt_workload_scheduler *scheduler =
+ &pdev->workload_scheduler;
+
+ struct vgt_device *vgt = NULL;
+ struct list_head *pos, *head;
+
+ mutex_lock(&pdev->lock);
+
+ /* no instance on the runq, or a next target has already been chosen */
+ if (list_empty(&sched_data->runq_head)|| scheduler->next_instance)
+ goto out;
+
+ if (scheduler->current_instance) {
+ instance_data = scheduler->current_instance->sched_data;
+ head = &instance_data->list;
+ } else {
+ gvt_dbg_sched("no current instance search from q head");
+ head = &sched_data->runq_head;
+ }
+
+ /* search for an instance with pending workload */
+ list_for_each(pos, head) {
+ if (pos == &sched_data->runq_head)
+ continue;
+
+ instance_data = container_of(pos, struct tbs_instance_data, list);
+ if (!instance_has_pending_workload(instance_data->vgt))
+ continue;
+
+ vgt = instance_data->vgt;
+ break;
+ }
+
+ if (vgt) {
+ scheduler->next_instance = vgt;
+ gvt_dbg_sched("pick next instance %d", vgt->id);
+ }
+out:
+ if (scheduler->next_instance) {
+ gvt_dbg_sched("try to schedule next instance %d",
+ scheduler->next_instance->id);
+ try_to_schedule_next_instance(pdev);
+ }
+
+ /*
+ * still have instances on the runq,
+ * or the last schedule hasn't finished due to a running workload
+ */
+ if (atomic_read(&sched_data->runq_instance_num) || scheduler->next_instance)
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+
+ mutex_unlock(&pdev->lock);
+}
+
+static bool tbs_sched_init(struct pgt_device *pdev)
+{
+ struct gvt_workload_scheduler *scheduler =
+ &pdev->workload_scheduler;
+
+ struct tbs_sched_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ gvt_err("fail to allocate sched data");
+ return false;
+ }
+
+ INIT_LIST_HEAD(&data->runq_head);
+ INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ data->period = GVT_DEFAULT_TIME_SLICE;
+ data->pdev = pdev;
+
+ atomic_set(&data->runq_instance_num, 0);
+ scheduler->sched_data = data;
+
+ return true;
+}
+
+static void tbs_sched_clean(struct pgt_device *pdev)
+{
+ struct gvt_workload_scheduler *scheduler =
+ &pdev->workload_scheduler;
+ struct tbs_sched_data *data = scheduler->sched_data;
+
+ cancel_delayed_work(&data->work);
+ kfree(data);
+ scheduler->sched_data = NULL;
+}
+
+static bool tbs_sched_instance_init(struct vgt_device *vgt)
+{
+ struct tbs_instance_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ gvt_err("fail to allocate memory");
+ return false;
+ }
+
+ data->vgt = vgt;
+ INIT_LIST_HEAD(&data->list);
+
+ vgt->sched_data = data;
+
+ return true;
+}
+
+static void tbs_sched_instance_clean(struct vgt_device *vgt)
+{
+ kfree(vgt->sched_data);
+ vgt->sched_data = NULL;
+}
+
+static void tbs_sched_start_schedule(struct vgt_device *vgt)
+{
+ struct tbs_sched_data *sched_data = vgt->pdev->workload_scheduler.sched_data;
+ struct tbs_instance_data *instance_data = vgt->sched_data;
+
+ if (!list_empty(&instance_data->list))
+ return;
+
+ list_add_tail(&instance_data->list, &sched_data->runq_head);
+ atomic_inc(&sched_data->runq_instance_num);
+
+ schedule_delayed_work(&sched_data->work, sched_data->period);
+}
+
+static void tbs_sched_stop_schedule(struct vgt_device *vgt)
+{
+ struct tbs_sched_data *sched_data = vgt->pdev->workload_scheduler.sched_data;
+ struct tbs_instance_data *instance_data = vgt->sched_data;
+
+ atomic_dec(&sched_data->runq_instance_num);
+ list_del_init(&instance_data->list);
+}
+
+struct gvt_schedule_policy_ops tbs_schedule_ops = {
+ .init = tbs_sched_init,
+ .clean = tbs_sched_clean,
+ .instance_init = tbs_sched_instance_init,
+ .instance_clean = tbs_sched_instance_clean,
+ .start_schedule = tbs_sched_start_schedule,
+ .stop_schedule = tbs_sched_stop_schedule,
+};
+
+bool gvt_init_sched_policy(struct pgt_device *pdev)
+{
+ pdev->workload_scheduler.sched_ops = &tbs_schedule_ops;
+
+ return pdev->workload_scheduler.sched_ops->init(pdev);
+}
+
+void gvt_clean_sched_policy(struct pgt_device *pdev)
+{
+ pdev->workload_scheduler.sched_ops->clean(pdev);
+}
+
+bool gvt_init_instance_sched_policy(struct vgt_device *vgt)
+{
+ return vgt->pdev->workload_scheduler.sched_ops->instance_init(vgt);
+}
+
+void gvt_clean_instance_sched_policy(struct vgt_device *vgt)
+{
+ vgt->pdev->workload_scheduler.sched_ops->instance_clean(vgt);
+}
+
+void gvt_start_schedule(struct vgt_device *vgt)
+{
+ gvt_info("[vgt %d] start schedule", vgt->id);
+
+ vgt->pdev->workload_scheduler.sched_ops->start_schedule(vgt);
+}
+
+void gvt_stop_schedule(struct vgt_device *vgt)
+{
+ struct gvt_workload_scheduler *scheduler =
+ &vgt->pdev->workload_scheduler;
+
+ gvt_info("[vgt %d] stop schedule", vgt->id);
+
+ scheduler->sched_ops->stop_schedule(vgt);
+
+ if (scheduler->next_instance == vgt)
+ scheduler->next_instance = NULL;
+
+ if (scheduler->current_instance == vgt) {
+ /* stop workload dispatching */
+ scheduler->need_reschedule = true;
+ scheduler->current_instance = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
new file mode 100644
index 0000000..9cc1899
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __GVT_SCHED_POLICY__
+#define __GVT_SCHED_POLICY__
+
+struct gvt_schedule_policy_ops {
+ bool (*init)(struct pgt_device *pdev);
+ void (*clean)(struct pgt_device *pdev);
+ bool (*instance_init)(struct vgt_device *vgt);
+ void (*instance_clean)(struct vgt_device *vgt);
+ void (*start_schedule)(struct vgt_device *vgt);
+ void (*stop_schedule)(struct vgt_device *vgt);
+};
+
+bool gvt_init_sched_policy(struct pgt_device *pdev);
+
+void gvt_clean_sched_policy(struct pgt_device *pdev);
+
+bool gvt_init_instance_sched_policy(struct vgt_device *vgt);
+
+void gvt_clean_instance_sched_policy(struct vgt_device *vgt);
+
+void gvt_start_schedule(struct vgt_device *vgt);
+
+void gvt_stop_schedule(struct vgt_device *vgt);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index cdf179f..d8d2e23 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -434,6 +434,8 @@ void gvt_clean_workload_scheduler(struct pgt_device *pdev)
i915_gem_context_unreference(scheduler->shadow_ctx);
scheduler->shadow_ctx = NULL;
+
+ gvt_clean_sched_policy(pdev);
}
bool gvt_init_workload_scheduler(struct pgt_device *pdev)
@@ -474,6 +476,9 @@ bool gvt_init_workload_scheduler(struct pgt_device *pdev)
}
}
+ if (!gvt_init_sched_policy(pdev))
+ goto err;
+
return true;
err:
if (param) {
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c4e7fa2..7a8f1eb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -35,6 +35,9 @@ struct gvt_workload_scheduler {
wait_queue_head_t workload_complete_wq;
struct task_struct *thread[I915_NUM_RINGS];
wait_queue_head_t waitq[I915_NUM_RINGS];
+
+ void *sched_data;
+ struct gvt_schedule_policy_ops *sched_ops;
};
struct gvt_workload {
--
1.9.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2016-01-28 10:25 UTC|newest]
Thread overview: 49+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-01-28 10:21 [RFC 00/29] iGVT-g implementation in i915 Zhi Wang
2016-01-28 10:21 ` [RFC 01/29] drm/i915/gvt: Introduce the basic architecture of GVT-g Zhi Wang
2016-01-29 13:57 ` Joonas Lahtinen
2016-01-29 16:48 ` Chris Wilson
2016-02-03 6:28 ` Zhi Wang
2016-02-05 7:02 ` Zhiyuan Lv
2016-02-03 6:01 ` Zhi Wang
2016-02-03 7:01 ` Zhiyuan Lv
2016-02-04 11:25 ` Joonas Lahtinen
2016-02-16 9:54 ` Zhi Wang
2016-02-16 12:44 ` Jani Nikula
2016-02-16 14:08 ` Joonas Lahtinen
2016-01-28 10:21 ` [RFC 02/29] drm/i915: Introduce host graphics memory balloon for gvt Zhi Wang
2016-02-04 11:27 ` Joonas Lahtinen
2016-02-05 10:03 ` Zhiyuan Lv
2016-02-05 13:40 ` Joonas Lahtinen
2016-02-05 14:16 ` Zhiyuan Lv
2016-02-08 11:52 ` Joonas Lahtinen
2016-02-10 8:08 ` Daniel Vetter
2016-01-28 10:21 ` [RFC 03/29] drm/i915: Introduce GVT context creation API Zhi Wang
2016-01-28 10:21 ` [RFC 04/29] drm/i915: Ondemand populate context addressing mode bit Zhi Wang
2016-01-28 10:21 ` [RFC 05/29] drm/i915: Do not populate PPGTT root pointers for GVT context Zhi Wang
2016-01-28 10:21 ` [RFC 06/29] drm/i915: Do not initialize the engine state of " Zhi Wang
2016-01-28 10:21 ` [RFC 07/29] drm/i915: GVT context scheduling Zhi Wang
2016-01-28 10:21 ` [RFC 08/29] drm/i915: Support vGPU guest framebuffer GEM object Zhi Wang
2016-01-28 10:21 ` [RFC 09/29] drm/i915: gvt: Resource allocator Zhi Wang
2016-01-28 10:21 ` [RFC 10/29] drm/i915: gvt: Basic mmio emulation state Zhi Wang
2016-01-28 10:21 ` [RFC 11/29] drm/i915: gvt: update PVINFO page definition in i915_vgpu.h Zhi Wang
2016-01-28 10:21 ` [RFC 12/29] drm/i915: gvt: vGPU life cycle management Zhi Wang
2016-01-28 10:21 ` [RFC 13/29] drm/i915: gvt: trace stub Zhi Wang
2016-01-28 10:21 ` [RFC 14/29] drm/i915: gvt: vGPU interrupt emulation framework Zhi Wang
2016-01-28 10:21 ` [RFC 15/29] drm/i915: gvt: vGPU graphics memory " Zhi Wang
2016-01-28 10:21 ` [RFC 16/29] drm/i915: gvt: Generic MPT framework Zhi Wang
2016-01-28 10:21 ` [RFC 17/29] gvt: Xen hypervisor GVT-g MPT module Zhi Wang
2016-01-28 11:33 ` Joonas Lahtinen
2016-01-28 12:50 ` Zhiyuan Lv
2016-01-28 10:21 ` [RFC 18/29] drm/i915: gvt: vGPU configuration emulation Zhi Wang
2016-01-28 10:21 ` [RFC 19/29] drm/i915: gvt: vGPU OpRegion emulation Zhi Wang
2016-01-28 10:21 ` [RFC 20/29] drm/i915: gvt: vGPU framebuffer format decoder Zhi Wang
2016-01-28 10:21 ` [RFC 21/29] drm/i915: gvt: vGPU MMIO register emulation Zhi Wang
2016-01-28 10:21 ` [RFC 22/29] drm/i915: gvt: Full display virtualization Zhi Wang
2016-01-28 10:21 ` [RFC 23/29] drm/i915: gvt: Introduce GVT control interface Zhi Wang
2016-01-28 10:21 ` [RFC 24/29] drm/i915: gvt: Full execlist status emulation Zhi Wang
2016-01-28 10:21 ` [RFC 25/29] drm/i915: gvt: vGPU execlist workload submission Zhi Wang
2016-01-28 10:21 ` [RFC 26/29] drm/i915: gvt: workload scheduler Zhi Wang
2016-01-28 10:21 ` Zhi Wang [this message]
2016-01-28 10:21 ` [RFC 28/29] drm/i915: gvt: vGPU context switch Zhi Wang
2016-01-28 10:21 ` [RFC 29/29] drm/i915: gvt: vGPU command scanner Zhi Wang
2016-01-28 17:15 ` ✗ Fi.CI.BAT: failure for iGVT-g implementation in i915 Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1453976511-27322-28-git-send-email-zhi.a.wang@intel.com \
--to=zhi.a.wang@intel.com \
--cc=daniel.vetter@ffwll.ch \
--cc=david.j.cowperthwaite@intel.com \
--cc=igvt-g@lists.01.org \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).