Intel-GFX Archive on lore.kernel.org
From: Xiaolin Zhang <xiaolin.zhang@intel.com>
To: intel-gvt-dev@lists.freedesktop.org, intel-gfx@lists.freedesktop.org
Cc: chris@chris-wilson.co.uk, zhiyuan.lv@intel.com
Subject: [Intel-gfx] [PATCH v1 09/12] drm/i915/gvt: GVTg support vgpu pv CTB protocol
Date: Sat,  5 Sep 2020 00:21:42 +0800	[thread overview]
Message-ID: <1599236505-9086-10-git-send-email-xiaolin.zhang@intel.com> (raw)
In-Reply-To: <1599236505-9086-1-git-send-email-xiaolin.zhang@intel.com>

Implement the vGPU PV CTB (command transport buffer) protocol on the
GVTg host side. Based on this protocol, CTB read functionality is added
to fetch and handle PV commands submitted by the guest.
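
For reference, the host-side read path assumes a descriptor and message
layout roughly as sketched below. Field names follow the code in this
patch; the authoritative definitions are introduced by the guest-side
CTB patch of this series, so the exact layout here is illustrative only:

  /* illustrative sketch, not the authoritative definition */
  struct vgpu_pv_ct_buffer_desc {
          u32 addr;       /* ring offset within the shared page */
          u32 size;       /* ring size in bytes, multiple of 4 */
          u32 head;       /* consumer offset in bytes, advanced by GVTg */
          u32 tail;       /* producer offset in bytes, advanced by guest */
          u32 fence;      /* fence of the last consumed message */
          u32 status;     /* status of the last consumed message */
  };

  Each message in the ring is a run of dwords (wrapping at the ring end):
    dw0:   header, encoding the action and the count of dwords that follow
    dw1:   fence, written back to desc->fence when the message is consumed
    dw2..: action-specific payload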

Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
---
 drivers/gpu/drm/i915/gvt/handlers.c | 119 +++++++++++++++++++++++++++++++++++-
 1 file changed, 118 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 295e43a..b9c9f62 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1218,6 +1218,119 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	return 0;
 }
 
+static inline unsigned int ct_header_get_len(u32 header)
+{
+	return (header >> PV_CT_MSG_LEN_SHIFT) & PV_CT_MSG_LEN_MASK;
+}
+
+static inline unsigned int ct_header_get_action(u32 header)
+{
+	return (header >> PV_CT_MSG_ACTION_SHIFT) & PV_CT_MSG_ACTION_MASK;
+}
+
+static int fetch_pv_command_buffer(struct intel_vgpu *vgpu,
+		struct vgpu_pv_ct_buffer_desc *desc,
+		u32 *fence, u32 *action, u32 *data)
+{
+	u32 head, tail, len, size, off;
+	u32 cmd_head;
+	u32 avail;
+	int ret;
+
+	/* fetch command descriptor */
+	off = PV_DESC_OFF;
+	ret = intel_gvt_read_shared_page(vgpu, off, desc, sizeof(*desc));
+	if (ret)
+		return ret;
+
+	GEM_BUG_ON(desc->size % 4);
+	GEM_BUG_ON(desc->head % 4);
+	GEM_BUG_ON(desc->tail % 4);
+
+	head = desc->head / 4;
+	tail = desc->tail / 4;
+	size = desc->size / 4;
+	GEM_BUG_ON(tail >= size);
+	GEM_BUG_ON(head >= size);
+
+	/* tail == head condition indicates empty */
+	if (unlikely((tail - head) == 0))
+		return -ENODATA;
+
+	/* fetch command head */
+	off = desc->addr + head * 4;
+	ret = intel_gvt_read_shared_page(vgpu, off, &cmd_head, 4);
+	head = (head + 1) % size;
+	if (ret)
+		goto err;
+
+	len = ct_header_get_len(cmd_head) - 1;
+	*action = ct_header_get_action(cmd_head);
+
+	/* fetch command fence */
+	off = desc->addr + head * 4;
+	ret = intel_gvt_read_shared_page(vgpu, off, fence, 4);
+	head = (head + 1) % size;
+	if (ret)
+		goto err;
+
+	/* no command data */
+	if (len == 0)
+		goto err;
+
+	/* fetch command data */
+	avail = size - head;
+	if (len <= avail) {
+		off = desc->addr + head * 4;
+		ret = intel_gvt_read_shared_page(vgpu, off, data, len * 4);
+		head = (head + len) % size;
+	} else {
+		/* swap case */
+		off = desc->addr + head * 4;
+		ret = intel_gvt_read_shared_page(vgpu, off, data, avail * 4);
+		head = (head + avail) % size;
+		if (ret)
+			goto err;
+
+		off = desc->addr;
+		ret = intel_gvt_read_shared_page(vgpu, off, &data[avail],
+				(len - avail) * 4);
+		head = (head + len - avail) % size;
+	}
+
+err:
+	desc->head = head * 4;
+	return ret;
+}
+
+static int pv_command_buffer_read(struct intel_vgpu *vgpu,
+		u32 *cmd, u32 *data)
+{
+	struct vgpu_pv_ct_buffer_desc desc;
+	u32 fence, off = PV_DESC_OFF;
+	int ret;
+
+	ret = fetch_pv_command_buffer(vgpu, &desc, &fence, cmd, data);
+
+	/* write command descriptor back */
+	desc.fence = fence;
+	desc.status = ret;
+
+	ret = intel_gvt_write_shared_page(vgpu, off, &desc, sizeof(desc));
+
+	return ret;
+}
+
+static int handle_pv_commands(struct intel_vgpu *vgpu)
+{
+	u32 cmd;
+	u32 data[32];
+	int ret;
+
+	ret = pv_command_buffer_read(vgpu, &cmd, data);
+	return ret;
+}
+
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
 	enum intel_gvt_gtt_type root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
@@ -1226,6 +1339,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	unsigned long gpa, gfn;
 	u16 ver_major = PV_MAJOR;
 	u16 ver_minor = PV_MINOR;
+	int ret = 0;
 
 	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 
@@ -1252,6 +1366,9 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 		intel_gvt_write_shared_page(vgpu, 0, &ver_major, 2);
 		intel_gvt_write_shared_page(vgpu, 2, &ver_minor, 2);
 		break;
+	case VGT_G2V_PV_SEND_TRIGGER:
+		ret = handle_pv_commands(vgpu);
+		break;
 	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
 	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
 	case 1:	/* Remove this in guest driver. */
@@ -1259,7 +1376,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 	default:
 		gvt_vgpu_err("Invalid PV notification %d\n", notification);
 	}
-	return 0;
+	return ret;
 }
 
 static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
-- 
2.7.4


Thread overview: 26+ messages
2020-09-04 16:21 [Intel-gfx] [PATCH v1 00/12] enhanced i915 vgpu with PV feature support Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 01/12] drm/i915: introduced vgpu pv capability Xiaolin Zhang
2020-09-10 13:10   ` Jani Nikula
2020-09-21  5:37     ` Zhang, Xiaolin
2020-09-10 13:10   ` Jani Nikula
2020-09-21  5:24     ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 02/12] drm/i915: vgpu shared memory setup for pv support Xiaolin Zhang
2020-09-10 13:16   ` Jani Nikula
2020-09-21  5:27     ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 03/12] drm/i915: vgpu pv command buffer transport protocol Xiaolin Zhang
2020-09-10 13:20   ` Jani Nikula
2020-09-21  5:33     ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 04/12] drm/i915: vgpu ppgtt page table pv support Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 05/12] drm/i915: vgpu ggtt " Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 06/12] drm/i915: vgpu workload submisison " Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 07/12] drm/i915/gvt: GVTg expose pv_caps PVINFO register Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 08/12] drm/i915/gvt: GVTg handle guest shared_page setup Xiaolin Zhang
2020-09-04 16:21 ` Xiaolin Zhang [this message]
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 10/12] drm/i915/gvt: GVTg support ppgtt pv operations Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 11/12] drm/i915/gvt: GVTg support ggtt " Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 12/12] drm/i915/gvt: GVTg support pv workload submssion Xiaolin Zhang
2020-09-07  1:12 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for enhanced i915 vgpu with PV feature support Patchwork
2020-09-07  1:13 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-09-07  1:15 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
2020-09-07  1:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-09-07  7:24 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
