From: Xiaolin Zhang <xiaolin.zhang@intel.com>
To: intel-gvt-dev@lists.freedesktop.org, intel-gfx@lists.freedesktop.org
Cc: chris@chris-wilson.co.uk, zhiyuan.lv@intel.com
Subject: [Intel-gfx] [PATCH v1 11/12] drm/i915/gvt: GVTg support ggtt pv operations
Date: Sat, 5 Sep 2020 00:21:44 +0800 [thread overview]
Message-ID: <1599236505-9086-12-git-send-email-xiaolin.zhang@intel.com> (raw)
In-Reply-To: <1599236505-9086-1-git-send-email-xiaolin.zhang@intel.com>
This patch handles the PV_CMD_BIND_GGTT and PV_CMD_UNBIND_GGTT commands
for PV GGTT support. Binding and unbinding are performed per VMA instead
of per GGTT-entry MMIO update, to improve efficiency.
Signed-off-by: Xiaolin Zhang <xiaolin.zhang@intel.com>
---
drivers/gpu/drm/i915/gvt/gtt.c | 83 +++++++++++++++++++++++++++++++++++++
drivers/gpu/drm/i915/gvt/handlers.c | 4 ++
drivers/gpu/drm/i915/gvt/vgpu.c | 2 +-
3 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c13560a..c79171f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2732,6 +2732,83 @@ static void intel_vgpu_pv_ppgtt_unbind(struct intel_vgpu *vgpu,
}
+/*
+ * Handle PV_CMD_BIND_GGTT: shadow a batch of guest GGTT entries for one
+ * vma in a single PV operation instead of trapping each per-entry MMIO
+ * write.  gpas[] holds the guest PTE values; vma->start is the GGTT
+ * offset in bytes and vma->size the number of entries.  A dma-map
+ * failure falls back to the scratch page for that entry; the last
+ * mapping error (0 on success) is returned.
+ */
+static int intel_vgpu_pv_ggtt_bind(struct intel_vgpu *vgpu,
+ struct pv_vma *vma, u64 *gpas)
+{
+ u64 off = (vma->start / I915_GTT_PAGE_SIZE) << 3;
+ u32 size = vma->size;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long g_gtt_index = off >> 3;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ int ret = 0;
+ int i;
+ u64 gfn;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < size; i++) {
+ e.val64 = gpas[i];
+ if (!ops->test_present(&e)) {
+ /* non-present guest entry: back the host entry with scratch */
+ ops->set_pfn(&m, vgpu->gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ goto out;
+ }
+
+ gfn = ops->get_pfn(&e);
+ m.val64 = e.val64;
+ ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
+ gfn, PAGE_SIZE, &dma_addr);
+ if (ret) {
+ /* map failure: keep going, point this entry at scratch */
+ gvt_vgpu_err("failed to map guest ggtt entry\n");
+ ops->set_pfn(&m, vgpu->gvt->gtt.scratch_mfn);
+ } else
+ ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
+out:
+ g_gtt_index = off >> 3;
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ /* release the dma mapping of the old host entry before overwrite */
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+ ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+ off += 8;
+ }
+
+ ggtt_invalidate(vgpu->gvt->gt);
+ return ret;
+}
+
+
+/*
+ * Handle PV_CMD_UNBIND_GGTT: clear a batch of guest GGTT entries for one
+ * vma in a single PV operation.  For each entry the dma mapping held by
+ * the current host PTE is released, the cleared value from gpas[] is
+ * reflected in the guest view, and the host entry is pointed at the
+ * scratch page.  Always returns 0.
+ */
+static int intel_vgpu_pv_ggtt_unbind(struct intel_vgpu *vgpu,
+ struct pv_vma *vma, u64 *gpas)
+{
+ u64 off = (vma->start / I915_GTT_PAGE_SIZE) << 3;
+ u32 size = vma->size;
+ struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
+ struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+ unsigned long g_gtt_index;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ u32 i;
+
+ for (i = 0; i < size; i++) {
+ g_gtt_index = off >> 3;
+
+ /*
+ * Release the dma mapping held by the current host entry
+ * before it is overwritten.  Invalidating the guest-provided
+ * value instead (as v1 did) leaks the host-side mapping.
+ * This mirrors the set-guest/get-host/invalidate/set-host
+ * order used by the bind path.
+ */
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+
+ /* reflect the cleared entry in the guest view */
+ e.val64 = gpas[i];
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ /* point the host entry at the scratch page */
+ ops->set_pfn(&m, vgpu->gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
+
+ off += 8;
+ }
+
+ ggtt_invalidate(vgpu->gvt->gt);
+
+ return 0;
+}
+
int intel_vgpu_handle_pv_vma(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *mm, u32 cmd, u32 data[])
{
@@ -2768,6 +2845,12 @@ int intel_vgpu_handle_pv_vma(struct intel_vgpu *vgpu,
case PV_CMD_BIND_PPGTT:
intel_vgpu_pv_ppgtt_bind(vgpu, mm, vma, dma_addrs);
break;
+ case PV_CMD_BIND_GGTT:
+ ret = intel_vgpu_pv_ggtt_bind(vgpu, vma, dma_addrs);
+ break;
+ case PV_CMD_UNBIND_GGTT:
+ ret = intel_vgpu_pv_ggtt_unbind(vgpu, vma, dma_addrs);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index a3637d86..f1ad024 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1349,6 +1349,10 @@ static int handle_pv_commands(struct intel_vgpu *vgpu)
}
ret = intel_vgpu_handle_pv_vma(vgpu, mm, cmd, data);
break;
+ case PV_CMD_BIND_GGTT:
+ case PV_CMD_UNBIND_GGTT:
+ ret = intel_vgpu_handle_pv_vma(vgpu, NULL, cmd, data);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index c898e0d..1411c7b5 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -50,7 +50,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_PV;
- vgpu_vreg_t(vgpu, vgtif_reg(pv_caps)) = PV_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(pv_caps)) = PV_PPGTT | PV_GGTT;
vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
vgpu_aperture_gmadr_base(vgpu);
--
2.7.4
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2020-09-07 0:56 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-09-04 16:21 [Intel-gfx] [PATCH v1 00/12] enhanced i915 vgpu with PV feature support Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 01/12] drm/i915: introduced vgpu pv capability Xiaolin Zhang
2020-09-10 13:10 ` Jani Nikula
2020-09-21 5:37 ` Zhang, Xiaolin
2020-09-10 13:10 ` Jani Nikula
2020-09-21 5:24 ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 02/12] drm/i915: vgpu shared memory setup for pv support Xiaolin Zhang
2020-09-10 13:16 ` Jani Nikula
2020-09-21 5:27 ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 03/12] drm/i915: vgpu pv command buffer transport protocol Xiaolin Zhang
2020-09-10 13:20 ` Jani Nikula
2020-09-21 5:33 ` Zhang, Xiaolin
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 04/12] drm/i915: vgpu ppgtt page table pv support Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 05/12] drm/i915: vgpu ggtt " Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 06/12] drm/i915: vgpu workload submisison " Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 07/12] drm/i915/gvt: GVTg expose pv_caps PVINFO register Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 08/12] drm/i915/gvt: GVTg handle guest shared_page setup Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 09/12] drm/i915/gvt: GVTg support vgpu pv CTB protocol Xiaolin Zhang
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 10/12] drm/i915/gvt: GVTg support ppgtt pv operations Xiaolin Zhang
2020-09-04 16:21 ` Xiaolin Zhang [this message]
2020-09-04 16:21 ` [Intel-gfx] [PATCH v1 12/12] drm/i915/gvt: GVTg support pv workload submssion Xiaolin Zhang
2020-09-07 1:12 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for enhanced i915 vgpu with PV feature support Patchwork
2020-09-07 1:13 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-09-07 1:15 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
2020-09-07 1:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-09-07 7:24 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1599236505-9086-12-git-send-email-xiaolin.zhang@intel.com \
--to=xiaolin.zhang@intel.com \
--cc=chris@chris-wilson.co.uk \
--cc=intel-gfx@lists.freedesktop.org \
--cc=intel-gvt-dev@lists.freedesktop.org \
--cc=zhiyuan.lv@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox