From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 3/3] drm/i915/gtt: Setup phys pages for 3lvl pdps
Date: Thu, 4 Jul 2019 18:44:07 +0300 [thread overview]
Message-ID: <20190704154407.25551-3-mika.kuoppala@linux.intel.com> (raw)
In-Reply-To: <20190704154407.25551-1-mika.kuoppala@linux.intel.com>
If we set up backing phys pages for 3lvl pdps, even when they
are not used, we lose 5 pages per ppgtt.
Trading this memory on bsw, we gain more common code paths for all
gen8+ directory manipulation. Those paths are now free of checks
for the page directory type, making the hot paths faster.
v2: don't shortcut vm (Chris)
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 77 +++++++++++++++++++----------
1 file changed, 50 insertions(+), 27 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 84e119d7a5fc..b9422d592e8c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -758,22 +758,14 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
-static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
-{
- return pd->base.page;
-}
-
static void free_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- if (likely(pd_has_phys_page(pd)))
- cleanup_page_dma(vm, &pd->base);
-
+ cleanup_page_dma(vm, &pd->base);
kfree(pd);
}
#define init_pd(vm, pd, to) { \
- GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd)); \
fill_px((vm), (pd), gen8_pde_encode(px_dma(to), I915_CACHE_LLC)); \
memset_p((pd)->entry, (to), 512); \
}
@@ -1595,6 +1587,50 @@ static void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.vma_ops.clear_pages = clear_pages;
}
+static void init_pd_n(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ struct i915_page_directory *to,
+ const unsigned int entries)
+{
+ const u64 daddr = gen8_pde_encode(px_dma(to), I915_CACHE_LLC);
+ u64 * const vaddr = kmap_atomic(pd->base.page);
+
+ memset64(vaddr, daddr, entries);
+ kunmap_atomic(vaddr);
+
+ memset_p(pd->entry, to, entries);
+}
+
+static struct i915_page_directory *
+gen8_alloc_top_pd(struct i915_address_space *vm)
+{
+ struct i915_page_directory *pd;
+
+ if (i915_vm_is_4lvl(vm)) {
+ pd = alloc_pd(vm);
+ if (!IS_ERR(pd))
+ init_pd(vm, pd, vm->scratch_pdp);
+
+ return pd;
+ }
+
+ /* 3lvl */
+ pd = __alloc_pd();
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->entry[GEN8_3LVL_PDPES] = NULL;
+
+ if (unlikely(setup_page_dma(vm, &pd->base))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_pd_n(vm, pd, vm->scratch_pd, GEN8_3LVL_PDPES);
+
+ return pd;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1631,34 +1667,21 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- ppgtt->pd = __alloc_pd();
- if (!ppgtt->pd) {
- err = -ENOMEM;
+ ppgtt->pd = gen8_alloc_top_pd(&ppgtt->vm);
+ if (IS_ERR(ppgtt->pd)) {
+ err = PTR_ERR(ppgtt->pd);
goto err_free_scratch;
}
if (i915_vm_is_4lvl(&ppgtt->vm)) {
- err = setup_page_dma(&ppgtt->vm, &ppgtt->pd->base);
- if (err)
- goto err_free_pdp;
-
- init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
-
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- /*
- * We don't need to setup dma for top level pdp, only
- * for entries. So point entries to scratch.
- */
- memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
- GEN8_3LVL_PDPES);
-
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
- goto err_free_pdp;
+ goto err_free_pd;
}
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1673,7 +1696,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_free_pdp:
+err_free_pd:
free_pd(&ppgtt->vm, ppgtt->pd);
err_free_scratch:
gen8_free_scratch(&ppgtt->vm);
--
2.17.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2019-07-04 15:44 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-07-04 15:44 [PATCH 1/3] drm/i915/gtt: pde entry encoding is identical Mika Kuoppala
2019-07-04 15:44 ` [PATCH 2/3] drm/i915/gtt: Tear down setup and cleanup macros for page dma Mika Kuoppala
2019-07-04 15:44 ` Mika Kuoppala [this message]
2019-07-04 15:54 ` [PATCH 1/3] drm/i915/gtt: pde entry encoding is identical Chris Wilson
2019-07-04 16:03 ` Mika Kuoppala
2019-07-04 17:31 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/3] " Patchwork
2019-07-04 17:33 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-07-04 17:50 ` ✓ Fi.CI.BAT: success " Patchwork
2019-07-06 0:07 ` ✓ Fi.CI.IGT: " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2019-06-18 16:17 [PATCH 1/3] " Mika Kuoppala
2019-06-18 16:17 ` [PATCH 3/3] drm/i915/gtt: Setup phys pages for 3lvl pdps Mika Kuoppala
2019-06-18 16:37 ` Chris Wilson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190704154407.25551-3-mika.kuoppala@linux.intel.com \
--to=mika.kuoppala@linux.intel.com \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox