From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 05/10] drm/i915/gtt: Generalize alloc_pd
Date: Fri, 14 Jun 2019 19:43:45 +0300 [thread overview]
Message-ID: <20190614164350.30415-5-mika.kuoppala@linux.intel.com> (raw)
In-Reply-To: <20190614164350.30415-1-mika.kuoppala@linux.intel.com>
Allocate all page directory variants with alloc_pd. As
the lvl3 and lvl4 variants differ in manipulation, we
need to check for the existence of a backing phys page
before accessing it.
v2: use err in returns
Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 88 ++++++++++++-----------------
1 file changed, 36 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 25805971f771..de264b3a0105 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -719,10 +719,17 @@ static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
return pd;
}
+static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
+{
+ return pd->base.page;
+}
+
static void free_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- cleanup_px(vm, pd);
+ if (likely(pd_has_phys_page(pd)))
+ cleanup_px(vm, pd);
+
kfree(pd);
}
@@ -734,37 +741,12 @@ static void init_pd_with_page(struct i915_address_space *vm,
memset_p(pd->entry, pt, 512);
}
-static struct i915_page_directory *alloc_pdp(struct i915_address_space *vm)
-{
- struct i915_page_directory *pdp;
-
- pdp = __alloc_pd();
- if (!pdp)
- return ERR_PTR(-ENOMEM);
-
- if (i915_vm_is_4lvl(vm)) {
- if (unlikely(setup_px(vm, pdp))) {
- kfree(pdp);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- return pdp;
-}
-
-static void free_pdp(struct i915_address_space *vm,
- struct i915_page_directory *pdp)
-{
- if (i915_vm_is_4lvl(vm))
- cleanup_px(vm, pdp);
-
- kfree(pdp);
-}
-
static void init_pd(struct i915_address_space *vm,
struct i915_page_directory * const pd,
struct i915_page_directory * const to)
{
+ GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
+
fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
memset_p(pd->entry, to, 512);
}
@@ -842,14 +824,13 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
return !atomic_read(&pd->used);
}
-static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
- struct i915_page_directory *pdp,
+static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
struct i915_page_directory *pd,
unsigned int pdpe)
{
gen8_ppgtt_pdpe_t *vaddr;
- if (!i915_vm_is_4lvl(vm))
+ if (!pd_has_phys_page(pdp))
return;
vaddr = kmap_atomic_px(pdp);
@@ -877,7 +858,7 @@ static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
spin_lock(&pdp->lock);
if (!atomic_read(&pd->used)) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
pdp->entry[pdpe] = vm->scratch_pd;
GEM_BUG_ON(!atomic_read(&pdp->used));
@@ -938,7 +919,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
}
spin_unlock(&pml4->lock);
if (free)
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
}
@@ -1242,7 +1223,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
}
if (i915_vm_is_4lvl(vm)) {
- vm->scratch_pdp = alloc_pdp(vm);
+ vm->scratch_pdp = alloc_pd(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -1304,7 +1285,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
return;
if (i915_vm_is_4lvl(vm))
- free_pdp(vm, vm->scratch_pdp);
+ free_pd(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
cleanup_scratch_page(vm);
@@ -1324,7 +1305,7 @@ static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
free_pd(vm, pdp->entry[i]);
}
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
@@ -1431,7 +1412,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
old = cmpxchg(&pdp->entry[pdpe], vm->scratch_pd, pd);
if (old == vm->scratch_pd) {
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
atomic_inc(&pdp->used);
} else {
free_pd(vm, pd);
@@ -1457,7 +1438,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
unwind_pd:
spin_lock(&pdp->lock);
if (atomic_dec_and_test(&pd->used)) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
GEM_BUG_ON(!atomic_read(&pdp->used));
atomic_dec(&pdp->used);
free_pd(vm, pd);
@@ -1487,13 +1468,12 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
spin_lock(&pml4->lock);
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
-
if (pdp == vm->scratch_pdp) {
struct i915_page_directory *old;
spin_unlock(&pml4->lock);
- pdp = alloc_pdp(vm);
+ pdp = alloc_pd(vm);
if (IS_ERR(pdp))
goto unwind;
@@ -1503,7 +1483,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
if (old == vm->scratch_pdp) {
gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
} else {
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
pdp = old;
}
@@ -1527,7 +1507,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
spin_lock(&pml4->lock);
if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm, pdp);
+ free_pd(vm, pdp);
}
spin_unlock(&pml4->lock);
unwind:
@@ -1550,7 +1530,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
goto unwind;
init_pd_with_page(vm, pd, vm->scratch_pt);
- gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
atomic_inc(&pdp->used);
}
@@ -1562,7 +1542,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
unwind:
start -= from;
gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
- gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
free_pd(vm, pd);
}
atomic_set(&pdp->used, 0);
@@ -1620,13 +1600,17 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- ppgtt->pd = alloc_pdp(&ppgtt->vm);
- if (IS_ERR(ppgtt->pd)) {
- err = PTR_ERR(ppgtt->pd);
- goto err_scratch;
+ ppgtt->pd = __alloc_pd();
+ if (!ppgtt->pd) {
+ err = -ENOMEM;
+ goto err_free_scratch;
}
if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ err = setup_px(&ppgtt->vm, ppgtt->pd);
+ if (err)
+ goto err_free_pdp;
+
init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
@@ -1643,7 +1627,7 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (intel_vgpu_active(i915)) {
err = gen8_preallocate_top_level_pdp(ppgtt);
if (err)
- goto err_pdp;
+ goto err_free_pdp;
}
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
@@ -1658,9 +1642,9 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
return ppgtt;
-err_pdp:
- free_pdp(&ppgtt->vm, ppgtt->pd);
-err_scratch:
+err_free_pdp:
+ free_pd(&ppgtt->vm, ppgtt->pd);
+err_free_scratch:
gen8_free_scratch(&ppgtt->vm);
err_free:
kfree(ppgtt);
--
2.17.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2019-06-14 16:43 UTC|newest]
Thread overview: 23+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-06-14 16:43 [PATCH 01/10] drm/i915/gtt: No need to zero the table for page dirs Mika Kuoppala
2019-06-14 16:43 ` [PATCH 02/10] drm/i915/gtt: Use a common type for page directories Mika Kuoppala
2019-06-14 16:56 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 03/10] drm/i915/gtt: Introduce init_pd_with_page Mika Kuoppala
2019-06-14 17:10 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 04/10] drm/i915/gtt: Introduce init_pd Mika Kuoppala
2019-06-14 17:13 ` Chris Wilson
2019-06-14 16:43 ` Mika Kuoppala [this message]
2019-06-14 17:17 ` [PATCH 05/10] drm/i915/gtt: Generalize alloc_pd Chris Wilson
2019-06-14 16:43 ` [PATCH 06/10] drm/i915/gtt: pde entry encoding is identical Mika Kuoppala
2019-06-14 17:21 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 07/10] drm/i915/gtt: Check for physical page for pd entry always Mika Kuoppala
2019-06-14 17:22 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 08/10] drm/i915/gtt: Make swapping the pd entry generic Mika Kuoppala
2019-06-14 17:26 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 09/10] drm/i915/gtt: Tear down setup and cleanup macros for page dma Mika Kuoppala
2019-06-14 17:30 ` Chris Wilson
2019-06-14 16:43 ` [PATCH 10/10] drm/i915/gtt: Setup phys pages for 3lvl pdps Mika Kuoppala
2019-06-14 17:36 ` Chris Wilson
2019-06-14 17:00 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/10] drm/i915/gtt: No need to zero the table for page dirs Patchwork
2019-06-14 17:04 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-06-15 4:59 ` ✓ Fi.CI.BAT: success " Patchwork
2019-06-17 10:32 ` ✓ Fi.CI.IGT: " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190614164350.30415-5-mika.kuoppala@linux.intel.com \
--to=mika.kuoppala@linux.intel.com \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox