From: "Michał Winiarski" <michal.winiarski@intel.com>
To: intel-gfx@lists.freedesktop.org
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Subject: [PATCH 4/8] drm/i915/gtt: Don't use temp bitmaps to unwind gen8_alloc_va_range
Date: Mon, 12 Dec 2016 12:44:13 +0100
Message-ID: <1481543057-333-5-git-send-email-michal.winiarski@intel.com>
In-Reply-To: <1481543057-333-1-git-send-email-michal.winiarski@intel.com>
Rather than tracking newly allocated page tables and directories in temporary
bitmaps so that they can be freed individually on error, we can operate on
ranges and make use of the cleanup functions introduced with ppgtt shrinking
to unwind a partially completed allocation.
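
For illustration, a minimal standalone sketch of the unwind pattern (not i915
code: struct table, alloc_entry(), clear_range() and alloc_range() are made-up
names, and the sketch assumes the range is empty on entry, whereas the real
gen8_ppgtt_clear_* helpers also cope with pre-existing entries):

/*
 * Illustrative sketch only, not i915 code.
 *
 * Instead of marking every newly allocated entry in a temporary bitmap and
 * freeing the marked entries on failure, remember where the loop started and
 * clear the already-populated range [start_save, start) with the same helper
 * used for normal teardown.
 */
#include <stdlib.h>

#define NUM_ENTRIES 512

struct table {
	void *entries[NUM_ENTRIES];
};

static void *alloc_entry(void)
{
	return malloc(64);
}

/* Range-based teardown, usable both for normal cleanup and for unwinding. */
static void clear_range(struct table *t, unsigned int first, unsigned int count)
{
	unsigned int i;

	for (i = first; i < first + count; i++) {
		free(t->entries[i]);
		t->entries[i] = NULL;
	}
}

/* Caller must ensure start + count <= NUM_ENTRIES. */
static int alloc_range(struct table *t, unsigned int start, unsigned int count)
{
	const unsigned int start_save = start;
	const unsigned int end = start + count;

	for (; start < end; start++) {
		t->entries[start] = alloc_entry();
		if (!t->entries[start]) {
			/* Unwind everything populated so far in one call. */
			clear_range(t, start_save, start - start_save);
			return -1;
		}
	}

	return 0;
}

On failure, alloc_range() leaves the table exactly as it found it, with no
per-entry bitmap bookkeeping needed.
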
Cc: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michel Thierry <michel.thierry@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
---
drivers/gpu/drm/i915/i915_gem_gtt.c | 82 ++++++++++++++-----------------------
1 file changed, 30 insertions(+), 52 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f760c3e..c6f0708 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1109,6 +1109,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
uint32_t pde;
+ const uint64_t start_save = start;
gen8_for_each_pde(pt, pd, start, length, pde) {
/* Don't reallocate page tables */
@@ -1119,8 +1120,11 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
}
pt = alloc_pt(dev_priv);
- if (IS_ERR(pt))
- goto unwind_out;
+ if (IS_ERR(pt)) {
+ gen8_ppgtt_clear_pd(vm, pd, start_save,
+ start - start_save);
+ return PTR_ERR(pt);
+ }
gen8_initialize_pt(vm, pt);
pd->page_table[pde] = pt;
@@ -1129,12 +1133,6 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
}
return 0;
-
-unwind_out:
- for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev_priv, pd->page_table[pde]);
-
- return -ENOMEM;
}
/**
@@ -1171,6 +1169,7 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
struct i915_page_directory *pd;
uint32_t pdpe;
uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
+ const uint64_t start_save = start;
WARN_ON(!bitmap_empty(new_pds, pdpes));
@@ -1179,8 +1178,11 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
continue;
pd = alloc_pd(dev_priv);
- if (IS_ERR(pd))
- goto unwind_out;
+ if (IS_ERR(pd)) {
+ gen8_ppgtt_clear_pdp(vm, pdp, start_save,
+ start - start_save);
+ return PTR_ERR(pd);
+ }
gen8_initialize_pd(vm, pd);
pdp->page_directory[pdpe] = pd;
@@ -1189,12 +1191,6 @@ gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
}
return 0;
-
-unwind_out:
- for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev_priv, pdp->page_directory[pdpe]);
-
- return -ENOMEM;
}
/**
@@ -1223,14 +1219,18 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory_pointer *pdp;
uint32_t pml4e;
+ const uint64_t start_save = start;
WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
if (!test_bit(pml4e, pml4->used_pml4es)) {
pdp = alloc_pdp(dev_priv);
- if (IS_ERR(pdp))
- goto unwind_out;
+ if (IS_ERR(pdp)) {
+ gen8_ppgtt_clear_pml4(vm, pml4, start_save,
+ start - start_save);
+ return PTR_ERR(pdp);
+ }
gen8_initialize_pdp(vm, pdp);
pml4->pdps[pml4e] = pdp;
@@ -1243,12 +1243,6 @@ gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
}
return 0;
-
-unwind_out:
- for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev_priv, pml4->pdps[pml4e]);
-
- return -ENOMEM;
}
static void
@@ -1295,7 +1289,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned long *new_page_dirs, *new_page_tables;
- struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
const uint64_t start_save = start;
const uint64_t length_save = length;
@@ -1328,8 +1321,12 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
- if (ret)
- goto err_out;
+ if (ret) {
+ gen8_ppgtt_clear_pdp(vm, pdp, start_save,
+ start - start_save);
+ mark_tlbs_dirty(ppgtt);
+ return ret;
+ }
}
start = start_save;
@@ -1381,23 +1378,6 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
mark_tlbs_dirty(ppgtt);
return 0;
-
-err_out:
- while (pdpe--) {
- unsigned long temp;
-
- for_each_set_bit(temp, new_page_tables + pdpe *
- BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev_priv,
- pdp->page_directory[pdpe]->page_table[temp]);
- }
-
- for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev_priv, pdp->page_directory[pdpe]);
-
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
- mark_tlbs_dirty(ppgtt);
- return ret;
}
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
@@ -1410,6 +1390,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
uint64_t pml4e;
int ret = 0;
+ const uint64_t start_save = start;
/* Do the pml4 allocations first, so we don't need to track the newly
* allocated tables below the pdp */
@@ -1431,8 +1412,11 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
WARN_ON(!pdp);
ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
- if (ret)
- goto err_out;
+ if (ret) {
+ gen8_ppgtt_clear_pml4(vm, pml4, start_save,
+ start - start_save);
+ return ret;
+ }
gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
}
@@ -1441,12 +1425,6 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
GEN8_PML4ES_PER_PML4);
return 0;
-
-err_out:
- for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
-
- return ret;
}
static int gen8_alloc_va_range(struct i915_address_space *vm,
--
2.7.4