From: Michel Thierry <michel.thierry@intel.com>
To: Mika Kuoppala <mika.kuoppala@linux.intel.com>,
intel-gfx@lists.freedesktop.org,
Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: miku@iki.fi
Subject: Re: [PATCH 4/4] drm/i915/gtt: Per ppgtt scratch page
Date: Wed, 1 Jul 2015 15:05:44 +0100 [thread overview]
Message-ID: <5593F3B8.6070102@intel.com> (raw)
In-Reply-To: <1435677400-7630-4-git-send-email-mika.kuoppala@intel.com>
On 6/30/2015 4:16 PM, Mika Kuoppala wrote:
> Previously we have pointed the page where the individual ppgtt
> scratch structures refer to, to be the instance which GGTT setup have
> allocated. So it has been shared.
>
> To achive full isolation between ppgtts also in this regard,
^^^^^achieve
> allocate per ppgtt scratch page.
>
Maybe also mention that it moves the scratch page/pt/pd operations together
(into the genx_init/free_scratch functions).
Daniel, since you requested this, should it get your r-b?
It looks OK to me.
-Michel
> Cc: Michel Thierry <michel.thierry@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
> drivers/gpu/drm/i915/i915_gem_gtt.c | 94 +++++++++++++++++++++++++++++--------
> 1 file changed, 74 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 402d6d3..b1a8fc4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -682,6 +682,42 @@ static void gen8_free_page_tables(struct drm_device *dev,
> }
> }
>
> +static int gen8_init_scratch(struct i915_address_space *vm)
> +{
> + struct drm_device *dev = vm->dev;
> +
> + vm->scratch_page = alloc_scratch_page(dev);
> + if (IS_ERR(vm->scratch_page))
> + return PTR_ERR(vm->scratch_page);
> +
> + vm->scratch_pt = alloc_pt(dev);
> + if (IS_ERR(vm->scratch_pt)) {
> + free_scratch_page(dev, vm->scratch_page);
> + return PTR_ERR(vm->scratch_pt);
> + }
> +
> + vm->scratch_pd = alloc_pd(dev);
> + if (IS_ERR(vm->scratch_pd)) {
> + free_pt(dev, vm->scratch_pt);
> + free_scratch_page(dev, vm->scratch_page);
> + return PTR_ERR(vm->scratch_pd);
> + }
> +
> + gen8_initialize_pt(vm, vm->scratch_pt);
> + gen8_initialize_pd(vm, vm->scratch_pd);
> +
> + return 0;
> +}
> +
> +static void gen8_free_scratch(struct i915_address_space *vm)
> +{
> + struct drm_device *dev = vm->dev;
> +
> + free_pd(dev, vm->scratch_pd);
> + free_pt(dev, vm->scratch_pt);
> + free_scratch_page(dev, vm->scratch_page);
> +}
> +
> static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> {
> struct i915_hw_ppgtt *ppgtt =
> @@ -697,8 +733,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
> free_pd(ppgtt->base.dev, ppgtt->pdp.page_directory[i]);
> }
>
> - free_pd(vm->dev, vm->scratch_pd);
> - free_pt(vm->dev, vm->scratch_pt);
> + gen8_free_scratch(vm);
> }
>
> /**
> @@ -985,16 +1020,11 @@ err_out:
> */
> static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
> {
> - ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pt))
> - return PTR_ERR(ppgtt->base.scratch_pt);
> -
> - ppgtt->base.scratch_pd = alloc_pd(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pd))
> - return PTR_ERR(ppgtt->base.scratch_pd);
> + int ret;
>
> - gen8_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
> - gen8_initialize_pd(&ppgtt->base, ppgtt->base.scratch_pd);
> + ret = gen8_init_scratch(&ppgtt->base);
> + if (ret)
> + return ret;
>
> ppgtt->base.start = 0;
> ppgtt->base.total = 1ULL << 32;
> @@ -1410,6 +1440,33 @@ unwind_out:
> return ret;
> }
>
> +static int gen6_init_scratch(struct i915_address_space *vm)
> +{
> + struct drm_device *dev = vm->dev;
> +
> + vm->scratch_page = alloc_scratch_page(dev);
> + if (IS_ERR(vm->scratch_page))
> + return PTR_ERR(vm->scratch_page);
> +
> + vm->scratch_pt = alloc_pt(dev);
> + if (IS_ERR(vm->scratch_pt)) {
> + free_scratch_page(dev, vm->scratch_page);
> + return PTR_ERR(vm->scratch_pt);
> + }
> +
> + gen6_initialize_pt(vm, vm->scratch_pt);
> +
> + return 0;
> +}
> +
> +static void gen6_free_scratch(struct i915_address_space *vm)
> +{
> + struct drm_device *dev = vm->dev;
> +
> + free_pt(dev, vm->scratch_pt);
> + free_scratch_page(dev, vm->scratch_page);
> +}
> +
> static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
> {
> struct i915_hw_ppgtt *ppgtt =
> @@ -1424,11 +1481,12 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
> free_pt(ppgtt->base.dev, pt);
> }
>
> - free_pt(vm->dev, vm->scratch_pt);
> + gen6_free_scratch(vm);
> }
>
> static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
> {
> + struct i915_address_space *vm = &ppgtt->base;
> struct drm_device *dev = ppgtt->base.dev;
> struct drm_i915_private *dev_priv = dev->dev_private;
> bool retried = false;
> @@ -1439,11 +1497,10 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
> * size. We allocate at the top of the GTT to avoid fragmentation.
> */
> BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
> - ppgtt->base.scratch_pt = alloc_pt(ppgtt->base.dev);
> - if (IS_ERR(ppgtt->base.scratch_pt))
> - return PTR_ERR(ppgtt->base.scratch_pt);
>
> - gen6_initialize_pt(&ppgtt->base, ppgtt->base.scratch_pt);
> + ret = gen6_init_scratch(vm);
> + if (ret)
> + return ret;
>
> alloc:
> ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
> @@ -1474,7 +1531,7 @@ alloc:
> return 0;
>
> err_out:
> - free_pt(ppgtt->base.dev, ppgtt->base.scratch_pt);
> + gen6_free_scratch(vm);
> return ret;
> }
>
> @@ -1548,10 +1605,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
>
> static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
> {
> - struct drm_i915_private *dev_priv = dev->dev_private;
> -
> ppgtt->base.dev = dev;
> - ppgtt->base.scratch_page = dev_priv->gtt.base.scratch_page;
>
> if (INTEL_INFO(dev)->gen < 8)
> return gen6_ppgtt_init(ppgtt);
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2015-07-01 14:05 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-06-30 15:16 [PATCH 1/4] drm/i915/gtt: Reorder page alloc/free/init functions Mika Kuoppala
2015-06-30 15:16 ` [PATCH 2/4] drm/i915/gtt: Warn if the next layer scratch dma is invalid Mika Kuoppala
2015-06-30 16:59 ` Michel Thierry
2015-06-30 17:11 ` Chris Wilson
2015-07-01 10:55 ` Mika Kuoppala
2015-06-30 15:16 ` [PATCH 3/4] drm/i915/gtt: Return struct i915_scratch_page from alloc_scratch Mika Kuoppala
2015-07-01 12:02 ` Michel Thierry
2015-07-01 13:15 ` Daniel Vetter
2015-06-30 15:16 ` [PATCH 4/4] drm/i915/gtt: Per ppgtt scratch page Mika Kuoppala
2015-07-01 14:05 ` Michel Thierry [this message]
2015-07-01 14:26 ` Daniel Vetter
2015-07-01 14:25 ` Michel Thierry
2015-07-01 14:49 ` Daniel Vetter
2015-07-02 14:34 ` shuang.he
2015-06-30 16:58 ` [PATCH 1/4] drm/i915/gtt: Reorder page alloc/free/init functions Michel Thierry
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=5593F3B8.6070102@intel.com \
--to=michel.thierry@intel.com \
--cc=daniel.vetter@ffwll.ch \
--cc=intel-gfx@lists.freedesktop.org \
--cc=mika.kuoppala@linux.intel.com \
--cc=miku@iki.fi \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox