public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
From: "Ville Syrjälä" <ville.syrjala@linux.intel.com>
To: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: intel-gfx@lists.freedesktop.org, miku@iki.fi
Subject: Re: [PATCH 11/20] drm/i915/gtt: Introduce fill_page_dma()
Date: Thu, 21 May 2015 18:16:19 +0300	[thread overview]
Message-ID: <20150521151619.GJ18908@intel.com> (raw)
In-Reply-To: <1432219068-25391-12-git-send-email-mika.kuoppala@intel.com>

On Thu, May 21, 2015 at 05:37:39PM +0300, Mika Kuoppala wrote:
> When we set up page directories and tables, we point the entries
> to the next-level scratch structure. Make this generic
> by introducing a fill_page_dma which maps and flushes. We also
> need a 32 bit variant for legacy gens.
> 
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 61 +++++++++++++++++++------------------
>  1 file changed, 31 insertions(+), 30 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 5175eb8..a3ee710 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -330,6 +330,27 @@ static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
>  	memset(p, 0, sizeof(*p));
>  }
>  
> +static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
> +			  const uint64_t val)
> +{
> +	int i;
> +	uint64_t * const vaddr = kmap_atomic(p->page);
> +
> +	for (i = 0; i < 512; i++)
> +		vaddr[i] = val;
> +
> +	kunmap_atomic(vaddr);
> +}

Where did the clflushes go? Also please keep in mind only CHV needs the
clflush and VLV doesn't.

> +
> +static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
> +			     const uint32_t val32)
> +{
> +	uint64_t v = val32;
> +	v = v << 32 | val32;
> +
> +	fill_page_dma(dev, p, v);
> +}
> +
>  static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
>  {
>  	cleanup_page_dma(dev, &pt->base);
> @@ -340,19 +361,12 @@ static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
>  static void gen8_initialize_pt(struct i915_address_space *vm,
>  			       struct i915_page_table *pt)
>  {
> -	gen8_pte_t *pt_vaddr, scratch_pte;
> -	int i;
> +	gen8_pte_t scratch_pte;
>  
> -	pt_vaddr = kmap_atomic(pt->base.page);
>  	scratch_pte = gen8_pte_encode(vm->scratch.addr,
>  				      I915_CACHE_LLC, true);
>  
> -	for (i = 0; i < GEN8_PTES; i++)
> -		pt_vaddr[i] = scratch_pte;
> -
> -	if (!HAS_LLC(vm->dev))
> -		drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
> -	kunmap_atomic(pt_vaddr);
> +	fill_page_dma(vm->dev, &pt->base, scratch_pte);
>  }
>  
>  static struct i915_page_table *alloc_pt(struct drm_device *dev)
> @@ -585,20 +599,13 @@ static void gen8_initialize_pd(struct i915_address_space *vm,
>  			       struct i915_page_directory *pd)
>  {
>  	struct i915_hw_ppgtt *ppgtt =
> -			container_of(vm, struct i915_hw_ppgtt, base);
> -	gen8_pde_t *page_directory;
> -	struct i915_page_table *pt;
> -	int i;
> +		container_of(vm, struct i915_hw_ppgtt, base);
> +	gen8_pde_t scratch_pde;
>  
> -	page_directory = kmap_atomic(pd->base.page);
> -	pt = ppgtt->scratch_pt;
> -	for (i = 0; i < I915_PDES; i++)
> -		/* Map the PDE to the page table */
> -		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
> +	scratch_pde = gen8_pde_encode(vm->dev, ppgtt->scratch_pt->base.daddr,
> +				      I915_CACHE_LLC);
>  
> -	if (!HAS_LLC(vm->dev))
> -		drm_clflush_virt_range(page_directory, PAGE_SIZE);
> -	kunmap_atomic(page_directory);
> +	fill_page_dma(vm->dev, &pd->base, scratch_pde);
>  }
>  
>  static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
> @@ -1242,22 +1249,16 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
>  }
>  
>  static void gen6_initialize_pt(struct i915_address_space *vm,
> -		struct i915_page_table *pt)
> +			       struct i915_page_table *pt)
>  {
> -	gen6_pte_t *pt_vaddr, scratch_pte;
> -	int i;
> +	gen6_pte_t scratch_pte;
>  
>  	WARN_ON(vm->scratch.addr == 0);
>  
>  	scratch_pte = vm->pte_encode(vm->scratch.addr,
>  			I915_CACHE_LLC, true, 0);
>  
> -	pt_vaddr = kmap_atomic(pt->base.page);
> -
> -	for (i = 0; i < GEN6_PTES; i++)
> -		pt_vaddr[i] = scratch_pte;
> -
> -	kunmap_atomic(pt_vaddr);
> +	fill_page_dma_32(vm->dev, &pt->base, scratch_pte);
>  }
>  
>  static int gen6_alloc_va_range(struct i915_address_space *vm,
> -- 
> 1.9.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Ville Syrjälä
Intel OTC
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2015-05-21 15:16 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-05-21 14:37 [PATCH 00/20] ppgtt cleanups / scratch merge Mika Kuoppala
2015-05-21 14:37 ` [PATCH 01/20] drm/i915/gtt: Mark TLBS dirty for gen8+ Mika Kuoppala
2015-05-21 14:37 ` [PATCH 02/20] drm/i915: Force PD restore on dirty ppGTTs Mika Kuoppala
2015-05-21 15:07   ` Ville Syrjälä
2015-05-21 16:28     ` Barbalho, Rafael
2015-05-22 16:15       ` Mika Kuoppala
2015-05-21 14:37 ` [PATCH 03/20] drm/i915/gtt: Check va range against vm size Mika Kuoppala
2015-05-21 14:37 ` [PATCH 04/20] drm/i915/gtt: Allow >= 4GB sizes for vm Mika Kuoppala
2015-05-21 14:37 ` [PATCH 05/20] drm/i915/gtt: Don't leak scratch page on mapping error Mika Kuoppala
2015-05-21 14:37 ` [PATCH 06/20] drm/i915/gtt: Remove _single from page table allocator Mika Kuoppala
2015-05-21 14:37 ` [PATCH 07/20] drm/i915/gtt: Introduce i915_page_dir_dma_addr Mika Kuoppala
2015-05-21 14:37 ` [PATCH 08/20] drm/i915/gtt: Introduce struct i915_page_dma Mika Kuoppala
2015-05-21 14:37 ` [PATCH 09/20] drm/i915/gtt: Rename unmap_and_free_px to free_px Mika Kuoppala
2015-05-21 14:37 ` [PATCH 10/20] drm/i915/gtt: Remove superfluous free_pd with gen6/7 Mika Kuoppala
2015-05-21 14:37 ` [PATCH 11/20] drm/i915/gtt: Introduce fill_page_dma() Mika Kuoppala
2015-05-21 15:16   ` Ville Syrjälä [this message]
2015-05-21 14:37 ` [PATCH 12/20] drm/i915/gtt: Introduce kmap|kunmap for dma page Mika Kuoppala
2015-05-21 15:19   ` Ville Syrjälä
2015-05-21 14:37 ` [PATCH 13/20] drm/i915/gtt: Introduce copy_page_dma and copy_px Mika Kuoppala
2015-05-21 14:37 ` [PATCH 14/20] drm/i915/gtt: Use macros to access dma mapped pages Mika Kuoppala
2015-05-21 14:37 ` [PATCH 15/20] drm/i915/gtt: Make scratch page i915_page_dma compatible Mika Kuoppala
2015-05-21 14:37 ` [PATCH 16/20] drm/i915/gtt: Fill scratch page Mika Kuoppala
2015-05-21 14:56   ` Chris Wilson
2015-05-21 14:37 ` [PATCH 17/20] drm/i915/gtt: Pin vma during virtual address allocation Mika Kuoppala
2015-05-21 14:37 ` [PATCH 18/20] drm/i915/gtt: Cleanup page directory encoding Mika Kuoppala
2015-05-21 14:37 ` [PATCH 19/20] drm/i915/gtt: Move scratch_pd and scratch_pt into vm area Mika Kuoppala
2015-05-21 14:37 ` [PATCH 20/20] drm/i915/gtt: One instance of scratch page table/directory Mika Kuoppala
2015-05-21 18:27   ` shuang.he

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20150521151619.GJ18908@intel.com \
    --to=ville.syrjala@linux.intel.com \
    --cc=intel-gfx@lists.freedesktop.org \
    --cc=mika.kuoppala@linux.intel.com \
    --cc=miku@iki.fi \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox