public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
* [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow
@ 2019-06-17 11:20 Chris Wilson
  2019-06-17 11:30 ` Matthew Auld
                   ` (5 more replies)
  0 siblings, 6 replies; 7+ messages in thread
From: Chris Wilson @ 2019-06-17 11:20 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala, Matthew Auld

Currently, we perform a locked update of the shadow entry when
allocating a page directory entry such that if two clients are
concurrently allocating neighbouring ranges we only insert one new entry
for the pair of them. However, we also need to serialise both clients
wrt the actual entry in the HW table, or else we may allow one client
or even a third client to proceed ahead of the HW write. My handwave
before was that under the _pathological_ condition we would see the
scratch entry instead of the expected entry, causing a temporary
glitch. That starvation condition will eventually show up in practice, so
fix it.

The reason for the previous cheat was to avoid having to free the extra
allocation while under the spinlock. Now, we keep the extra entry
allocated until the end instead.

v2: Fix error paths for gen6

Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 130 +++++++++++++++-------------
 1 file changed, 72 insertions(+), 58 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0392a4c4bb9b..0987748d327b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1387,82 +1387,88 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd,
 			       u64 start, u64 length)
 {
+	struct i915_page_table *alloc = NULL;
 	struct i915_page_table *pt;
 	u64 from = start;
 	unsigned int pde;
+	int ret = 0;
 
 	spin_lock(&pd->lock);
 	gen8_for_each_pde(pt, pd, start, length, pde) {
 		const int count = gen8_pte_count(start, length);
 
 		if (pt == vm->scratch_pt) {
-			struct i915_page_table *old;
-
 			spin_unlock(&pd->lock);
 
-			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			pt = fetch_and_zero(&alloc);
+			if (!pt)
+				pt = alloc_pt(vm);
+			if (IS_ERR(pt)) {
+				ret = PTR_ERR(pt);
 				goto unwind;
+			}
 
 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
 				gen8_initialize_pt(vm, pt);
 
-			old = cmpxchg(&pd->page_table[pde], vm->scratch_pt, pt);
-			if (old == vm->scratch_pt) {
+			spin_lock(&pd->lock);
+			if (pd->page_table[pde] == vm->scratch_pt) {
 				gen8_ppgtt_set_pde(vm, pd, pt, pde);
+				pd->page_table[pde] = pt;
 				atomic_inc(&pd->used_pdes);
 			} else {
-				free_pt(vm, pt);
-				pt = old;
+				alloc = pt;
+				pt = pd->page_table[pde];
 			}
-
-			spin_lock(&pd->lock);
 		}
 
 		atomic_add(count, &pt->used_ptes);
 	}
 	spin_unlock(&pd->lock);
-
-	return 0;
+	goto out;
 
 unwind:
 	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pt(vm, alloc);
+	return ret;
 }
 
 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 				struct i915_page_directory_pointer *pdp,
 				u64 start, u64 length)
 {
+	struct i915_page_directory *alloc = NULL;
 	struct i915_page_directory *pd;
 	u64 from = start;
 	unsigned int pdpe;
-	int ret;
+	int ret = 0;
 
 	spin_lock(&pdp->lock);
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (pd == vm->scratch_pd) {
-			struct i915_page_directory *old;
-
 			spin_unlock(&pdp->lock);
 
-			pd = alloc_pd(vm);
-			if (IS_ERR(pd))
+			pd = fetch_and_zero(&alloc);
+			if (!pd)
+				pd = alloc_pd(vm);
+			if (IS_ERR(pd)) {
+				ret = PTR_ERR(pd);
 				goto unwind;
+			}
 
 			gen8_initialize_pd(vm, pd);
 
-			old = cmpxchg(&pdp->page_directory[pdpe],
-				      vm->scratch_pd, pd);
-			if (old == vm->scratch_pd) {
+			spin_lock(&pdp->lock);
+			if (pdp->page_directory[pdpe] == vm->scratch_pd) {
 				gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+				pdp->page_directory[pdpe] = pd;
 				atomic_inc(&pdp->used_pdpes);
 			} else {
-				free_pd(vm, pd);
-				pd = old;
+				alloc = pd;
+				pd = pdp->page_directory[pdpe];
 			}
-
-			spin_lock(&pdp->lock);
 		}
 		atomic_inc(&pd->used_pdes);
 		spin_unlock(&pdp->lock);
@@ -1475,8 +1481,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 		atomic_dec(&pd->used_pdes);
 	}
 	spin_unlock(&pdp->lock);
-
-	return 0;
+	goto out;
 
 unwind_pd:
 	spin_lock(&pdp->lock);
@@ -1489,7 +1494,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	spin_unlock(&pdp->lock);
 unwind:
 	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pd(vm, alloc);
+	return ret;
 }
 
 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
@@ -1504,33 +1512,35 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 {
 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct i915_pml4 *pml4 = &ppgtt->pml4;
+	struct i915_page_directory_pointer *alloc = NULL;
 	struct i915_page_directory_pointer *pdp;
 	u64 from = start;
+	int ret = 0;
 	u32 pml4e;
-	int ret;
 
 	spin_lock(&pml4->lock);
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (pdp == vm->scratch_pdp) {
-			struct i915_page_directory_pointer *old;
-
 			spin_unlock(&pml4->lock);
 
-			pdp = alloc_pdp(vm);
-			if (IS_ERR(pdp))
+			pdp = fetch_and_zero(&alloc);
+			if (!pdp)
+				pdp = alloc_pdp(vm);
+			if (IS_ERR(pdp)) {
+				ret = PTR_ERR(pdp);
 				goto unwind;
+			}
 
 			gen8_initialize_pdp(vm, pdp);
 
-			old = cmpxchg(&pml4->pdps[pml4e], vm->scratch_pdp, pdp);
-			if (old == vm->scratch_pdp) {
+			spin_lock(&pml4->lock);
+			if (pml4->pdps[pml4e] == vm->scratch_pdp) {
 				gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+				pml4->pdps[pml4e] = pdp;
 			} else {
-				free_pdp(vm, pdp);
-				pdp = old;
+				alloc = pdp;
+				pdp = pml4->pdps[pml4e];
 			}
-
-			spin_lock(&pml4->lock);
 		}
 		atomic_inc(&pdp->used_pdpes);
 		spin_unlock(&pml4->lock);
@@ -1543,8 +1553,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 		atomic_dec(&pdp->used_pdpes);
 	}
 	spin_unlock(&pml4->lock);
-
-	return 0;
+	goto out;
 
 unwind_pdp:
 	spin_lock(&pml4->lock);
@@ -1555,7 +1564,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	spin_unlock(&pml4->lock);
 unwind:
 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pdp(vm, alloc);
+	return ret;
 }
 
 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -1820,11 +1832,13 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 			       u64 start, u64 length)
 {
 	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
+	struct i915_page_table *alloc = NULL;
 	struct i915_page_table *pt;
 	intel_wakeref_t wakeref;
 	u64 from = start;
 	unsigned int pde;
 	bool flush = false;
+	int ret = 0;
 
 	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
 
@@ -1833,19 +1847,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		const unsigned int count = gen6_pte_count(start, length);
 
 		if (pt == vm->scratch_pt) {
-			struct i915_page_table *old;
-
 			spin_unlock(&ppgtt->base.pd.lock);
 
-			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			pt = fetch_and_zero(&alloc);
+			if (!pt)
+				pt = alloc_pt(vm);
+			if (IS_ERR(pt)) {
+				ret = PTR_ERR(pt);
 				goto unwind_out;
+			}
 
 			gen6_initialize_pt(vm, pt);
 
-			old = cmpxchg(&ppgtt->base.pd.page_table[pde],
-				      vm->scratch_pt, pt);
-			if (old == vm->scratch_pt) {
+			spin_lock(&ppgtt->base.pd.lock);
+			if (ppgtt->base.pd.page_table[pde] == vm->scratch_pt) {
 				ppgtt->base.pd.page_table[pde] = pt;
 				if (i915_vma_is_bound(ppgtt->vma,
 						      I915_VMA_GLOBAL_BIND)) {
@@ -1853,11 +1868,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 					flush = true;
 				}
 			} else {
-				free_pt(vm, pt);
-				pt = old;
+				alloc = pt;
+				pt = ppgtt->base.pd.page_table[pde];
 			}
-
-			spin_lock(&ppgtt->base.pd.lock);
 		}
 
 		atomic_add(count, &pt->used_ptes);
@@ -1869,14 +1882,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		gen6_ggtt_invalidate(vm->i915);
 	}
 
-	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
-	return 0;
+	goto out;
 
 unwind_out:
-	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
 	gen6_ppgtt_clear_range(vm, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pt(vm, alloc);
+	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+	return ret;
 }
 
 static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
@ 2019-06-17 11:30 ` Matthew Auld
  2019-06-17 13:55 ` Mika Kuoppala
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Matthew Auld @ 2019-06-17 11:30 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: Mika Kuoppala

On 17/06/2019 12:20, Chris Wilson wrote:
> Currently, we perform a locked update of the shadow entry when
> allocating a page directory entry such that if two clients are
> concurrently allocating neighbouring ranges we only insert one new entry
> for the pair of them. However, we also need to serialise both clients
> wrt the actual entry in the HW table, or else we may allow one client
> or even a third client to proceed ahead of the HW write. My handwave
> before was that under the _pathological_ condition we would see the
> scratch entry instead of the expected entry, causing a temporary
> glitch. That starvation condition will eventually show up in practice, so
> fix it.
> 
> The reason for the previous cheat was to avoid having to free the extra
> allocation while under the spinlock. Now, we keep the extra entry
> allocated until the end instead.
> 
> v2: Fix error paths for gen6
> 
> Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
  2019-06-17 11:30 ` Matthew Auld
@ 2019-06-17 13:55 ` Mika Kuoppala
  2019-06-17 14:04 ` [PATCH v3] " Chris Wilson
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Mika Kuoppala @ 2019-06-17 13:55 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: Matthew Auld

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Currently, we perform a locked update of the shadow entry when
> allocating a page directory entry such that if two clients are
> concurrently allocating neighbouring ranges we only insert one new entry
> for the pair of them. However, we also need to serialise both clients
> wrt the actual entry in the HW table, or else we may allow one client
> or even a third client to proceed ahead of the HW write. My handwave
> before was that under the _pathological_ condition we would see the
> scratch entry instead of the expected entry, causing a temporary
> glitch. That starvation condition will eventually show up in practice, so
> fix it.
>
> The reason for the previous cheat was to avoid having to free the extra
> allocation while under the spinlock. Now, we keep the extra entry
> allocated until the end instead.
>
> v2: Fix error paths for gen6
>
> Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@intel.com>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/i915_gem_gtt.c | 130 +++++++++++++++-------------
>  1 file changed, 72 insertions(+), 58 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 0392a4c4bb9b..0987748d327b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -1387,82 +1387,88 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
>  			       struct i915_page_directory *pd,
>  			       u64 start, u64 length)
>  {
> +	struct i915_page_table *alloc = NULL;
>  	struct i915_page_table *pt;
>  	u64 from = start;
>  	unsigned int pde;
> +	int ret = 0;
>  
>  	spin_lock(&pd->lock);
>  	gen8_for_each_pde(pt, pd, start, length, pde) {
>  		const int count = gen8_pte_count(start, length);
>  
>  		if (pt == vm->scratch_pt) {
> -			struct i915_page_table *old;
> -
>  			spin_unlock(&pd->lock);
>  
> -			pt = alloc_pt(vm);
> -			if (IS_ERR(pt))
> +			pt = fetch_and_zero(&alloc);
> +			if (!pt)
> +				pt = alloc_pt(vm);
> +			if (IS_ERR(pt)) {
> +				ret = PTR_ERR(pt);
>  				goto unwind;
> +			}
>  
>  			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
>  				gen8_initialize_pt(vm, pt);
>  
> -			old = cmpxchg(&pd->page_table[pde], vm->scratch_pt, pt);
> -			if (old == vm->scratch_pt) {
> +			spin_lock(&pd->lock);
> +			if (pd->page_table[pde] == vm->scratch_pt) {
>  				gen8_ppgtt_set_pde(vm, pd, pt, pde);
> +				pd->page_table[pde] = pt;
>  				atomic_inc(&pd->used_pdes);
>  			} else {
> -				free_pt(vm, pt);
> -				pt = old;
> +				alloc = pt;
> +				pt = pd->page_table[pde];
>  			}
> -
> -			spin_lock(&pd->lock);
>  		}
>  
>  		atomic_add(count, &pt->used_ptes);
>  	}
>  	spin_unlock(&pd->lock);
> -
> -	return 0;
> +	goto out;
>  
>  unwind:
>  	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
> -	return -ENOMEM;
> +out:
> +	if (alloc)
> +		free_pt(vm, alloc);
> +	return ret;
>  }
>  
>  static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
>  				struct i915_page_directory_pointer *pdp,
>  				u64 start, u64 length)
>  {
> +	struct i915_page_directory *alloc = NULL;
>  	struct i915_page_directory *pd;
>  	u64 from = start;
>  	unsigned int pdpe;
> -	int ret;
> +	int ret = 0;
>  
>  	spin_lock(&pdp->lock);
>  	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
>  		if (pd == vm->scratch_pd) {
> -			struct i915_page_directory *old;
> -
>  			spin_unlock(&pdp->lock);
>  
> -			pd = alloc_pd(vm);
> -			if (IS_ERR(pd))
> +			pd = fetch_and_zero(&alloc);
> +			if (!pd)
> +				pd = alloc_pd(vm);
> +			if (IS_ERR(pd)) {
> +				ret = PTR_ERR(pd);
>  				goto unwind;
> +			}
>  
>  			gen8_initialize_pd(vm, pd);
>  
> -			old = cmpxchg(&pdp->page_directory[pdpe],
> -				      vm->scratch_pd, pd);
> -			if (old == vm->scratch_pd) {
> +			spin_lock(&pdp->lock);
> +			if (pdp->page_directory[pdpe] == vm->scratch_pd) {
>  				gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
> +				pdp->page_directory[pdpe] = pd;
>  				atomic_inc(&pdp->used_pdpes);
>  			} else {
> -				free_pd(vm, pd);
> -				pd = old;
> +				alloc = pd;
> +				pd = pdp->page_directory[pdpe];
>  			}
> -
> -			spin_lock(&pdp->lock);
>  		}
>  		atomic_inc(&pd->used_pdes);
>  		spin_unlock(&pdp->lock);
> @@ -1475,8 +1481,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
>  		atomic_dec(&pd->used_pdes);
>  	}
>  	spin_unlock(&pdp->lock);
> -
> -	return 0;
> +	goto out;
>  
>  unwind_pd:
>  	spin_lock(&pdp->lock);
> @@ -1489,7 +1494,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
>  	spin_unlock(&pdp->lock);
>  unwind:
>  	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
> -	return -ENOMEM;
> +out:
> +	if (alloc)
> +		free_pd(vm, alloc);
> +	return ret;
>  }
>  
>  static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
> @@ -1504,33 +1512,35 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
>  {
>  	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
>  	struct i915_pml4 *pml4 = &ppgtt->pml4;
> +	struct i915_page_directory_pointer *alloc = NULL;
>  	struct i915_page_directory_pointer *pdp;
>  	u64 from = start;
> +	int ret = 0;
>  	u32 pml4e;
> -	int ret;
>  
>  	spin_lock(&pml4->lock);
>  	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
>  		if (pdp == vm->scratch_pdp) {
> -			struct i915_page_directory_pointer *old;
> -
>  			spin_unlock(&pml4->lock);
>  
> -			pdp = alloc_pdp(vm);
> -			if (IS_ERR(pdp))
> +			pdp = fetch_and_zero(&alloc);
> +			if (!pdp)
> +				pdp = alloc_pdp(vm);
> +			if (IS_ERR(pdp)) {
> +				ret = PTR_ERR(pdp);
>  				goto unwind;
> +			}
>  
>  			gen8_initialize_pdp(vm, pdp);
>  
> -			old = cmpxchg(&pml4->pdps[pml4e], vm->scratch_pdp, pdp);
> -			if (old == vm->scratch_pdp) {
> +			spin_lock(&pml4->lock);
> +			if (pml4->pdps[pml4e] == vm->scratch_pdp) {
>  				gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
> +				pml4->pdps[pml4e] = pdp;
>  			} else {
> -				free_pdp(vm, pdp);
> -				pdp = old;
> +				alloc = pdp;
> +				pdp = pml4->pdps[pml4e];
>  			}
> -
> -			spin_lock(&pml4->lock);
>  		}
>  		atomic_inc(&pdp->used_pdpes);
>  		spin_unlock(&pml4->lock);
> @@ -1543,8 +1553,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
>  		atomic_dec(&pdp->used_pdpes);
>  	}
>  	spin_unlock(&pml4->lock);
> -
> -	return 0;
> +	goto out;
>  
>  unwind_pdp:
>  	spin_lock(&pml4->lock);
> @@ -1555,7 +1564,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
>  	spin_unlock(&pml4->lock);
>  unwind:
>  	gen8_ppgtt_clear_4lvl(vm, from, start - from);
> -	return -ENOMEM;
> +out:
> +	if (alloc)
> +		free_pdp(vm, alloc);
> +	return ret;
>  }
>  
>  static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
> @@ -1820,11 +1832,13 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
>  			       u64 start, u64 length)
>  {
>  	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
> +	struct i915_page_table *alloc = NULL;
>  	struct i915_page_table *pt;
>  	intel_wakeref_t wakeref;
>  	u64 from = start;
>  	unsigned int pde;
>  	bool flush = false;
> +	int ret = 0;
>  
>  	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
>  
> @@ -1833,19 +1847,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
>  		const unsigned int count = gen6_pte_count(start, length);
>  
>  		if (pt == vm->scratch_pt) {
> -			struct i915_page_table *old;
> -
>  			spin_unlock(&ppgtt->base.pd.lock);
>  
> -			pt = alloc_pt(vm);
> -			if (IS_ERR(pt))
> +			pt = fetch_and_zero(&alloc);
> +			if (!pt)
> +				pt = alloc_pt(vm);
> +			if (IS_ERR(pt)) {
> +				ret = PTR_ERR(pt);
>  				goto unwind_out;
> +			}
>  
>  			gen6_initialize_pt(vm, pt);
>  
> -			old = cmpxchg(&ppgtt->base.pd.page_table[pde],
> -				      vm->scratch_pt, pt);
> -			if (old == vm->scratch_pt) {
> +			spin_lock(&ppgtt->base.pd.lock);
> +			if (ppgtt->base.pd.page_table[pde] == vm->scratch_pt) {
>  				ppgtt->base.pd.page_table[pde] = pt;
>  				if (i915_vma_is_bound(ppgtt->vma,
>  						      I915_VMA_GLOBAL_BIND)) {
> @@ -1853,11 +1868,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
>  					flush = true;
>  				}
>  			} else {
> -				free_pt(vm, pt);
> -				pt = old;
> +				alloc = pt;
> +				pt = ppgtt->base.pd.page_table[pde];
>  			}
> -
> -			spin_lock(&ppgtt->base.pd.lock);
>  		}
>  
>  		atomic_add(count, &pt->used_ptes);
> @@ -1869,14 +1882,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
>  		gen6_ggtt_invalidate(vm->i915);
>  	}
>  
> -	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
> -
> -	return 0;
> +	goto out;
>  
>  unwind_out:
> -	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
>  	gen6_ppgtt_clear_range(vm, from, start - from);
> -	return -ENOMEM;
> +out:
> +	if (alloc)
> +		free_pt(vm, alloc);
> +	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
> +	return ret;
>  }
>  
>  static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
> -- 
> 2.20.1
>
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [PATCH v3] drm/i915/gtt: Serialise both updates to PDE and our shadow
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
  2019-06-17 11:30 ` Matthew Auld
  2019-06-17 13:55 ` Mika Kuoppala
@ 2019-06-17 14:04 ` Chris Wilson
  2019-06-17 18:06 ` ✗ Fi.CI.SPARSE: warning for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2) Patchwork
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 7+ messages in thread
From: Chris Wilson @ 2019-06-17 14:04 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala, Matthew Auld

Currently, we perform a locked update of the shadow entry when
allocating a page directory entry such that if two clients are
concurrently allocating neighbouring ranges we only insert one new entry
for the pair of them. However, we also need to serialise both clients
wrt the actual entry in the HW table, or else we may allow one client
or even a third client to proceed ahead of the HW write. My handwave
before was that under the _pathological_ condition we would see the
scratch entry instead of the expected entry, causing a temporary
glitch. That starvation condition will eventually show up in practice, so
fix it.

The reason for the previous cheat was to avoid having to free the extra
allocation while under the spinlock. Now, we keep the extra entry
allocated until the end instead.

v2: Fix error paths for gen6

Fixes: 1d1b5490b91c ("drm/i915/gtt: Replace struct_mutex serialisation for allocation")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 133 +++++++++++++++-------------
 1 file changed, 73 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ec00fccd0c6f..8ab820145ea6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1346,81 +1346,86 @@ static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
 			       struct i915_page_directory *pd,
 			       u64 start, u64 length)
 {
-	struct i915_page_table *pt;
+	struct i915_page_table *pt, *alloc = NULL;
 	u64 from = start;
 	unsigned int pde;
+	int ret = 0;
 
 	spin_lock(&pd->lock);
 	gen8_for_each_pde(pt, pd, start, length, pde) {
 		const int count = gen8_pte_count(start, length);
 
 		if (pt == vm->scratch_pt) {
-			struct i915_page_table *old;
-
 			spin_unlock(&pd->lock);
 
-			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			pt = fetch_and_zero(&alloc);
+			if (!pt)
+				pt = alloc_pt(vm);
+			if (IS_ERR(pt)) {
+				ret = PTR_ERR(pt);
 				goto unwind;
+			}
 
 			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
 				gen8_initialize_pt(vm, pt);
 
-			old = cmpxchg(&pd->entry[pde], vm->scratch_pt, pt);
-			if (old == vm->scratch_pt) {
+			spin_lock(&pd->lock);
+			if (pd->entry[pde] == vm->scratch_pt) {
 				gen8_ppgtt_set_pde(vm, pd, pt, pde);
+				pd->entry[pde] = pt;
 				atomic_inc(&pd->used);
 			} else {
-				free_pt(vm, pt);
-				pt = old;
+				alloc = pt;
+				pt = pd->entry[pde];
 			}
-
-			spin_lock(&pd->lock);
 		}
 
 		atomic_add(count, &pt->used);
 	}
 	spin_unlock(&pd->lock);
-
-	return 0;
+	goto out;
 
 unwind:
 	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pt(vm, alloc);
+	return ret;
 }
 
 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 				struct i915_page_directory *pdp,
 				u64 start, u64 length)
 {
-	struct i915_page_directory *pd;
+	struct i915_page_directory *pd, *alloc = NULL;
 	u64 from = start;
 	unsigned int pdpe;
-	int ret;
+	int ret = 0;
 
 	spin_lock(&pdp->lock);
 	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
 		if (pd == vm->scratch_pd) {
-			struct i915_page_directory *old;
-
 			spin_unlock(&pdp->lock);
 
-			pd = alloc_pd(vm);
-			if (IS_ERR(pd))
+			pd = fetch_and_zero(&alloc);
+			if (!pd)
+				pd = alloc_pd(vm);
+			if (IS_ERR(pd)) {
+				ret = PTR_ERR(pd);
 				goto unwind;
+			}
 
 			init_pd_with_page(vm, pd, vm->scratch_pt);
 
-			old = cmpxchg(&pdp->entry[pdpe], vm->scratch_pd, pd);
-			if (old == vm->scratch_pd) {
+			spin_lock(&pdp->lock);
+			if (pdp->entry[pdpe] == vm->scratch_pd) {
 				gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
+				pdp->entry[pdpe] = pd;
 				atomic_inc(&pdp->used);
 			} else {
-				free_pd(vm, pd);
-				pd = old;
+				alloc = pd;
+				pd = pdp->entry[pdpe];
 			}
-
-			spin_lock(&pdp->lock);
 		}
 		atomic_inc(&pd->used);
 		spin_unlock(&pdp->lock);
@@ -1433,8 +1438,7 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 		atomic_dec(&pd->used);
 	}
 	spin_unlock(&pdp->lock);
-
-	return 0;
+	goto out;
 
 unwind_pd:
 	spin_lock(&pdp->lock);
@@ -1447,7 +1451,10 @@ static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
 	spin_unlock(&pdp->lock);
 unwind:
 	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pd(vm, alloc);
+	return ret;
 }
 
 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
@@ -1462,33 +1469,34 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 {
 	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct i915_page_directory * const pml4 = ppgtt->pd;
-	struct i915_page_directory *pdp;
+	struct i915_page_directory *pdp, *alloc = NULL;
 	u64 from = start;
+	int ret = 0;
 	u32 pml4e;
-	int ret;
 
 	spin_lock(&pml4->lock);
 	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
 		if (pdp == vm->scratch_pdp) {
-			struct i915_page_directory *old;
-
 			spin_unlock(&pml4->lock);
 
-			pdp = alloc_pd(vm);
-			if (IS_ERR(pdp))
+			pdp = fetch_and_zero(&alloc);
+			if (!pdp)
+				pdp = alloc_pd(vm);
+			if (IS_ERR(pdp)) {
+				ret = PTR_ERR(pdp);
 				goto unwind;
+			}
 
 			init_pd(vm, pdp, vm->scratch_pd);
 
-			old = cmpxchg(&pml4->entry[pml4e], vm->scratch_pdp, pdp);
-			if (old == vm->scratch_pdp) {
+			spin_lock(&pml4->lock);
+			if (pml4->entry[pml4e] == vm->scratch_pdp) {
 				gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+				pml4->entry[pml4e] = pdp;
 			} else {
-				free_pd(vm, pdp);
-				pdp = old;
+				alloc = pdp;
+				pdp = pml4->entry[pml4e];
 			}
-
-			spin_lock(&pml4->lock);
 		}
 		atomic_inc(&pdp->used);
 		spin_unlock(&pml4->lock);
@@ -1501,8 +1509,7 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 		atomic_dec(&pdp->used);
 	}
 	spin_unlock(&pml4->lock);
-
-	return 0;
+	goto out;
 
 unwind_pdp:
 	spin_lock(&pml4->lock);
@@ -1513,7 +1520,10 @@ static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
 	spin_unlock(&pml4->lock);
 unwind:
 	gen8_ppgtt_clear_4lvl(vm, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pd(vm, alloc);
+	return ret;
 }
 
 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
@@ -1792,11 +1802,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 {
 	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
 	struct i915_page_directory * const pd = ppgtt->base.pd;
-	struct i915_page_table *pt;
+	struct i915_page_table *pt, *alloc = NULL;
 	intel_wakeref_t wakeref;
 	u64 from = start;
 	unsigned int pde;
 	bool flush = false;
+	int ret = 0;
 
 	wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
 
@@ -1805,29 +1816,30 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		const unsigned int count = gen6_pte_count(start, length);
 
 		if (pt == vm->scratch_pt) {
-			struct i915_page_table *old;
-
 			spin_unlock(&pd->lock);
 
-			pt = alloc_pt(vm);
-			if (IS_ERR(pt))
+			pt = fetch_and_zero(&alloc);
+			if (!pt)
+				pt = alloc_pt(vm);
+			if (IS_ERR(pt)) {
+				ret = PTR_ERR(pt);
 				goto unwind_out;
+			}
 
 			gen6_initialize_pt(vm, pt);
 
-			old = cmpxchg(&pd->entry[pde], vm->scratch_pt, pt);
-			if (old == vm->scratch_pt) {
+			spin_lock(&pd->lock);
+			if (pd->entry[pde] == vm->scratch_pt) {
+				pd->entry[pde] = pt;
 				if (i915_vma_is_bound(ppgtt->vma,
 						      I915_VMA_GLOBAL_BIND)) {
 					gen6_write_pde(ppgtt, pde, pt);
 					flush = true;
 				}
 			} else {
-				free_pt(vm, pt);
-				pt = old;
+				alloc = pt;
+				pt = pd->entry[pde];
 			}
-
-			spin_lock(&pd->lock);
 		}
 
 		atomic_add(count, &pt->used);
@@ -1839,14 +1851,15 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 		gen6_ggtt_invalidate(vm->i915);
 	}
 
-	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
-	return 0;
+	goto out;
 
 unwind_out:
-	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
 	gen6_ppgtt_clear_range(vm, from, start - from);
-	return -ENOMEM;
+out:
+	if (alloc)
+		free_pt(vm, alloc);
+	intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
+	return ret;
 }
 
 static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* ✗ Fi.CI.SPARSE: warning for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
                   ` (2 preceding siblings ...)
  2019-06-17 14:04 ` [PATCH v3] " Chris Wilson
@ 2019-06-17 18:06 ` Patchwork
  2019-06-17 18:23 ` ✓ Fi.CI.BAT: success " Patchwork
  2019-06-18  7:28 ` ✓ Fi.CI.IGT: " Patchwork
  5 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-06-17 18:06 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
URL   : https://patchwork.freedesktop.org/series/62203/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915/gtt: Serialise both updates to PDE and our shadow
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1354:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1354:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1402:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1402:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1471:9: warning: expression using sizeof(void)
-O:drivers/gpu/drm/i915/i915_gem_gtt.c:1471:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1355:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1355:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1406:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1406:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1478:9: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/i915_gem_gtt.c:1478:9: warning: expression using sizeof(void)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
                   ` (3 preceding siblings ...)
  2019-06-17 18:06 ` ✗ Fi.CI.SPARSE: warning for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2) Patchwork
@ 2019-06-17 18:23 ` Patchwork
  2019-06-18  7:28 ` ✓ Fi.CI.IGT: " Patchwork
  5 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-06-17 18:23 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
URL   : https://patchwork.freedesktop.org/series/62203/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6287 -> Patchwork_13310
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/

Known issues
------------

  Here are the changes found in Patchwork_13310 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_cpu_reloc@basic:
    - fi-icl-u3:          [PASS][1] -> [INCOMPLETE][2] ([fdo#107713] / [fdo#110246])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/fi-icl-u3/igt@gem_cpu_reloc@basic.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/fi-icl-u3/igt@gem_cpu_reloc@basic.html

  * igt@i915_selftest@live_evict:
    - fi-bsw-kefka:       [PASS][3] -> [DMESG-WARN][4] ([fdo#107709])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/fi-bsw-kefka/igt@i915_selftest@live_evict.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/fi-bsw-kefka/igt@i915_selftest@live_evict.html

  * igt@kms_force_connector_basic@prune-stale-modes:
    - fi-ilk-650:         [PASS][5] -> [DMESG-WARN][6] ([fdo#106387])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/fi-ilk-650/igt@kms_force_connector_basic@prune-stale-modes.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/fi-ilk-650/igt@kms_force_connector_basic@prune-stale-modes.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-u2:          [PASS][7] -> [FAIL][8] ([fdo#103167])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html

  
#### Possible fixes ####

  * igt@i915_selftest@live_hangcheck:
    - fi-icl-dsi:         [INCOMPLETE][9] ([fdo#107713] / [fdo#108569]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/fi-icl-dsi/igt@i915_selftest@live_hangcheck.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/fi-icl-dsi/igt@i915_selftest@live_hangcheck.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#106387]: https://bugs.freedesktop.org/show_bug.cgi?id=106387
  [fdo#107709]: https://bugs.freedesktop.org/show_bug.cgi?id=107709
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
  [fdo#110246]: https://bugs.freedesktop.org/show_bug.cgi?id=110246


Participating hosts (48 -> 38)
------------------------------

  Additional (1): fi-icl-guc 
  Missing    (11): fi-kbl-soraka fi-cml-u2 fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-skl-iommu fi-kbl-8809g fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_6287 -> Patchwork_13310

  CI_DRM_6287: 3765c2bb2bf60f35709fba4c23070e2b74e14247 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5059: 1f67ee0d09d6513f487f2be74aae9700e755258a @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13310: 8ff0438ea49ee84c9f62c31b04c293991f55ca44 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

8ff0438ea49e drm/i915/gtt: Serialise both updates to PDE and our shadow

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

* ✓ Fi.CI.IGT: success for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
  2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
                   ` (4 preceding siblings ...)
  2019-06-17 18:23 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-06-18  7:28 ` Patchwork
  5 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-06-18  7:28 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2)
URL   : https://patchwork.freedesktop.org/series/62203/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6287_full -> Patchwork_13310_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_13310_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_tiled_swapping@non-threaded:
    - shard-kbl:          [PASS][1] -> [DMESG-WARN][2] ([fdo#108686])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl6/igt@gem_tiled_swapping@non-threaded.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl3/igt@gem_tiled_swapping@non-threaded.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
    - shard-kbl:          [PASS][3] -> [DMESG-WARN][4] ([fdo#110913])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl4/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl6/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-apl:          [PASS][5] -> [DMESG-WARN][6] ([fdo#110913]) +2 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-apl8/igt@gem_userptr_blits@sync-unmap-cycles.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-apl3/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@i915_suspend@sysfs-reader:
    - shard-apl:          [PASS][7] -> [DMESG-WARN][8] ([fdo#108566]) +5 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-apl6/igt@i915_suspend@sysfs-reader.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-apl4/igt@i915_suspend@sysfs-reader.html

  * igt@kms_flip@2x-flip-vs-absolute-wf_vblank:
    - shard-hsw:          [PASS][9] -> [SKIP][10] ([fdo#109271]) +12 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-hsw6/igt@kms_flip@2x-flip-vs-absolute-wf_vblank.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-hsw1/igt@kms_flip@2x-flip-vs-absolute-wf_vblank.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-snb:          [PASS][11] -> [INCOMPLETE][12] ([fdo#105411]) +1 similar issue
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-snb7/igt@kms_flip@flip-vs-suspend.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-snb1/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip_tiling@flip-yf-tiled:
    - shard-skl:          [PASS][13] -> [FAIL][14] ([fdo#108145])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-skl3/igt@kms_flip_tiling@flip-yf-tiled.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-skl10/igt@kms_flip_tiling@flip-yf-tiled.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         [PASS][15] -> [FAIL][16] ([fdo#103167]) +4 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb8/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb7/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][17] -> [SKIP][18] ([fdo#109441]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb4/igt@kms_psr@psr2_cursor_render.html

  * igt@kms_vblank@pipe-b-ts-continuation-suspend:
    - shard-kbl:          [PASS][19] -> [DMESG-WARN][20] ([fdo#108566])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl3/igt@kms_vblank@pipe-b-ts-continuation-suspend.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl6/igt@kms_vblank@pipe-b-ts-continuation-suspend.html

  
#### Possible fixes ####

  * igt@gem_exec_blt@normal-min:
    - shard-apl:          [INCOMPLETE][21] ([fdo#103927]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-apl2/igt@gem_exec_blt@normal-min.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-apl6/igt@gem_exec_blt@normal-min.html

  * igt@gem_mmap_gtt@hang:
    - shard-snb:          [INCOMPLETE][23] ([fdo#105411]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-snb6/igt@gem_mmap_gtt@hang.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-snb1/igt@gem_mmap_gtt@hang.html

  * igt@gem_persistent_relocs@forked-faulting-reloc-thrashing:
    - shard-snb:          [DMESG-WARN][25] ([fdo#110789] / [fdo#110913]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-snb7/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-snb4/igt@gem_persistent_relocs@forked-faulting-reloc-thrashing.html

  * igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup:
    - shard-apl:          [DMESG-WARN][27] ([fdo#110913]) -> [PASS][28] +1 similar issue
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-apl8/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-apl2/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-kbl:          [DMESG-WARN][29] ([fdo#110913]) -> [PASS][30] +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl3/igt@gem_userptr_blits@sync-unmap-cycles.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl1/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@i915_suspend@fence-restore-tiled2untiled:
    - shard-apl:          [DMESG-WARN][31] ([fdo#108566]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-apl8/igt@i915_suspend@fence-restore-tiled2untiled.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-apl4/igt@i915_suspend@fence-restore-tiled2untiled.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][33] ([fdo#109349]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb5/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  * igt@kms_draw_crc@draw-method-xrgb2101010-mmap-gtt-ytiled:
    - shard-skl:          [FAIL][35] ([fdo#103184] / [fdo#103232]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-skl3/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-gtt-ytiled.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-skl10/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-gtt-ytiled.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-kbl:          [DMESG-WARN][37] ([fdo#108566]) -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl4/igt@kms_flip@flip-vs-suspend.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl6/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt:
    - shard-iclb:         [FAIL][39] ([fdo#103167]) -> [PASS][40] +8 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt.html

  * igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-pwrite:
    - shard-hsw:          [SKIP][41] ([fdo#109271]) -> [PASS][42] +20 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-hsw1/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-pwrite.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-hsw5/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-cur-indfb-draw-pwrite.html

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
    - shard-skl:          [INCOMPLETE][43] ([fdo#104108]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-skl9/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-skl9/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [SKIP][45] ([fdo#109441]) -> [PASS][46] +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb5/igt@kms_psr@psr2_primary_mmap_cpu.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html

  
#### Warnings ####

  * igt@gem_workarounds@suspend-resume-context:
    - shard-kbl:          [DMESG-WARN][47] ([fdo#108566]) -> [INCOMPLETE][48] ([fdo#103665])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-kbl4/igt@gem_workarounds@suspend-resume-context.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-kbl6/igt@gem_workarounds@suspend-resume-context.html

  * igt@i915_pm_rpm@modeset-pc8-residency-stress:
    - shard-iclb:         [INCOMPLETE][49] ([fdo#107713] / [fdo#108840]) -> [SKIP][50] ([fdo#109293])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6287/shard-iclb7/igt@i915_pm_rpm@modeset-pc8-residency-stress.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/shard-iclb8/igt@i915_pm_rpm@modeset-pc8-residency-stress.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103184]: https://bugs.freedesktop.org/show_bug.cgi?id=103184
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#108686]: https://bugs.freedesktop.org/show_bug.cgi?id=108686
  [fdo#108840]: https://bugs.freedesktop.org/show_bug.cgi?id=108840
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109293]: https://bugs.freedesktop.org/show_bug.cgi?id=109293
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110789]: https://bugs.freedesktop.org/show_bug.cgi?id=110789
  [fdo#110913]: https://bugs.freedesktop.org/show_bug.cgi?id=110913


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_6287 -> Patchwork_13310

  CI_DRM_6287: 3765c2bb2bf60f35709fba4c23070e2b74e14247 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5059: 1f67ee0d09d6513f487f2be74aae9700e755258a @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13310: 8ff0438ea49ee84c9f62c31b04c293991f55ca44 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13310/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2019-06-18  7:28 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-06-17 11:20 [PATCH v2] drm/i915/gtt: Serialise both updates to PDE and our shadow Chris Wilson
2019-06-17 11:30 ` Matthew Auld
2019-06-17 13:55 ` Mika Kuoppala
2019-06-17 14:04 ` [PATCH v3] " Chris Wilson
2019-06-17 18:06 ` ✗ Fi.CI.SPARSE: warning for drm/i915/gtt: Serialise both updates to PDE and our shadow (rev2) Patchwork
2019-06-17 18:23 ` ✓ Fi.CI.BAT: success " Patchwork
2019-06-18  7:28 ` ✓ Fi.CI.IGT: " Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox