Intel-GFX Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Daniel Vetter <daniel@ffwll.ch>
To: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>,
	intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH v9 30/70] drm/i915: Fix pread/pwrite to work with new locking rules.
Date: Wed, 24 Mar 2021 15:45:06 +0100	[thread overview]
Message-ID: <YFtQcojSJxLTUbDH@phenom.ffwll.local> (raw)
In-Reply-To: <20210323155059.628690-31-maarten.lankhorst@linux.intel.com>

On Tue, Mar 23, 2021 at 04:50:19PM +0100, Maarten Lankhorst wrote:
> We are removing obj->mm.lock, and need to take the reservation lock
> before we can pin pages. Move the pinning pages into the helper, and
> merge gtt pwrite/pread preparation and cleanup paths.
> 
> The fence lock is also removed; it will conflict with fence annotations,
> because of memory allocations done when pagefaulting inside copy_*_user.
> 
> Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
> Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>

Same here, I picked the old version from there

https://lore.kernel.org/intel-gfx/20210128162612.927917-31-maarten.lankhorst@linux.intel.com/

because it conflicts too much with this version here.
-Daniel

> ---
>  drivers/gpu/drm/i915/Makefile              |   1 -
>  drivers/gpu/drm/i915/gem/i915_gem_fence.c  |  95 ---------
>  drivers/gpu/drm/i915/gem/i915_gem_object.h |   5 -
>  drivers/gpu/drm/i915/i915_gem.c            | 215 +++++++++++----------
>  4 files changed, 112 insertions(+), 204 deletions(-)
>  delete mode 100644 drivers/gpu/drm/i915/gem/i915_gem_fence.c
> 
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index 33c2100414a0..70a535798ef5 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -140,7 +140,6 @@ gem-y += \
>  	gem/i915_gem_dmabuf.o \
>  	gem/i915_gem_domain.o \
>  	gem/i915_gem_execbuffer.o \
> -	gem/i915_gem_fence.o \
>  	gem/i915_gem_internal.o \
>  	gem/i915_gem_object.o \
>  	gem/i915_gem_object_blt.o \
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_fence.c b/drivers/gpu/drm/i915/gem/i915_gem_fence.c
> deleted file mode 100644
> index 8ab842c80f99..000000000000
> --- a/drivers/gpu/drm/i915/gem/i915_gem_fence.c
> +++ /dev/null
> @@ -1,95 +0,0 @@
> -/*
> - * SPDX-License-Identifier: MIT
> - *
> - * Copyright © 2019 Intel Corporation
> - */
> -
> -#include "i915_drv.h"
> -#include "i915_gem_object.h"
> -
> -struct stub_fence {
> -	struct dma_fence dma;
> -	struct i915_sw_fence chain;
> -};
> -
> -static int __i915_sw_fence_call
> -stub_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
> -{
> -	struct stub_fence *stub = container_of(fence, typeof(*stub), chain);
> -
> -	switch (state) {
> -	case FENCE_COMPLETE:
> -		dma_fence_signal(&stub->dma);
> -		break;
> -
> -	case FENCE_FREE:
> -		dma_fence_put(&stub->dma);
> -		break;
> -	}
> -
> -	return NOTIFY_DONE;
> -}
> -
> -static const char *stub_driver_name(struct dma_fence *fence)
> -{
> -	return DRIVER_NAME;
> -}
> -
> -static const char *stub_timeline_name(struct dma_fence *fence)
> -{
> -	return "object";
> -}
> -
> -static void stub_release(struct dma_fence *fence)
> -{
> -	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
> -
> -	i915_sw_fence_fini(&stub->chain);
> -
> -	BUILD_BUG_ON(offsetof(typeof(*stub), dma));
> -	dma_fence_free(&stub->dma);
> -}
> -
> -static const struct dma_fence_ops stub_fence_ops = {
> -	.get_driver_name = stub_driver_name,
> -	.get_timeline_name = stub_timeline_name,
> -	.release = stub_release,
> -};
> -
> -struct dma_fence *
> -i915_gem_object_lock_fence(struct drm_i915_gem_object *obj)
> -{
> -	struct stub_fence *stub;
> -
> -	assert_object_held(obj);
> -
> -	stub = kmalloc(sizeof(*stub), GFP_KERNEL);
> -	if (!stub)
> -		return NULL;
> -
> -	i915_sw_fence_init(&stub->chain, stub_notify);
> -	dma_fence_init(&stub->dma, &stub_fence_ops, &stub->chain.wait.lock,
> -		       0, 0);
> -
> -	if (i915_sw_fence_await_reservation(&stub->chain,
> -					    obj->base.resv, NULL, true,
> -					    i915_fence_timeout(to_i915(obj->base.dev)),
> -					    I915_FENCE_GFP) < 0)
> -		goto err;
> -
> -	dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
> -
> -	return &stub->dma;
> -
> -err:
> -	stub_release(&stub->dma);
> -	return NULL;
> -}
> -
> -void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
> -				  struct dma_fence *fence)
> -{
> -	struct stub_fence *stub = container_of(fence, typeof(*stub), dma);
> -
> -	i915_sw_fence_commit(&stub->chain);
> -}
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index fef0d62f3eb7..6c3f75adb53c 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -189,11 +189,6 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
>  	dma_resv_unlock(obj->base.resv);
>  }
>  
> -struct dma_fence *
> -i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
> -void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
> -				  struct dma_fence *fence);
> -
>  static inline void
>  i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
>  {
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 8373662e4b5f..eeb952889e4a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -204,7 +204,6 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
>  {
>  	unsigned int needs_clflush;
>  	unsigned int idx, offset;
> -	struct dma_fence *fence;
>  	char __user *user_data;
>  	u64 remain;
>  	int ret;
> @@ -213,19 +212,17 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
>  	if (ret)
>  		return ret;
>  
> +	ret = i915_gem_object_pin_pages(obj);
> +	if (ret)
> +		goto err_unlock;
> +
>  	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
> -	if (ret) {
> -		i915_gem_object_unlock(obj);
> -		return ret;
> -	}
> +	if (ret)
> +		goto err_unpin;
>  
> -	fence = i915_gem_object_lock_fence(obj);
>  	i915_gem_object_finish_access(obj);
>  	i915_gem_object_unlock(obj);
>  
> -	if (!fence)
> -		return -ENOMEM;
> -
>  	remain = args->size;
>  	user_data = u64_to_user_ptr(args->data_ptr);
>  	offset = offset_in_page(args->offset);
> @@ -243,7 +240,13 @@ i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
>  		offset = 0;
>  	}
>  
> -	i915_gem_object_unlock_fence(obj, fence);
> +	i915_gem_object_unpin_pages(obj);
> +	return ret;
> +
> +err_unpin:
> +	i915_gem_object_unpin_pages(obj);
> +err_unlock:
> +	i915_gem_object_unlock(obj);
>  	return ret;
>  }
>  
> @@ -271,48 +274,99 @@ gtt_user_read(struct io_mapping *mapping,
>  	return unwritten;
>  }
>  
> -static int
> -i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
> -		   const struct drm_i915_gem_pread *args)
> +static struct i915_vma *i915_gem_gtt_prepare(struct drm_i915_gem_object *obj,
> +					     struct drm_mm_node *node,
> +					     bool write)
>  {
>  	struct drm_i915_private *i915 = to_i915(obj->base.dev);
>  	struct i915_ggtt *ggtt = &i915->ggtt;
> -	intel_wakeref_t wakeref;
> -	struct drm_mm_node node;
> -	struct dma_fence *fence;
> -	void __user *user_data;
>  	struct i915_vma *vma;
> -	u64 remain, offset;
> +	struct i915_gem_ww_ctx ww;
>  	int ret;
>  
> -	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> +	i915_gem_ww_ctx_init(&ww, true);
> +retry:
>  	vma = ERR_PTR(-ENODEV);
> +	ret = i915_gem_object_lock(obj, &ww);
> +	if (ret)
> +		goto err_ww;
> +
> +	i915_gem_object_set_to_gtt_domain(obj, write);
>  	if (!i915_gem_object_is_tiled(obj))
> -		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
> -					       PIN_MAPPABLE |
> -					       PIN_NONBLOCK /* NOWARN */ |
> -					       PIN_NOEVICT);
> -	if (!IS_ERR(vma)) {
> -		node.start = i915_ggtt_offset(vma);
> -		node.flags = 0;
> +		vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
> +						  PIN_MAPPABLE |
> +						  PIN_NONBLOCK /* NOWARN */ |
> +						  PIN_NOEVICT);
> +	if (vma == ERR_PTR(-EDEADLK)) {
> +		ret = -EDEADLK;
> +		goto err_ww;
> +	} else if (!IS_ERR(vma)) {
> +		node->start = i915_ggtt_offset(vma);
> +		node->flags = 0;
>  	} else {
> -		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
> +		ret = insert_mappable_node(ggtt, node, PAGE_SIZE);
>  		if (ret)
> -			goto out_rpm;
> -		GEM_BUG_ON(!drm_mm_node_allocated(&node));
> +			goto err_ww;
> +		GEM_BUG_ON(!drm_mm_node_allocated(node));
> +		vma = NULL;
>  	}
>  
> -	ret = i915_gem_object_lock_interruptible(obj, NULL);
> -	if (ret)
> -		goto out_unpin;
> +	ret = i915_gem_object_pin_pages(obj);
> +	if (ret) {
> +		if (drm_mm_node_allocated(node)) {
> +			ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
> +			remove_mappable_node(ggtt, node);
> +		} else {
> +			i915_vma_unpin(vma);
> +		}
> +	}
> +
> +err_ww:
> +	if (ret == -EDEADLK) {
> +		ret = i915_gem_ww_ctx_backoff(&ww);
> +		if (!ret)
> +			goto retry;
> +	}
> +	i915_gem_ww_ctx_fini(&ww);
>  
> -	i915_gem_object_set_to_gtt_domain(obj, false);
> +	return ret ? ERR_PTR(ret) : vma;
> +}
>  
> -	fence = i915_gem_object_lock_fence(obj);
> -	i915_gem_object_unlock(obj);
> -	if (!fence) {
> -		ret = -ENOMEM;
> -		goto out_unpin;
> +static void i915_gem_gtt_cleanup(struct drm_i915_gem_object *obj,
> +				 struct drm_mm_node *node,
> +				 struct i915_vma *vma)
> +{
> +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	struct i915_ggtt *ggtt = &i915->ggtt;
> +
> +	i915_gem_object_unpin_pages(obj);
> +	if (drm_mm_node_allocated(node)) {
> +		ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
> +		remove_mappable_node(ggtt, node);
> +	} else {
> +		i915_vma_unpin(vma);
> +	}
> +}
> +
> +static int
> +i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
> +		   const struct drm_i915_gem_pread *args)
> +{
> +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	struct i915_ggtt *ggtt = &i915->ggtt;
> +	intel_wakeref_t wakeref;
> +	struct drm_mm_node node;
> +	void __user *user_data;
> +	struct i915_vma *vma;
> +	u64 remain, offset;
> +	int ret = 0;
> +
> +	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
> +
> +	vma = i915_gem_gtt_prepare(obj, &node, false);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
> +		goto out_rpm;
>  	}
>  
>  	user_data = u64_to_user_ptr(args->data_ptr);
> @@ -349,14 +403,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
>  		offset += page_length;
>  	}
>  
> -	i915_gem_object_unlock_fence(obj, fence);
> -out_unpin:
> -	if (drm_mm_node_allocated(&node)) {
> -		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
> -		remove_mappable_node(ggtt, &node);
> -	} else {
> -		i915_vma_unpin(vma);
> -	}
> +	i915_gem_gtt_cleanup(obj, &node, vma);
>  out_rpm:
>  	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
>  	return ret;
> @@ -421,15 +468,10 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
>  	if (ret)
>  		goto out;
>  
> -	ret = i915_gem_object_pin_pages(obj);
> -	if (ret)
> -		goto out;
> -
>  	ret = i915_gem_shmem_pread(obj, args);
>  	if (ret == -EFAULT || ret == -ENODEV)
>  		ret = i915_gem_gtt_pread(obj, args);
>  
> -	i915_gem_object_unpin_pages(obj);
>  out:
>  	i915_gem_object_put(obj);
>  	return ret;
> @@ -477,11 +519,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  	struct intel_runtime_pm *rpm = &i915->runtime_pm;
>  	intel_wakeref_t wakeref;
>  	struct drm_mm_node node;
> -	struct dma_fence *fence;
>  	struct i915_vma *vma;
>  	u64 remain, offset;
>  	void __user *user_data;
> -	int ret;
> +	int ret = 0;
>  
>  	if (i915_gem_object_has_struct_page(obj)) {
>  		/*
> @@ -499,33 +540,10 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  		wakeref = intel_runtime_pm_get(rpm);
>  	}
>  
> -	vma = ERR_PTR(-ENODEV);
> -	if (!i915_gem_object_is_tiled(obj))
> -		vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
> -					       PIN_MAPPABLE |
> -					       PIN_NONBLOCK /* NOWARN */ |
> -					       PIN_NOEVICT);
> -	if (!IS_ERR(vma)) {
> -		node.start = i915_ggtt_offset(vma);
> -		node.flags = 0;
> -	} else {
> -		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
> -		if (ret)
> -			goto out_rpm;
> -		GEM_BUG_ON(!drm_mm_node_allocated(&node));
> -	}
> -
> -	ret = i915_gem_object_lock_interruptible(obj, NULL);
> -	if (ret)
> -		goto out_unpin;
> -
> -	i915_gem_object_set_to_gtt_domain(obj, true);
> -
> -	fence = i915_gem_object_lock_fence(obj);
> -	i915_gem_object_unlock(obj);
> -	if (!fence) {
> -		ret = -ENOMEM;
> -		goto out_unpin;
> +	vma = i915_gem_gtt_prepare(obj, &node, true);
> +	if (IS_ERR(vma)) {
> +		ret = PTR_ERR(vma);
> +		goto out_rpm;
>  	}
>  
>  	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
> @@ -574,14 +592,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
>  	intel_gt_flush_ggtt_writes(ggtt->vm.gt);
>  	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
>  
> -	i915_gem_object_unlock_fence(obj, fence);
> -out_unpin:
> -	if (drm_mm_node_allocated(&node)) {
> -		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
> -		remove_mappable_node(ggtt, &node);
> -	} else {
> -		i915_vma_unpin(vma);
> -	}
> +	i915_gem_gtt_cleanup(obj, &node, vma);
>  out_rpm:
>  	intel_runtime_pm_put(rpm, wakeref);
>  	return ret;
> @@ -621,7 +632,6 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
>  	unsigned int partial_cacheline_write;
>  	unsigned int needs_clflush;
>  	unsigned int offset, idx;
> -	struct dma_fence *fence;
>  	void __user *user_data;
>  	u64 remain;
>  	int ret;
> @@ -630,19 +640,17 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
>  	if (ret)
>  		return ret;
>  
> +	ret = i915_gem_object_pin_pages(obj);
> +	if (ret)
> +		goto err_unlock;
> +
>  	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
> -	if (ret) {
> -		i915_gem_object_unlock(obj);
> -		return ret;
> -	}
> +	if (ret)
> +		goto err_unpin;
>  
> -	fence = i915_gem_object_lock_fence(obj);
>  	i915_gem_object_finish_access(obj);
>  	i915_gem_object_unlock(obj);
>  
> -	if (!fence)
> -		return -ENOMEM;
> -
>  	/* If we don't overwrite a cacheline completely we need to be
>  	 * careful to have up-to-date data by first clflushing. Don't
>  	 * overcomplicate things and flush the entire patch.
> @@ -670,8 +678,14 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
>  	}
>  
>  	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
> -	i915_gem_object_unlock_fence(obj, fence);
>  
> +	i915_gem_object_unpin_pages(obj);
> +	return ret;
> +
> +err_unpin:
> +	i915_gem_object_unpin_pages(obj);
> +err_unlock:
> +	i915_gem_object_unlock(obj);
>  	return ret;
>  }
>  
> @@ -735,10 +749,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
>  	if (ret)
>  		goto err;
>  
> -	ret = i915_gem_object_pin_pages(obj);
> -	if (ret)
> -		goto err;
> -
>  	ret = -EFAULT;
>  	/* We can only do the GTT pwrite on untiled buffers, as otherwise
>  	 * it would end up going through the fenced access, and we'll get
> @@ -759,7 +769,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
>  			ret = i915_gem_shmem_pwrite(obj, args);
>  	}
>  
> -	i915_gem_object_unpin_pages(obj);
>  err:
>  	i915_gem_object_put(obj);
>  	return ret;
> -- 
> 2.31.0
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  reply	other threads:[~2021-03-24 14:45 UTC|newest]

Thread overview: 108+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-03-23 15:49 [Intel-gfx] [PATCH v9 00/70] drm/i915: Remove obj->mm.lock! Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 01/70] drm/i915: Do not share hwsp across contexts any more, v8 Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 02/70] drm/i915: Pin timeline map after first timeline pin, v4 Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 03/70] drm/i915: Move cmd parser pinning to execbuffer Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 04/70] drm/i915: Add missing -EDEADLK handling to execbuf pinning, v2 Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 05/70] drm/i915: Ensure we hold the object mutex in pin correctly Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 06/70] drm/i915: Add gem object locking to madvise Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 07/70] drm/i915: Move HAS_STRUCT_PAGE to obj->flags Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 08/70] drm/i915: Rework struct phys attachment handling Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 09/70] drm/i915: Convert i915_gem_object_attach_phys() to ww locking, v2 Maarten Lankhorst
2021-03-23 15:49 ` [Intel-gfx] [PATCH v9 10/70] drm/i915: make lockdep slightly happier about execbuf Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 11/70] drm/i915: Disable userptr pread/pwrite support Maarten Lankhorst
2021-03-24 13:57   ` Jason Ekstrand
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 12/70] drm/i915: No longer allow exporting userptr through dma-buf Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 13/70] drm/i915: Reject more ioctls for userptr, v2 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 14/70] drm/i915: Reject UNSYNCHRONIZED " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 15/70] drm/i915: Make compilation of userptr code depend on MMU_NOTIFIER Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 16/70] drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v7 Maarten Lankhorst
2021-03-24 11:28   ` Daniel Vetter
2021-03-24 11:34     ` Thomas Hellström (Intel)
2021-03-25  9:23       ` [Intel-gfx] [PATCH] drm/i915: Fix userptr so we do not have to worry about obj->mm.lock, v8 Maarten Lankhorst
2021-03-25  9:55         ` Thomas Hellström (Intel)
2021-03-25 10:27           ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 17/70] drm/i915: Flatten obj->mm.lock Maarten Lankhorst
2021-03-24 11:13   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 18/70] drm/i915: Populate logical context during first pin Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 19/70] drm/i915: Make ring submission compatible with obj->mm.lock removal, v2 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 20/70] drm/i915: Handle ww locking in init_status_page Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 21/70] drm/i915: Rework clflush to work correctly without obj->mm.lock Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 22/70] drm/i915: Pass ww ctx to intel_pin_to_display_plane Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 23/70] drm/i915: Add object locking to vm_fault_cpu Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 24/70] drm/i915: Move pinning to inside engine_wa_list_verify() Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 25/70] drm/i915: Take reservation lock around i915_vma_pin Maarten Lankhorst
2021-03-24 12:35   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 26/70] drm/i915: Make lrc_init_wa_ctx compatible with ww locking, v3 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 27/70] drm/i915: Make __engine_unpark() compatible with ww locking Maarten Lankhorst
2021-03-24 12:37   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 28/70] drm/i915: Take obj lock around set_domain ioctl Maarten Lankhorst
2021-03-24 14:12   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 29/70] drm/i915: Defer pin calls in buffer pool until first use by caller Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 30/70] drm/i915: Fix pread/pwrite to work with new locking rules Maarten Lankhorst
2021-03-24 14:45   ` Daniel Vetter [this message]
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 31/70] drm/i915: Fix workarounds selftest, part 1 Maarten Lankhorst
2021-03-24 16:16   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 32/70] drm/i915: Prepare for obj->mm.lock removal, v2 Maarten Lankhorst
2021-03-23 16:18   ` Matthew Auld
2021-03-23 20:25     ` Thomas Hellström
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 33/70] drm/i915: Add igt_spinner_pin() to allow for ww locking around spinner Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 34/70] drm/i915: Add ww locking around vm_access() Maarten Lankhorst
2021-03-24 16:21   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 35/70] drm/i915: Increase ww locking for perf Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 36/70] drm/i915: Lock ww in ucode objects correctly Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 37/70] drm/i915: Add ww locking to dma-buf ops, v2 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 38/70] drm/i915: Add missing ww lock in intel_dsb_prepare Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 39/70] drm/i915: Fix ww locking in shmem_create_from_object Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 40/70] drm/i915: Use a single page table lock for each gtt Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 41/70] drm/i915/selftests: Prepare huge_pages testcases for obj->mm.lock removal Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 42/70] drm/i915/selftests: Prepare client blit " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 43/70] drm/i915/selftests: Prepare coherency tests " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 44/70] drm/i915/selftests: Prepare context " Maarten Lankhorst
2021-03-24 16:40   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 45/70] drm/i915/selftests: Prepare dma-buf " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 46/70] drm/i915/selftests: Prepare execbuf " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 47/70] drm/i915/selftests: Prepare mman testcases " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 48/70] drm/i915/selftests: Prepare object tests " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 49/70] drm/i915/selftests: Prepare object blit " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 50/70] drm/i915/selftests: Prepare igt_gem_utils " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 51/70] drm/i915/selftests: Prepare context selftest " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 52/70] drm/i915/selftests: Prepare hangcheck " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 53/70] drm/i915/selftests: Prepare execlists and lrc selftests " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 54/70] drm/i915/selftests: Prepare mocs tests " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 55/70] drm/i915/selftests: Prepare ring submission " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 56/70] drm/i915/selftests: Prepare timeline tests " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 57/70] drm/i915/selftests: Prepare i915_request " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 58/70] drm/i915/selftests: Prepare memory region " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 59/70] drm/i915/selftests: Prepare cs engine " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 60/70] drm/i915/selftests: Prepare gtt " Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 61/70] drm/i915: Finally remove obj->mm.lock Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 62/70] drm/i915: Keep userpointer bindings if seqcount is unchanged, v2 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 63/70] drm/i915: Move gt_revoke() slightly Maarten Lankhorst
2021-03-24 17:00   ` Daniel Vetter
2021-03-24 17:15     ` Ville Syrjälä
2021-03-24 17:16       ` Daniel Vetter
2021-03-24 17:58         ` Ville Syrjälä
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 64/70] drm/i915: Add missing -EDEADLK path in execbuffer ggtt pinning Maarten Lankhorst
2021-03-24 17:05   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 65/70] drm/i915: Fix pin_map in scheduler selftests Maarten Lankhorst
2021-03-24 17:14   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 66/70] drm/i915: Add ww parameter to get_pages() callback Maarten Lankhorst
2021-03-24 17:20   ` Daniel Vetter
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 67/70] drm/i915: Add ww context to prepare_(read/write) Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 68/70] drm/i915: Pass ww ctx to pin_map Maarten Lankhorst
2021-03-23 17:30   ` Matthew Auld
2021-03-24  9:31     ` Maarten Lankhorst
2021-03-24 10:11       ` Daniel Vetter
2021-03-24 11:54         ` [Intel-gfx] [PATCH] drm/i915: Pass ww ctx to pin_map, v2 Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 69/70] drm/i915: Pass ww ctx to i915_gem_object_pin_pages Maarten Lankhorst
2021-03-23 15:50 ` [Intel-gfx] [PATCH v9 70/70] drm/i915: Remove asynchronous vma binding Maarten Lankhorst
2021-03-24 17:19   ` Daniel Vetter
2021-03-23 16:07 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Remove obj->mm.lock! (rev18) Patchwork
2021-03-23 16:09 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-03-23 16:12 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
2021-03-23 16:35 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2021-03-24 12:44 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/i915: Remove obj->mm.lock! (rev19) Patchwork
2021-03-24 12:46 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-03-24 12:49 ` [Intel-gfx] ✗ Fi.CI.DOCS: " Patchwork
2021-03-24 13:13 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2021-03-25 21:28 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for drm/i915: Remove obj->mm.lock! (rev20) Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=YFtQcojSJxLTUbDH@phenom.ffwll.local \
    --to=daniel@ffwll.ch \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=intel-gfx@lists.freedesktop.org \
    --cc=maarten.lankhorst@linux.intel.com \
    --cc=thomas.hellstrom@linux.intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox