From: Mika Kuoppala <mika.kuoppala@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 29/40] drm/i915: Move GEM object waiting to its own file
Date: Fri, 10 May 2019 17:17:12 +0300
Message-ID: <87bm0a8kd3.fsf@gaia.fi.intel.com>
In-Reply-To: <20190508080704.24223-29-chris@chris-wilson.co.uk>

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Continuing the decluttering of i915_gem.c by moving the object wait
> decomposition into its own file.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/Makefile              |   1 +
>  drivers/gpu/drm/i915/gem/i915_gem_object.h |   8 +
>  drivers/gpu/drm/i915/gem/i915_gem_wait.c   | 277 +++++++++++++++++++++
>  drivers/gpu/drm/i915/i915_drv.h            |   7 -
>  drivers/gpu/drm/i915/i915_gem.c            | 254 -------------------
>  drivers/gpu/drm/i915/i915_utils.h          |  10 -
>  6 files changed, 286 insertions(+), 271 deletions(-)
>  create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_wait.c
>
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index e5348c355987..a4cc2f7f9bc6 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -105,6 +105,7 @@ gem-y += \
>  	gem/i915_gem_stolen.o \
>  	gem/i915_gem_tiling.o \
>  	gem/i915_gem_userptr.o \
> +	gem/i915_gem_wait.o \
>  	gem/i915_gemfs.o
>  i915-y += \
>  	  $(gem-y) \
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index 509d145d808a..23bca003fbfb 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -436,4 +436,12 @@ static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
>  		obj->cache_dirty = true;
>  }
>  
> +int i915_gem_object_wait(struct drm_i915_gem_object *obj,
> +			 unsigned int flags,
> +			 long timeout);
> +int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> +				  unsigned int flags,
> +				  const struct i915_sched_attr *attr);
> +#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
> +
>  #endif
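
For reference, a minimal sketch of how the relocated interface is typically
driven from elsewhere in the driver (not taken from this patch; the flag
combination and error handling are illustrative only):

	/* Wait interruptibly for all readers and writers to complete;
	 * MAX_SCHEDULE_TIMEOUT places no upper bound on the wait.
	 */
	int err = i915_gem_object_wait(obj,
				       I915_WAIT_INTERRUPTIBLE |
				       I915_WAIT_ALL,
				       MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err; /* e.g. -ERESTARTSYS if a signal arrived */
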
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> new file mode 100644
> index 000000000000..fed5c751ef37
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
> @@ -0,0 +1,277 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2016 Intel Corporation
> + */
> +
> +#include <linux/dma-fence-array.h>
> +#include <linux/jiffies.h>
> +
> +#include "gt/intel_engine.h"
> +
> +#include "i915_gem_ioctls.h"
> +#include "i915_gem_object.h"
> +
> +static long
> +i915_gem_object_wait_fence(struct dma_fence *fence,
> +			   unsigned int flags,
> +			   long timeout)
> +{
> +	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
> +
> +	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
> +		return timeout;
> +
> +	if (dma_fence_is_i915(fence))
> +		return i915_request_wait(to_request(fence), flags, timeout);
> +
> +	return dma_fence_wait_timeout(fence,
> +				      flags & I915_WAIT_INTERRUPTIBLE,
> +				      timeout);
> +}
> +
> +static long
> +i915_gem_object_wait_reservation(struct reservation_object *resv,
> +				 unsigned int flags,
> +				 long timeout)
> +{
> +	unsigned int seq = __read_seqcount_begin(&resv->seq);
> +	struct dma_fence *excl;
> +	bool prune_fences = false;
> +
> +	if (flags & I915_WAIT_ALL) {
> +		struct dma_fence **shared;
> +		unsigned int count, i;
> +		int ret;
> +
> +		ret = reservation_object_get_fences_rcu(resv,
> +							&excl, &count, &shared);
> +		if (ret)
> +			return ret;
> +
> +		for (i = 0; i < count; i++) {
> +			timeout = i915_gem_object_wait_fence(shared[i],
> +							     flags, timeout);
> +			if (timeout < 0)
> +				break;
> +
> +			dma_fence_put(shared[i]);
> +		}
> +
> +		for (; i < count; i++)
> +			dma_fence_put(shared[i]);
> +		kfree(shared);
> +
> +		/*
> +		 * If both shared fences and an exclusive fence exist,
> +		 * then by construction the shared fences must be later
> +		 * than the exclusive fence. If we successfully wait for
> +		 * all the shared fences, we know that the exclusive fence
> +		 * must also be signaled. If all the shared fences are
> +		 * signaled, we can prune the array and recover the
> +		 * floating references on the fences/requests.
> +		 */
> +		prune_fences = count && timeout >= 0;
> +	} else {
> +		excl = reservation_object_get_excl_rcu(resv);
> +	}
> +
> +	if (excl && timeout >= 0)
> +		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
> +
> +	dma_fence_put(excl);
> +
> +	/*
> +	 * Opportunistically prune the fences iff we know they have *all* been
> +	 * signaled and that the reservation object has not been changed (i.e.
> +	 * no new fences have been added).
> +	 */
> +	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
> +		if (reservation_object_trylock(resv)) {
> +			if (!__read_seqcount_retry(&resv->seq, seq))
> +				reservation_object_add_excl_fence(resv, NULL);
> +			reservation_object_unlock(resv);
> +		}
> +	}
> +
> +	return timeout;
> +}
> +
> +static void __fence_set_priority(struct dma_fence *fence,
> +				 const struct i915_sched_attr *attr)
> +{
> +	struct i915_request *rq;
> +	struct intel_engine_cs *engine;
> +
> +	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
> +		return;
> +
> +	rq = to_request(fence);
> +	engine = rq->engine;
> +
> +	local_bh_disable();
> +	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
> +	if (engine->schedule)
> +		engine->schedule(rq, attr);
> +	rcu_read_unlock();
> +	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
> +}
> +
> +static void fence_set_priority(struct dma_fence *fence,
> +			       const struct i915_sched_attr *attr)
> +{
> +	/* Recurse once into a fence-array */
> +	if (dma_fence_is_array(fence)) {
> +		struct dma_fence_array *array = to_dma_fence_array(fence);
> +		int i;
> +
> +		for (i = 0; i < array->num_fences; i++)
> +			__fence_set_priority(array->fences[i], attr);
> +	} else {
> +		__fence_set_priority(fence, attr);
> +	}
> +}
> +
> +int
> +i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> +			      unsigned int flags,
> +			      const struct i915_sched_attr *attr)
> +{
> +	struct dma_fence *excl;
> +
> +	if (flags & I915_WAIT_ALL) {
> +		struct dma_fence **shared;
> +		unsigned int count, i;
> +		int ret;
> +
> +		ret = reservation_object_get_fences_rcu(obj->resv,
> +							&excl, &count, &shared);
> +		if (ret)
> +			return ret;
> +
> +		for (i = 0; i < count; i++) {
> +			fence_set_priority(shared[i], attr);
> +			dma_fence_put(shared[i]);
> +		}
> +
> +		kfree(shared);
> +	} else {
> +		excl = reservation_object_get_excl_rcu(obj->resv);
> +	}
> +
> +	if (excl) {
> +		fence_set_priority(excl, attr);
> +		dma_fence_put(excl);
> +	}
> +	return 0;
> +}
> +
> +/**
> + * i915_gem_object_wait - Waits for rendering to the object to be completed
> + * @obj: i915 gem object
> + * @flags: how to wait (under a lock, for all rendering or just for writes etc)
> + * @timeout: how long to wait
> + */
> +int
> +i915_gem_object_wait(struct drm_i915_gem_object *obj,
> +		     unsigned int flags,
> +		     long timeout)
> +{
> +	might_sleep();
> +	GEM_BUG_ON(timeout < 0);
> +
> +	timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
> +	return timeout < 0 ? timeout : 0;
> +}
> +
> +static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
> +{
> +	/* nsecs_to_jiffies64() does not guard against overflow */
> +	if (NSEC_PER_SEC % HZ &&
> +	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
> +		return MAX_JIFFY_OFFSET;
> +
> +	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
> +}
> +
> +static unsigned long to_wait_timeout(s64 timeout_ns)
> +{
> +	if (timeout_ns < 0)
> +		return MAX_SCHEDULE_TIMEOUT;
> +
> +	if (timeout_ns == 0)
> +		return 0;
> +
> +	return nsecs_to_jiffies_timeout(timeout_ns);
> +}
> +
> +/**
> + * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
> + * @dev: drm device pointer
> + * @data: ioctl data blob
> + * @file: drm file pointer
> + *
> + * Returns 0 if successful, else an error is returned with the remaining time in
> + * the timeout parameter.
> + *  -ETIME: object is still busy after timeout
> + *  -ERESTARTSYS: signal interrupted the wait
> + *  -ENOENT: object doesn't exist
> + * Also possible, but rare:
> + *  -EAGAIN: incomplete, restart syscall
> + *  -ENOMEM: damn
> + *  -ENODEV: Internal IRQ fail
> + *  -E?: The add request failed
> + *
> + * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
> + * non-zero timeout parameter the wait ioctl will wait for the given number of
> + * nanoseconds on an object becoming unbusy. Since the wait itself does so
> + * without holding struct_mutex the object may become re-busied before this
> + * function completes. A similar but shorter race condition exists in the
> + * busy ioctl.
> + */
> +int
> +i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> +{
> +	struct drm_i915_gem_wait *args = data;
> +	struct drm_i915_gem_object *obj;
> +	ktime_t start;
> +	long ret;
> +
> +	if (args->flags != 0)
> +		return -EINVAL;
> +
> +	obj = i915_gem_object_lookup(file, args->bo_handle);
> +	if (!obj)
> +		return -ENOENT;
> +
> +	start = ktime_get();
> +
> +	ret = i915_gem_object_wait(obj,
> +				   I915_WAIT_INTERRUPTIBLE |
> +				   I915_WAIT_PRIORITY |
> +				   I915_WAIT_ALL,
> +				   to_wait_timeout(args->timeout_ns));
> +
> +	if (args->timeout_ns > 0) {
> +		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
> +		if (args->timeout_ns < 0)
> +			args->timeout_ns = 0;
> +
> +		/*
> +		 * Apparently ktime isn't accurate enough and occasionally has a
> +		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
> +		 * things up to make the test happy. We allow up to 1 jiffy.
> +		 *
> +		 * This is a regression from the timespec->ktime conversion.
> +		 */
> +		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
> +			args->timeout_ns = 0;
> +
> +		/* Asked to wait beyond the jiffie/scheduler precision? */
> +		if (ret == -ETIME && args->timeout_ns)
> +			ret = -EAGAIN;
> +	}
> +
> +	i915_gem_object_put(obj);
> +	return ret;
> +}
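
For context, the ioctl this file now implements is exercised from userspace
roughly as below (a sketch against the i915 uapi only; fd and handle are
assumed to come from the usual GEM setup, and libdrm's drmIoctl() provides
the EINTR/EAGAIN restart loop):

	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,	  /* GEM handle of the object */
		.flags = 0,		  /* must be zero, as checked above */
		.timeout_ns = 1000000000, /* 1s budget; < 0 waits forever */
	};

	int busy = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	/* On -1 with errno == ETIME the object was still busy after the
	 * timeout; wait.timeout_ns is updated with the remaining time,
	 * per the kerneldoc above.
	 */
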
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 6f8ddfbe7d85..8eb01b1b3e0e 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2742,13 +2742,6 @@ void i915_gem_suspend(struct drm_i915_private *dev_priv);
>  void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
>  void i915_gem_resume(struct drm_i915_private *dev_priv);
>  vm_fault_t i915_gem_fault(struct vm_fault *vmf);
> -int i915_gem_object_wait(struct drm_i915_gem_object *obj,
> -			 unsigned int flags,
> -			 long timeout);
> -int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> -				  unsigned int flags,
> -				  const struct i915_sched_attr *attr);
> -#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
>  
>  int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
>  void i915_gem_release(struct drm_device *dev, struct drm_file *file);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 32fdc1977afe..467273dd2d4a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -124,178 +124,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
>  	return ret;
>  }
>  
> -static long
> -i915_gem_object_wait_fence(struct dma_fence *fence,
> -			   unsigned int flags,
> -			   long timeout)
> -{
> -	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
> -
> -	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
> -		return timeout;
> -
> -	if (dma_fence_is_i915(fence))
> -		return i915_request_wait(to_request(fence), flags, timeout);
> -
> -	return dma_fence_wait_timeout(fence,
> -				      flags & I915_WAIT_INTERRUPTIBLE,
> -				      timeout);
> -}
> -
> -static long
> -i915_gem_object_wait_reservation(struct reservation_object *resv,
> -				 unsigned int flags,
> -				 long timeout)
> -{
> -	unsigned int seq = __read_seqcount_begin(&resv->seq);
> -	struct dma_fence *excl;
> -	bool prune_fences = false;
> -
> -	if (flags & I915_WAIT_ALL) {
> -		struct dma_fence **shared;
> -		unsigned int count, i;
> -		int ret;
> -
> -		ret = reservation_object_get_fences_rcu(resv,
> -							&excl, &count, &shared);
> -		if (ret)
> -			return ret;
> -
> -		for (i = 0; i < count; i++) {
> -			timeout = i915_gem_object_wait_fence(shared[i],
> -							     flags, timeout);
> -			if (timeout < 0)
> -				break;
> -
> -			dma_fence_put(shared[i]);
> -		}
> -
> -		for (; i < count; i++)
> -			dma_fence_put(shared[i]);
> -		kfree(shared);
> -
> -		/*
> -		 * If both shared fences and an exclusive fence exist,
> -		 * then by construction the shared fences must be later
> -		 * than the exclusive fence. If we successfully wait for
> -		 * all the shared fences, we know that the exclusive fence
> -		 * must also be signaled. If all the shared fences are
> -		 * signaled, we can prune the array and recover the
> -		 * floating references on the fences/requests.
> -		 */
> -		prune_fences = count && timeout >= 0;
> -	} else {
> -		excl = reservation_object_get_excl_rcu(resv);
> -	}
> -
> -	if (excl && timeout >= 0)
> -		timeout = i915_gem_object_wait_fence(excl, flags, timeout);
> -
> -	dma_fence_put(excl);
> -
> -	/*
> -	 * Opportunistically prune the fences iff we know they have *all* been
> -	 * signaled and that the reservation object has not been changed (i.e.
> -	 * no new fences have been added).
> -	 */
> -	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
> -		if (reservation_object_trylock(resv)) {
> -			if (!__read_seqcount_retry(&resv->seq, seq))
> -				reservation_object_add_excl_fence(resv, NULL);
> -			reservation_object_unlock(resv);
> -		}
> -	}
> -
> -	return timeout;
> -}
> -
> -static void __fence_set_priority(struct dma_fence *fence,
> -				 const struct i915_sched_attr *attr)
> -{
> -	struct i915_request *rq;
> -	struct intel_engine_cs *engine;
> -
> -	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
> -		return;
> -
> -	rq = to_request(fence);
> -	engine = rq->engine;
> -
> -	local_bh_disable();
> -	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
> -	if (engine->schedule)
> -		engine->schedule(rq, attr);
> -	rcu_read_unlock();
> -	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
> -}
> -
> -static void fence_set_priority(struct dma_fence *fence,
> -			       const struct i915_sched_attr *attr)
> -{
> -	/* Recurse once into a fence-array */
> -	if (dma_fence_is_array(fence)) {
> -		struct dma_fence_array *array = to_dma_fence_array(fence);
> -		int i;
> -
> -		for (i = 0; i < array->num_fences; i++)
> -			__fence_set_priority(array->fences[i], attr);
> -	} else {
> -		__fence_set_priority(fence, attr);
> -	}
> -}
> -
> -int
> -i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
> -			      unsigned int flags,
> -			      const struct i915_sched_attr *attr)
> -{
> -	struct dma_fence *excl;
> -
> -	if (flags & I915_WAIT_ALL) {
> -		struct dma_fence **shared;
> -		unsigned int count, i;
> -		int ret;
> -
> -		ret = reservation_object_get_fences_rcu(obj->resv,
> -							&excl, &count, &shared);
> -		if (ret)
> -			return ret;
> -
> -		for (i = 0; i < count; i++) {
> -			fence_set_priority(shared[i], attr);
> -			dma_fence_put(shared[i]);
> -		}
> -
> -		kfree(shared);
> -	} else {
> -		excl = reservation_object_get_excl_rcu(obj->resv);
> -	}
> -
> -	if (excl) {
> -		fence_set_priority(excl, attr);
> -		dma_fence_put(excl);
> -	}
> -	return 0;
> -}
> -
> -/**
> - * i915_gem_object_wait - Waits for rendering to the object to be completed
> - * @obj: i915 gem object
> - * @flags: how to wait (under a lock, for all rendering or just for writes etc)
> - * @timeout: how long to wait
> - */
> -int
> -i915_gem_object_wait(struct drm_i915_gem_object *obj,
> -		     unsigned int flags,
> -		     long timeout)
> -{
> -	might_sleep();
> -	GEM_BUG_ON(timeout < 0);
> -
> -	timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
> -	return timeout < 0 ? timeout : 0;
> -}
> -
>  static int
>  i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
>  		     struct drm_i915_gem_pwrite *args,
> @@ -1073,88 +901,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
>  	}
>  }
>  
> -static unsigned long to_wait_timeout(s64 timeout_ns)
> -{
> -	if (timeout_ns < 0)
> -		return MAX_SCHEDULE_TIMEOUT;
> -
> -	if (timeout_ns == 0)
> -		return 0;
> -
> -	return nsecs_to_jiffies_timeout(timeout_ns);
> -}
> -
> -/**
> - * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
> - * @dev: drm device pointer
> - * @data: ioctl data blob
> - * @file: drm file pointer
> - *
> - * Returns 0 if successful, else an error is returned with the remaining time in
> - * the timeout parameter.
> - *  -ETIME: object is still busy after timeout
> - *  -ERESTARTSYS: signal interrupted the wait
> - *  -ENOENT: object doesn't exist
> - * Also possible, but rare:
> - *  -EAGAIN: incomplete, restart syscall
> - *  -ENOMEM: damn
> - *  -ENODEV: Internal IRQ fail
> - *  -E?: The add request failed
> - *
> - * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
> - * non-zero timeout parameter the wait ioctl will wait for the given number of
> - * nanoseconds on an object becoming unbusy. Since the wait itself does so
> - * without holding struct_mutex the object may become re-busied before this
> - * function completes. A similar but shorter race condition exists in the
> - * busy ioctl.
> - */
> -int
> -i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
> -{
> -	struct drm_i915_gem_wait *args = data;
> -	struct drm_i915_gem_object *obj;
> -	ktime_t start;
> -	long ret;
> -
> -	if (args->flags != 0)
> -		return -EINVAL;
> -
> -	obj = i915_gem_object_lookup(file, args->bo_handle);
> -	if (!obj)
> -		return -ENOENT;
> -
> -	start = ktime_get();
> -
> -	ret = i915_gem_object_wait(obj,
> -				   I915_WAIT_INTERRUPTIBLE |
> -				   I915_WAIT_PRIORITY |
> -				   I915_WAIT_ALL,
> -				   to_wait_timeout(args->timeout_ns));
> -
> -	if (args->timeout_ns > 0) {
> -		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
> -		if (args->timeout_ns < 0)
> -			args->timeout_ns = 0;
> -
> -		/*
> -		 * Apparently ktime isn't accurate enough and occasionally has a
> -		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
> -		 * things up to make the test happy. We allow up to 1 jiffy.
> -		 *
> -		 * This is a regression from the timespec->ktime conversion.
> -		 */
> -		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
> -			args->timeout_ns = 0;
> -
> -		/* Asked to wait beyond the jiffie/scheduler precision? */
> -		if (ret == -ETIME && args->timeout_ns)
> -			ret = -EAGAIN;
> -	}
> -
> -	i915_gem_object_put(obj);
> -	return ret;
> -}
> -
>  static int wait_for_engines(struct drm_i915_private *i915)
>  {
>  	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
> diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
> index edfc69acdaac..9911f53382a5 100644
> --- a/drivers/gpu/drm/i915/i915_utils.h
> +++ b/drivers/gpu/drm/i915/i915_utils.h
> @@ -218,16 +218,6 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
>  	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
>  }
>  
> -static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
> -{
> -	/* nsecs_to_jiffies64() does not guard against overflow */
> -	if (NSEC_PER_SEC % HZ &&
> -	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
> -		return MAX_JIFFY_OFFSET;
> -
> -        return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
> -}

Seems that the wait code was the only user converting the timeout to jiffies,
though the helper looks generic enough for other usage too.
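
The shape another user would presumably take is something like the below
(a hypothetical sketch; wq, done and args are stand-ins, not anything in
this series):

	/* Clamp a userspace nanosecond timeout into the jiffies domain,
	 * rounding up so the caller never waits less than requested.
	 */
	long t = nsecs_to_jiffies_timeout(args->timeout_ns);
	long ret = wait_event_interruptible_timeout(wq, done, t);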

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>