public inbox for intel-gfx@lists.freedesktop.org
 help / color / mirror / Atom feed
From: John Harrison <John.C.Harrison@Intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: Re: [PATCH 1/2] drm/i915: Split adding request to smaller functions
Date: Thu, 19 Feb 2015 16:54:33 +0000	[thread overview]
Message-ID: <54E61549.504@Intel.com> (raw)
In-Reply-To: <1424362735-10569-1-git-send-email-mika.kuoppala@intel.com>

Please note that a lot of the issues with _i915_add_request are cleaned 
up by my patch series to remove the outstanding_lazy_request. The add to 
client in some random client context is fixed, the messy execlist vs 
legacy ringbuf decisions are removed, the execlist vs legacy one-sided 
context reference is removed, ...

Also, I am in the process of converting the request structure to use 
struct fence which will possibly answer some of your locking concerns in 
the subsequent patch.

So can you hold off on merging these two patches at least until the dust 
has settled on the anti-OLR series?

Thanks.


On 19/02/2015 16:18, Mika Kuoppala wrote:
> Clean __i915_add_request by splitting request submission to
> preparation, actual submission and adding to client.
>
> While doing this we can remove the request->start which
> was not used.
>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_gem.c | 116 +++++++++++++++++++++++++++-------------
>   1 file changed, 78 insertions(+), 38 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 61134ab..06265e7 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2407,26 +2407,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
>   	return 0;
>   }
>   
> -int __i915_add_request(struct intel_engine_cs *ring,
> -		       struct drm_file *file,
> -		       struct drm_i915_gem_object *obj)
> +static struct intel_ringbuffer *
> +__request_to_ringbuf(struct drm_i915_gem_request *request)
> +{
> +	if (i915.enable_execlists)
> +		return request->ctx->engine[request->ring->id].ringbuf;
> +
> +	return request->ring->buffer;
> +}
> +
> +static struct drm_i915_gem_request *
> +i915_gem_request_prepare(struct intel_engine_cs *ring, struct drm_file *file)
>   {
> -	struct drm_i915_private *dev_priv = ring->dev->dev_private;
>   	struct drm_i915_gem_request *request;
>   	struct intel_ringbuffer *ringbuf;
> -	u32 request_start;
>   	int ret;
>   
>   	request = ring->outstanding_lazy_request;
>   	if (WARN_ON(request == NULL))
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>   
> -	if (i915.enable_execlists) {
> -		ringbuf = request->ctx->engine[ring->id].ringbuf;
> -	} else
> -		ringbuf = ring->buffer;
> +	/* execlist submission has this already set */
> +	if (!request->ctx)
> +		request->ctx = ring->last_context;
> +
> +	ringbuf = __request_to_ringbuf(request);
> +	if (WARN_ON(ringbuf == NULL))
> +		return ERR_PTR(-EIO);
>   
> -	request_start = intel_ring_get_tail(ringbuf);
>   	/*
>   	 * Emit any outstanding flushes - execbuf can fail to emit the flush
>   	 * after having emitted the batchbuffer command. Hence we need to fix
> @@ -2434,21 +2442,30 @@ int __i915_add_request(struct intel_engine_cs *ring,
>   	 * is that the flush _must_ happen before the next request, no matter
>   	 * what.
>   	 */
> -	if (i915.enable_execlists) {
> +	if (i915.enable_execlists)
>   		ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
> -		if (ret)
> -			return ret;
> -	} else {
> +	else
>   		ret = intel_ring_flush_all_caches(ring);
> -		if (ret)
> -			return ret;
> -	}
> +
> +	if (ret)
> +		return ERR_PTR(ret);
> +
> +	return request;
> +}
> +
> +static int i915_gem_request_submit(struct drm_i915_gem_request *request,
> +				   struct drm_i915_gem_object *batch)
> +{
> +	struct intel_ringbuffer *ringbuf = __request_to_ringbuf(request);
> +	struct intel_engine_cs *ring = request->ring;
> +	int ret;
>   
>   	/* Record the position of the start of the request so that
>   	 * should we detect the updated seqno part-way through the
>   	 * GPU processing the request, we never over-estimate the
>   	 * position of the head.
>   	 */
> +	request->batch_obj = batch;
>   	request->postfix = intel_ring_get_tail(ringbuf);
>   
>   	if (i915.enable_execlists) {
> @@ -2461,7 +2478,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
>   			return ret;
>   	}
>   
> -	request->head = request_start;
>   	request->tail = intel_ring_get_tail(ringbuf);
>   
>   	/* Whilst this request exists, batch_obj will be on the
> @@ -2470,35 +2486,59 @@ int __i915_add_request(struct intel_engine_cs *ring,
>   	 * inactive_list and lose its active reference. Hence we do not need
>   	 * to explicitly hold another reference here.
>   	 */
> -	request->batch_obj = obj;
>   
> -	if (!i915.enable_execlists) {
> -		/* Hold a reference to the current context so that we can inspect
> -		 * it later in case a hangcheck error event fires.
> +	if (!i915.enable_execlists && request->ctx) {
> +		/* Hold a reference to the current context so that we can
> +		 * inspect it later in case a hangcheck error event fires.
>   		 */
> -		request->ctx = ring->last_context;
> -		if (request->ctx)
> -			i915_gem_context_reference(request->ctx);
> +		i915_gem_context_reference(request->ctx);
>   	}
>   
>   	request->emitted_jiffies = jiffies;
> +
>   	list_add_tail(&request->list, &ring->request_list);
> -	request->file_priv = NULL;
> +	ring->outstanding_lazy_request = NULL;
>   
> -	if (file) {
> -		struct drm_i915_file_private *file_priv = file->driver_priv;
> +	trace_i915_gem_request_add(request);
>   
> -		spin_lock(&file_priv->mm.lock);
> -		request->file_priv = file_priv;
> -		list_add_tail(&request->client_list,
> -			      &file_priv->mm.request_list);
> -		spin_unlock(&file_priv->mm.lock);
> +	return 0;
> +}
>   
> -		request->pid = get_pid(task_pid(current));
> -	}
> +static void i915_gem_request_add_to_client(struct drm_i915_gem_request *request)
> +{
> +	struct drm_i915_file_private *file_priv;
>   
> -	trace_i915_gem_request_add(request);
> -	ring->outstanding_lazy_request = NULL;
> +	if (!request->file_priv)
> +		return;
> +
> +	file_priv = request->file_priv;
> +
> +	spin_lock(&file_priv->mm.lock);
> +	request->file_priv = file_priv;
> +	list_add_tail(&request->client_list,
> +		      &file_priv->mm.request_list);
> +	spin_unlock(&file_priv->mm.lock);
> +
> +	request->pid = get_pid(task_pid(current));
> +}
> +
> +int __i915_add_request(struct intel_engine_cs *ring,
> +		       struct drm_file *file,
> +		       struct drm_i915_gem_object *batch)
> +{
> +	struct drm_i915_private *dev_priv = ring->dev->dev_private;
> +	struct drm_i915_gem_request *request;
> +	int ret;
> +
> +	request = i915_gem_request_prepare(ring, file);
> +	if (IS_ERR(request))
> +		return PTR_ERR(request);
> +
> +	ret = i915_gem_request_submit(request, batch);
> +	if (ret)
> +		return ret;
> +
> +	i915_gem_request_add_to_client(request);
>   
>   	i915_queue_hangcheck(ring->dev);
>   

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

  parent reply	other threads:[~2015-02-19 16:54 UTC|newest]

Thread overview: 12+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-02-19 16:18 [PATCH 1/2] drm/i915: Split adding request to smaller functions Mika Kuoppala
2015-02-19 16:18 ` [PATCH 2/2] drm/i915: Protect engine request list with spinlock Mika Kuoppala
2015-02-19 16:41   ` Chris Wilson
2015-02-23 23:58     ` Daniel Vetter
2015-02-24  8:31       ` Chris Wilson
2015-02-24 10:39         ` Daniel Vetter
2015-02-24 10:52           ` Chris Wilson
2015-02-24 11:23             ` Mika Kuoppala
2015-02-24 11:40               ` Chris Wilson
2015-02-24 12:57             ` Daniel Vetter
2015-02-19 16:54 ` John Harrison [this message]
2015-02-20  9:16   ` [PATCH 1/2] drm/i915: Split adding request to smaller functions Mika Kuoppala

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=54E61549.504@Intel.com \
    --to=john.c.harrison@intel.com \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox