From: Tomas Elf <tomas.elf@intel.com>
To: "John.C.Harrison@Intel.com" <John.C.Harrison@Intel.com>,
"Intel-GFX@Lists.FreeDesktop.Org"
<Intel-GFX@Lists.FreeDesktop.Org>
Subject: Re: [PATCH 02/59] drm/i915: Make intel_logical_ring_begin() static
Date: Tue, 31 Mar 2015 16:47:09 +0100 [thread overview]
Message-ID: <551AC17D.3030702@intel.com> (raw)
In-Reply-To: <1426768264-16996-3-git-send-email-John.C.Harrison@Intel.com>
On 19/03/2015 12:30, John.C.Harrison@Intel.com wrote:
> From: John Harrison <John.C.Harrison@Intel.com>
>
> The only usage of intel_logical_ring_begin() is within intel_lrc.c so it can be
> made static. To avoid a forward declaration at the top of the file, it and a bunch
> of other functions have been shuffled upwards.
>
> For: VIZ-5115
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> ---
> drivers/gpu/drm/i915/intel_lrc.c | 474 +++++++++++++++++++-------------------
> drivers/gpu/drm/i915/intel_lrc.h | 3 -
> 2 files changed, 237 insertions(+), 240 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index fcb074b..cad4300 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -611,6 +611,243 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
> return logical_ring_invalidate_all_caches(ringbuf, ctx);
> }
>
> +static int logical_ring_alloc_request(struct intel_engine_cs *ring,
> + struct intel_context *ctx)
> +{
> + struct drm_i915_gem_request *request;
> + struct drm_i915_private *dev_private = ring->dev->dev_private;
> + int ret;
> +
> + if (ring->outstanding_lazy_request)
> + return 0;
> +
> + request = kzalloc(sizeof(*request), GFP_KERNEL);
> + if (request == NULL)
> + return -ENOMEM;
> +
> + if (ctx != ring->default_context) {
> + ret = intel_lr_context_pin(ring, ctx);
> + if (ret) {
> + kfree(request);
> + return ret;
> + }
> + }
> +
> + kref_init(&request->ref);
> + request->ring = ring;
> + request->uniq = dev_private->request_uniq++;
> +
> + ret = i915_gem_get_seqno(ring->dev, &request->seqno);
> + if (ret) {
> + intel_lr_context_unpin(ring, ctx);
> + kfree(request);
> + return ret;
> + }
> +
> + request->ctx = ctx;
> + i915_gem_context_reference(request->ctx);
> + request->ringbuf = ctx->engine[ring->id].ringbuf;
> +
> + ring->outstanding_lazy_request = request;
> + return 0;
> +}
> +
> +static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
> + int bytes)
> +{
> + struct intel_engine_cs *ring = ringbuf->ring;
> + struct drm_i915_gem_request *request;
> + int ret;
> +
> + if (intel_ring_space(ringbuf) >= bytes)
> + return 0;
> +
> + list_for_each_entry(request, &ring->request_list, list) {
> + /*
> + * The request queue is per-engine, so can contain requests
> + * from multiple ringbuffers. Here, we must ignore any that
> + * aren't from the ringbuffer we're considering.
> + */
> + struct intel_context *ctx = request->ctx;
> + if (ctx->engine[ring->id].ringbuf != ringbuf)
> + continue;
> +
> + /* Would completion of this request free enough space? */
> + if (__intel_ring_space(request->tail, ringbuf->tail,
> + ringbuf->size) >= bytes) {
> + break;
> + }
> + }
> +
> + if (&request->list == &ring->request_list)
> + return -ENOSPC;
> +
> + ret = i915_wait_request(request);
> + if (ret)
> + return ret;
> +
> + i915_gem_retire_requests_ring(ring);
> +
> + return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
> +}
> +
> +/*
> + * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
> + * @ringbuf: Logical Ringbuffer to advance.
> + *
> + * The tail is updated in our logical ringbuffer struct, not in the actual context. What
> + * really happens during submission is that the context and current tail will be placed
> + * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
> + * point, the tail *inside* the context is updated and the ELSP written to.
> + */
> +static void
> +intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
> + struct intel_context *ctx,
> + struct drm_i915_gem_request *request)
> +{
> + struct intel_engine_cs *ring = ringbuf->ring;
> +
> + intel_logical_ring_advance(ringbuf);
> +
> + if (intel_ring_stopped(ring))
> + return;
> +
> + execlists_context_queue(ring, ctx, ringbuf->tail, request);
> +}
> +
> +static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
> + struct intel_context *ctx,
> + int bytes)
> +{
> + struct intel_engine_cs *ring = ringbuf->ring;
> + struct drm_device *dev = ring->dev;
> + struct drm_i915_private *dev_priv = dev->dev_private;
> + unsigned long end;
> + int ret;
> +
> + ret = logical_ring_wait_request(ringbuf, bytes);
> + if (ret != -ENOSPC)
> + return ret;
> +
> + /* Force the context submission in case we have been skipping it */
> + intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
> +
> + /* With GEM the hangcheck timer should kick us out of the loop,
> + * leaving it early runs the risk of corrupting GEM state (due
> + * to running on almost untested codepaths). But on resume
> + * timers don't work yet, so prevent a complete hang in that
> + * case by choosing an insanely large timeout. */
> + end = jiffies + 60 * HZ;
> +
> + ret = 0;
> + do {
> + if (intel_ring_space(ringbuf) >= bytes)
> + break;
> +
> + msleep(1);
> +
> + if (dev_priv->mm.interruptible && signal_pending(current)) {
> + ret = -ERESTARTSYS;
> + break;
> + }
> +
> + ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> + dev_priv->mm.interruptible);
> + if (ret)
> + break;
> +
> + if (time_after(jiffies, end)) {
> + ret = -EBUSY;
> + break;
> + }
> + } while (1);
> +
> + return ret;
> +}
> +
> +static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
> + struct intel_context *ctx)
> +{
> + uint32_t __iomem *virt;
> + int rem = ringbuf->size - ringbuf->tail;
> +
> + if (ringbuf->space < rem) {
> + int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
> +
> + if (ret)
> + return ret;
> + }
> +
> + virt = ringbuf->virtual_start + ringbuf->tail;
> + rem /= 4;
> + while (rem--)
> + iowrite32(MI_NOOP, virt++);
> +
> + ringbuf->tail = 0;
> + intel_ring_update_space(ringbuf);
> +
> + return 0;
> +}
> +
> +static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
> + struct intel_context *ctx, int bytes)
> +{
> + int ret;
> +
> + if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
> + ret = logical_ring_wrap_buffer(ringbuf, ctx);
> + if (unlikely(ret))
> + return ret;
> + }
> +
> + if (unlikely(ringbuf->space < bytes)) {
> + ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
> + if (unlikely(ret))
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +/**
> + * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
> + *
> + * @ringbuf: Logical ringbuffer.
> + * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
> + *
> + * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
> + * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
> + * and also preallocates a request (every workload submission is still mediated through
> + * requests, same as it did with legacy ringbuffer submission).
> + *
> + * Return: non-zero if the ringbuffer is not ready to be written to.
> + */
> +static int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
> + struct intel_context *ctx, int num_dwords)
> +{
> + struct intel_engine_cs *ring = ringbuf->ring;
> + struct drm_device *dev = ring->dev;
> + struct drm_i915_private *dev_priv = dev->dev_private;
> + int ret;
> +
> + ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> + dev_priv->mm.interruptible);
> + if (ret)
> + return ret;
> +
> + ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
> + if (ret)
> + return ret;
> +
> + /* Preallocate the olr before touching the ring */
> + ret = logical_ring_alloc_request(ring, ctx);
> + if (ret)
> + return ret;
> +
> + ringbuf->space -= num_dwords * sizeof(uint32_t);
> + return 0;
> +}
> +
> /**
> * execlists_submission() - submit a batchbuffer for execution, Execlists style
> * @dev: DRM device.
> @@ -787,30 +1024,6 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
> return 0;
> }
>
> -/*
> - * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
> - * @ringbuf: Logical Ringbuffer to advance.
> - *
> - * The tail is updated in our logical ringbuffer struct, not in the actual context. What
> - * really happens during submission is that the context and current tail will be placed
> - * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
> - * point, the tail *inside* the context is updated and the ELSP written to.
> - */
> -static void
> -intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx,
> - struct drm_i915_gem_request *request)
> -{
> - struct intel_engine_cs *ring = ringbuf->ring;
> -
> - intel_logical_ring_advance(ringbuf);
> -
> - if (intel_ring_stopped(ring))
> - return;
> -
> - execlists_context_queue(ring, ctx, ringbuf->tail, request);
> -}
> -
> static int intel_lr_context_pin(struct intel_engine_cs *ring,
> struct intel_context *ctx)
> {
> @@ -855,219 +1068,6 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
> }
> }
>
> -static int logical_ring_alloc_request(struct intel_engine_cs *ring,
> - struct intel_context *ctx)
> -{
> - struct drm_i915_gem_request *request;
> - struct drm_i915_private *dev_private = ring->dev->dev_private;
> - int ret;
> -
> - if (ring->outstanding_lazy_request)
> - return 0;
> -
> - request = kzalloc(sizeof(*request), GFP_KERNEL);
> - if (request == NULL)
> - return -ENOMEM;
> -
> - if (ctx != ring->default_context) {
> - ret = intel_lr_context_pin(ring, ctx);
> - if (ret) {
> - kfree(request);
> - return ret;
> - }
> - }
> -
> - kref_init(&request->ref);
> - request->ring = ring;
> - request->uniq = dev_private->request_uniq++;
> -
> - ret = i915_gem_get_seqno(ring->dev, &request->seqno);
> - if (ret) {
> - intel_lr_context_unpin(ring, ctx);
> - kfree(request);
> - return ret;
> - }
> -
> - request->ctx = ctx;
> - i915_gem_context_reference(request->ctx);
> - request->ringbuf = ctx->engine[ring->id].ringbuf;
> -
> - ring->outstanding_lazy_request = request;
> - return 0;
> -}
> -
> -static int logical_ring_wait_request(struct intel_ringbuffer *ringbuf,
> - int bytes)
> -{
> - struct intel_engine_cs *ring = ringbuf->ring;
> - struct drm_i915_gem_request *request;
> - int ret;
> -
> - if (intel_ring_space(ringbuf) >= bytes)
> - return 0;
> -
> - list_for_each_entry(request, &ring->request_list, list) {
> - /*
> - * The request queue is per-engine, so can contain requests
> - * from multiple ringbuffers. Here, we must ignore any that
> - * aren't from the ringbuffer we're considering.
> - */
> - struct intel_context *ctx = request->ctx;
> - if (ctx->engine[ring->id].ringbuf != ringbuf)
> - continue;
> -
> - /* Would completion of this request free enough space? */
> - if (__intel_ring_space(request->tail, ringbuf->tail,
> - ringbuf->size) >= bytes) {
> - break;
> - }
> - }
> -
> - if (&request->list == &ring->request_list)
> - return -ENOSPC;
> -
> - ret = i915_wait_request(request);
> - if (ret)
> - return ret;
> -
> - i915_gem_retire_requests_ring(ring);
> -
> - return intel_ring_space(ringbuf) >= bytes ? 0 : -ENOSPC;
> -}
> -
> -static int logical_ring_wait_for_space(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx,
> - int bytes)
> -{
> - struct intel_engine_cs *ring = ringbuf->ring;
> - struct drm_device *dev = ring->dev;
> - struct drm_i915_private *dev_priv = dev->dev_private;
> - unsigned long end;
> - int ret;
> -
> - ret = logical_ring_wait_request(ringbuf, bytes);
> - if (ret != -ENOSPC)
> - return ret;
> -
> - /* Force the context submission in case we have been skipping it */
> - intel_logical_ring_advance_and_submit(ringbuf, ctx, NULL);
> -
> - /* With GEM the hangcheck timer should kick us out of the loop,
> - * leaving it early runs the risk of corrupting GEM state (due
> - * to running on almost untested codepaths). But on resume
> - * timers don't work yet, so prevent a complete hang in that
> - * case by choosing an insanely large timeout. */
> - end = jiffies + 60 * HZ;
> -
> - ret = 0;
> - do {
> - if (intel_ring_space(ringbuf) >= bytes)
> - break;
> -
> - msleep(1);
> -
> - if (dev_priv->mm.interruptible && signal_pending(current)) {
> - ret = -ERESTARTSYS;
> - break;
> - }
> -
> - ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> - dev_priv->mm.interruptible);
> - if (ret)
> - break;
> -
> - if (time_after(jiffies, end)) {
> - ret = -EBUSY;
> - break;
> - }
> - } while (1);
> -
> - return ret;
> -}
> -
> -static int logical_ring_wrap_buffer(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx)
> -{
> - uint32_t __iomem *virt;
> - int rem = ringbuf->size - ringbuf->tail;
> -
> - if (ringbuf->space < rem) {
> - int ret = logical_ring_wait_for_space(ringbuf, ctx, rem);
> -
> - if (ret)
> - return ret;
> - }
> -
> - virt = ringbuf->virtual_start + ringbuf->tail;
> - rem /= 4;
> - while (rem--)
> - iowrite32(MI_NOOP, virt++);
> -
> - ringbuf->tail = 0;
> - intel_ring_update_space(ringbuf);
> -
> - return 0;
> -}
> -
> -static int logical_ring_prepare(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx, int bytes)
> -{
> - int ret;
> -
> - if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {
> - ret = logical_ring_wrap_buffer(ringbuf, ctx);
> - if (unlikely(ret))
> - return ret;
> - }
> -
> - if (unlikely(ringbuf->space < bytes)) {
> - ret = logical_ring_wait_for_space(ringbuf, ctx, bytes);
> - if (unlikely(ret))
> - return ret;
> - }
> -
> - return 0;
> -}
> -
> -/**
> - * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
> - *
> - * @ringbuf: Logical ringbuffer.
> - * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
> - *
> - * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
> - * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
> - * and also preallocates a request (every workload submission is still mediated through
> - * requests, same as it did with legacy ringbuffer submission).
> - *
> - * Return: non-zero if the ringbuffer is not ready to be written to.
> - */
> -int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx, int num_dwords)
> -{
> - struct intel_engine_cs *ring = ringbuf->ring;
> - struct drm_device *dev = ring->dev;
> - struct drm_i915_private *dev_priv = dev->dev_private;
> - int ret;
> -
> - ret = i915_gem_check_wedge(&dev_priv->gpu_error,
> - dev_priv->mm.interruptible);
> - if (ret)
> - return ret;
> -
> - ret = logical_ring_prepare(ringbuf, ctx, num_dwords * sizeof(uint32_t));
> - if (ret)
> - return ret;
> -
> - /* Preallocate the olr before touching the ring */
> - ret = logical_ring_alloc_request(ring, ctx);
> - if (ret)
> - return ret;
> -
> - ringbuf->space -= num_dwords * sizeof(uint32_t);
> - return 0;
> -}
> -
> static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
> struct intel_context *ctx)
> {
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index adb731e4..ac8f81a 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -63,9 +63,6 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
> iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
> ringbuf->tail += 4;
> }
> -int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf,
> - struct intel_context *ctx,
> - int num_dwords);
>
> /* Logical Ring Contexts */
> void intel_lr_context_free(struct intel_context *ctx);
>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Thanks,
Tomas
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2015-03-31 15:47 UTC|newest]
Thread overview: 122+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-03-19 12:30 [PATCH 00/59] Remove the outstanding_lazy_request John.C.Harrison
2015-03-19 12:30 ` [PATCH 01/59] drm/i915: Rename 'do_execbuf' to 'execbuf_submit' John.C.Harrison
2015-03-31 15:45 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 02/59] drm/i915: Make intel_logical_ring_begin() static John.C.Harrison
2015-03-31 15:47 ` Tomas Elf [this message]
2015-03-19 12:30 ` [PATCH 03/59] drm/i915: Move common request allocation code into a common function John.C.Harrison
2015-03-31 15:48 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 04/59] drm/i915: Fix for ringbuf space wait in LRC mode John.C.Harrison
2015-03-19 14:56 ` Daniel, Thomas
2015-03-31 15:50 ` Tomas Elf
2015-04-01 5:56 ` Daniel Vetter
2015-04-01 12:00 ` John Harrison
2015-04-01 8:53 ` Chris Wilson
2015-03-19 12:30 ` [PATCH 05/59] drm/i915: Reserve ring buffer space for i915_add_request() commands John.C.Harrison
2015-03-20 15:13 ` Daniel Vetter
2015-03-20 15:55 ` John Harrison
2015-03-23 8:54 ` Daniel Vetter
2015-03-31 16:03 ` Chris Wilson
2015-03-20 16:19 ` John Harrison
2015-03-20 18:13 ` John Harrison
2015-03-31 15:58 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 06/59] drm/i915: i915_add_request must not fail John.C.Harrison
2015-03-19 12:30 ` [PATCH 07/59] drm/i915: Early alloc request in execbuff John.C.Harrison
2015-03-31 16:09 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 08/59] drm/i915: Set context in request from creation even in legacy mode John.C.Harrison
2015-03-31 16:10 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 09/59] drm/i915: Merged the many do_execbuf() parameters into a structure John.C.Harrison
2015-03-31 16:16 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 10/59] drm/i915: Simplify i915_gem_execbuffer_retire_commands() parameters John.C.Harrison
2015-03-19 12:30 ` [PATCH 11/59] drm/i915: Update alloc_request to return the allocated request John.C.Harrison
2015-03-31 16:20 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 12/59] drm/i915: Add request to execbuf params and add explicit cleanup John.C.Harrison
2015-03-19 12:30 ` [PATCH 13/59] drm/i915: Update the dispatch tracepoint to use params->request John.C.Harrison
2015-03-19 12:30 ` [PATCH 14/59] drm/i915: Update move_to_gpu() to take a request structure John.C.Harrison
2015-03-19 12:30 ` [PATCH 15/59] drm/i915: Update execbuffer_move_to_active() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 16/59] drm/i915: Add flag to i915_add_request() to skip the cache flush John.C.Harrison
2015-03-31 16:32 ` Tomas Elf
2015-04-01 5:59 ` Daniel Vetter
2015-04-01 8:52 ` Chris Wilson
2015-03-19 12:30 ` [PATCH 17/59] drm/i915: Update i915_gpu_idle() to manage its own request John.C.Harrison
2015-03-19 12:30 ` [PATCH 18/59] drm/i915: Split i915_ppgtt_init_hw() in half - generic and per ring John.C.Harrison
2015-03-31 16:34 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 19/59] drm/i915: Moved the for_each_ring loop outside of i915_gem_context_enable() John.C.Harrison
2015-03-19 12:30 ` [PATCH 20/59] drm/i915: Don't tag kernel batches as user batches John.C.Harrison
2015-03-31 16:35 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 21/59] drm/i915: Add explicit request management to i915_gem_init_hw() John.C.Harrison
2015-03-31 16:38 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 22/59] drm/i915: Update ppgtt_init_ring() & context_enable() to take requests John.C.Harrison
2015-03-31 16:38 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 23/59] drm/i915: Update i915_switch_context() to take a request structure John.C.Harrison
2015-03-19 12:30 ` [PATCH 24/59] drm/i915: Update do_switch() " John.C.Harrison
2015-03-31 16:40 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 25/59] drm/i915: Update deferred context creation to do explicit request management John.C.Harrison
2015-03-31 16:43 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 26/59] drm/i915: Update init_context() to take a request structure John.C.Harrison
2015-03-19 12:30 ` [PATCH 27/59] drm/i915: Update render_state_init() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 28/59] drm/i915: Update i915_gem_object_sync() " John.C.Harrison
2015-03-31 16:53 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 29/59] drm/i915: Update overlay code to do explicit request management John.C.Harrison
2015-03-31 16:53 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 30/59] drm/i915: Update queue_flip() to take a request structure John.C.Harrison
2015-03-31 16:54 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 31/59] drm/i915: Update add_request() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 32/59] drm/i915: Update [vma|object]_move_to_active() to take request structures John.C.Harrison
2015-03-19 12:30 ` [PATCH 33/59] drm/i915: Update l3_remap to take a request structure John.C.Harrison
2015-03-19 12:30 ` [PATCH 34/59] drm/i915: Update mi_set_context() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 35/59] drm/i915: Update a bunch of execbuffer helpers to take request structures John.C.Harrison
2015-03-19 12:30 ` [PATCH 36/59] drm/i915: Update workarounds_emit() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 37/59] drm/i915: Update flush_all_caches() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 38/59] drm/i915: Update switch_mm() to take a request structure John.C.Harrison
2015-03-31 16:57 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 39/59] drm/i915: Update ring->flush() to take a requests structure John.C.Harrison
2015-03-31 16:59 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 40/59] drm/i915: Update some flush helpers to take request structures John.C.Harrison
2015-03-19 12:30 ` [PATCH 41/59] drm/i915: Update ring->emit_flush() to take a request structure John.C.Harrison
2015-03-19 12:30 ` [PATCH 42/59] drm/i915: Update ring->add_request() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 43/59] drm/i915: Update ring->emit_request() " John.C.Harrison
2015-03-31 17:01 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 44/59] drm/i915: Update ring->dispatch_execbuffer() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 45/59] drm/i915: Update ring->emit_bb_start() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 46/59] drm/i915: Update ring->sync_to() " John.C.Harrison
2015-03-31 17:03 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 47/59] drm/i915: Update ring->signal() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 48/59] drm/i915: Update cacheline_align() " John.C.Harrison
2015-03-19 12:30 ` [PATCH 49/59] drm/i915: Update intel_ring_begin() " John.C.Harrison
2015-03-31 17:04 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 50/59] drm/i915: Update intel_logical_ring_begin() " John.C.Harrison
2015-03-31 17:05 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 51/59] drm/i915: Add *_ring_begin() to request allocation John.C.Harrison
2015-03-20 15:23 ` Daniel Vetter
2015-03-20 15:30 ` Chris Wilson
2015-03-20 16:09 ` John Harrison
2015-03-23 9:10 ` Daniel Vetter
2015-03-31 17:17 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 52/59] drm/i915: Remove the now obsolete intel_ring_get_request() John.C.Harrison
2015-03-19 12:30 ` [PATCH 53/59] drm/i915: Remove the now obsolete 'outstanding_lazy_request' John.C.Harrison
2015-03-31 18:01 ` Tomas Elf
2015-03-19 12:30 ` [PATCH 54/59] drm/i915: Move the request/file and request/pid association to creation time John.C.Harrison
2015-03-31 18:07 ` Tomas Elf
2015-03-19 12:31 ` [PATCH 55/59] drm/i915: Remove fallback poll for ring buffer space John.C.Harrison
2015-03-19 15:00 ` Daniel, Thomas
2015-03-19 15:16 ` Jani Nikula
2015-03-19 16:33 ` John Harrison
2015-03-19 17:29 ` Daniel Vetter
2015-03-31 18:13 ` Tomas Elf
2015-04-01 6:02 ` Daniel Vetter
2015-04-01 8:51 ` Chris Wilson
2015-03-19 12:31 ` [PATCH 56/59] drm/i915: Remove 'faked' request from LRC submission John.C.Harrison
2015-03-19 15:02 ` Daniel, Thomas
2015-03-31 18:14 ` Tomas Elf
2015-03-19 12:31 ` [PATCH 57/59] drm/i915: Update a bunch of LRC functions to take requests John.C.Harrison
2015-03-31 18:18 ` Tomas Elf
2015-03-19 12:31 ` [PATCH 58/59] drm/i915: Remove the now obsolete 'i915_gem_check_olr()' John.C.Harrison
2015-03-31 18:18 ` Tomas Elf
2015-03-19 12:31 ` [PATCH 59/59] drm/i915: Remove the almost obsolete i915_gem_object_flush_active() John.C.Harrison
2015-03-31 18:32 ` Tomas Elf
2015-04-01 6:06 ` Daniel Vetter
2015-05-28 20:02 ` [PATCH 00/59] Remove the outstanding_lazy_request Jesse Barnes
2015-05-28 21:20 ` Chris Wilson
2015-05-29 14:37 ` Jesse Barnes
2015-05-29 18:07 ` Chris Wilson
2015-05-29 11:00 ` John Harrison
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=551AC17D.3030702@intel.com \
--to=tomas.elf@intel.com \
--cc=Intel-GFX@Lists.FreeDesktop.Org \
--cc=John.C.Harrison@Intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox