From: Ben Widawsky <ben@bwidawsk.net>
To: intel-gfx@lists.freedesktop.org
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>, Ben Widawsky <ben@bwidawsk.net>
Subject: [PATCH 04/15] drm/i915: drop lock support for i915_wait_request
Date: Fri, 18 Nov 2011 18:24:21 -0800 [thread overview]
Message-ID: <1321669472-8045-5-git-send-email-ben@bwidawsk.net> (raw)
In-Reply-To: <1321669472-8045-1-git-send-email-ben@bwidawsk.net>
Provide a way for callers to instruct the wait request to drop
struct_mutex before the actual wait. This gives other GPU clients an
opportunity to submit independent work.
While it's tempting to make i915_wait_request always drop the lock and
force callers to recheck whatever state they depend on, that turns out
to be quite difficult to implement in most cases: almost any state can
change out from underneath them once the lock is dropped, and for things
such as domains it isn't easy to tell whether they've changed (they
could have been modified, and then changed back to the original flags).
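As an illustration (a sketch only, not part of the patch; the function
name and the error handling are hypothetical), a caller passing
drop_mutex = true must treat all mutex-protected state as stale once
the call returns:

/*
 * Hypothetical caller sketch: with drop_mutex set, struct_mutex is
 * released for the duration of the wait and re-acquired before
 * returning, so anything the lock protects must be revalidated.
 */
static int example_wait_dropping_lock(struct intel_ring_buffer *ring,
				      u32 seqno)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	int ret;

	/* Must be called with struct_mutex held, as before. */
	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	/* The lock is dropped and re-taken inside the wait. */
	ret = i915_wait_request(ring, seqno, true);
	if (ret)
		return ret;

	/* Recheck state that may have changed while unlocked. */
	if (atomic_read(&dev_priv->mm.wedged))
		return -EAGAIN;

	return 0;
}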
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
drivers/gpu/drm/i915/i915_drv.h | 3 +-
drivers/gpu/drm/i915/i915_gem.c | 42 +++++++++++++++++++++++++---------
drivers/gpu/drm/i915/intel_overlay.c | 4 +-
3 files changed, 35 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d2da91f..9d9d160 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1187,7 +1187,8 @@ int __must_check i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
struct drm_i915_gem_request *request);
int __must_check i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno);
+ uint32_t seqno,
+ bool drop_mutex);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ed0b68f..9c743ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1943,13 +1943,15 @@ i915_gem_retire_work_handler(struct work_struct *work)
*/
int
i915_wait_request(struct intel_ring_buffer *ring,
- uint32_t seqno)
+ uint32_t seqno,
+ bool drop_mutex)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
u32 ier;
int ret = 0;
BUG_ON(seqno == 0);
+ BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
@@ -1994,8 +1996,12 @@ i915_wait_request(struct intel_ring_buffer *ring,
trace_i915_gem_request_wait_begin(ring, seqno);
- ring->waiting_seqno = seqno;
+ if (!drop_mutex)
+ ring->waiting_seqno = seqno;
if (ring->irq_get(ring)) {
+ if (drop_mutex)
+ mutex_unlock(&ring->dev->struct_mutex);
+
if (dev_priv->mm.interruptible)
ret = wait_event_interruptible(ring->irq_queue,
i915_seqno_passed(ring->get_seqno(ring), seqno)
@@ -2005,12 +2011,23 @@ i915_wait_request(struct intel_ring_buffer *ring,
i915_seqno_passed(ring->get_seqno(ring), seqno)
|| atomic_read(&dev_priv->mm.wedged));
+ if (drop_mutex)
+ mutex_lock(&ring->dev->struct_mutex);
+
ring->irq_put(ring);
- } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
- ret = -EBUSY;
- ring->waiting_seqno = 0;
+ } else {
+ if (drop_mutex)
+ mutex_unlock(&ring->dev->struct_mutex);
+ if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
+ ret = -EBUSY;
+ if (drop_mutex)
+ mutex_lock(&ring->dev->struct_mutex);
+ }
+
+ if (!drop_mutex)
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(ring, seqno);
}
@@ -2051,7 +2068,8 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
* it.
*/
if (obj->active) {
- ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+ ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
+ false);
if (ret)
return ret;
}
@@ -2186,7 +2204,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
return ret;
}
- return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
+ return i915_wait_request(ring, i915_gem_next_request_seqno(ring), false);
}
int
@@ -2400,7 +2418,8 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
obj->last_fenced_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- obj->last_fenced_seqno);
+ obj->last_fenced_seqno,
+ false);
if (ret)
return ret;
}
@@ -2541,7 +2560,8 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
if (!ring_passed_seqno(obj->last_fenced_ring,
reg->setup_seqno)) {
ret = i915_wait_request(obj->last_fenced_ring,
- reg->setup_seqno);
+ reg->setup_seqno,
+ false);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index cdf17d4..8f27f2b 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -227,7 +227,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
}
overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, false);
if (ret)
return ret;
@@ -448,7 +448,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
if (overlay->last_flip_req == 0)
return 0;
- ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
+ ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req, false);
if (ret)
return ret;
--
1.7.7.3