From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [CI 18/20] drm/i915: Move the get/put irq locking into the caller
Date: Thu, 19 May 2016 12:32:54 +0100
Message-ID: <1463657576-32063-18-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1463657576-32063-1-git-send-email-chris@chris-wilson.co.uk>

With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
we can reduce the code size by moving the common preamble into the
caller, and we can also eliminate the reference counting.

For completeness, as we are no longer doing reference counting on irq,
rename the get/put vfunctions to enable/disable respectively.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
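A minimal sketch of the shape of the change, purely for illustration and
not code from this patch; engine_irq_get() and unmask_user_interrupt()
are made-up stand-ins for the per-gen vfuncs touched below:

static bool engine_irq_get(struct intel_engine_cs *engine)
{
        /* Before: every backend vfunc repeated the locking and refcount. */
        unsigned long flags;

        spin_lock_irqsave(&engine->i915->irq_lock, flags);
        if (engine->irq_refcount++ == 0)
                unmask_user_interrupt(engine); /* hw-specific IMR write */
        spin_unlock_irqrestore(&engine->i915->irq_lock, flags);

        return true;
}

static void irq_enable(struct intel_engine_cs *engine)
{
        /*
         * After: the single caller in intel_breadcrumbs.c owns the lock
         * and the enabled/disabled state, so each backend vfunc shrinks
         * to just the hw-specific register write.
         */
        spin_lock_irq(&engine->i915->irq_lock);
        engine->irq_enable(engine);
        spin_unlock_irq(&engine->i915->irq_lock);
}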
drivers/gpu/drm/i915/i915_irq.c | 8 +-
drivers/gpu/drm/i915/intel_breadcrumbs.c | 8 +-
drivers/gpu/drm/i915/intel_lrc.c | 34 +---
drivers/gpu/drm/i915/intel_ringbuffer.c | 269 ++++++++++---------------------
drivers/gpu/drm/i915/intel_ringbuffer.h | 5 +-
5 files changed, 106 insertions(+), 218 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f62fcf3f6ea8..70f7617b5bf1 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
dev_priv->gt_irq_mask &= ~interrupt_mask;
dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
}
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
ilk_update_gt_irq(dev_priv, mask, mask);
+ POSTING_READ_FW(GTIMR);
}
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -2840,9 +2840,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
}
static bool
-ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
{
- if (INTEL_GEN(dev_priv) >= 8) {
+ if (INTEL_GEN(engine->i915) >= 8) {
return (ipehr >> 23) == 0x1c;
} else {
ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2913,7 +2913,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
return NULL;
ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
+ if (!ipehr_is_semaphore_wait(engine, ipehr))
return NULL;
/*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 14fb9fdcde3a..475c454d11bf 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -51,12 +51,16 @@ static void irq_enable(struct intel_engine_cs *engine)
*/
engine->irq_posted = true;
- WARN_ON(!engine->irq_get(engine));
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_enable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
}
static void irq_disable(struct intel_engine_cs *engine)
{
- engine->irq_put(engine);
+ spin_lock_irq(&engine->i915->irq_lock);
+ engine->irq_disable(engine);
+ spin_unlock_irq(&engine->i915->irq_lock);
engine->irq_posted = false;
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2991fd2352b9..8ac0f1e5a36f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1582,36 +1582,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
return 0;
}
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1899,8 +1881,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->init_hw = gen8_init_common_ring;
engine->emit_request = gen8_emit_request;
engine->emit_flush = gen8_emit_flush;
- engine->irq_get = gen8_logical_ring_get_irq;
- engine->irq_put = gen8_logical_ring_put_irq;
+ engine->irq_enable = gen8_logical_ring_enable_irq;
+ engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
engine->irq_seqno_barrier = bxt_a_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 63a0c78d6a22..25b7ad684a28 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1551,103 +1551,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
spin_unlock_irq(&dev_priv->uncore.lock);
}
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen5_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0)
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+gen5_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0)
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i9xx_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (!intel_irqs_enabled(dev_priv))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
}
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- if (!intel_irqs_enabled(dev_priv))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ dev_priv->irq_mask &= ~engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
+ POSTING_READ16(RING_IMR(engine->mmio_base));
}
static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
- POSTING_READ16(IMR);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ dev_priv->irq_mask |= engine->irq_enable_mask;
+ I915_WRITE16(IMR, dev_priv->irq_mask);
}
static int
@@ -1688,122 +1639,74 @@ i9xx_add_request(struct drm_i915_gem_request *req)
return 0;
}
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_PARITY_ERROR(dev_priv)));
- else
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ GT_PARITY_ERROR(dev_priv)));
+ else
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
}
static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
- I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
- else
- I915_WRITE_IMR(engine, ~0);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
+ else
+ I915_WRITE_IMR(engine, ~0);
+ gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
-
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
}
static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- I915_WRITE_IMR(engine, ~0);
- gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ I915_WRITE_IMR(engine, ~0);
+ gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
}
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_ring_enable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
- return false;
-
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (engine->irq_refcount++ == 0) {
- if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
- } else {
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
- return true;
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine,
+ ~(engine->irq_enable_mask |
+ GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+ else
+ I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ POSTING_READ_FW(RING_IMR(engine->mmio_base));
}
static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_ring_disable_irq(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- unsigned long flags;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--engine->irq_refcount == 0) {
- if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
- I915_WRITE_IMR(engine,
- ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
- } else {
- I915_WRITE_IMR(engine, ~0);
- }
- POSTING_READ(RING_IMR(engine->mmio_base));
- }
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+ I915_WRITE_IMR(engine,
+ ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ else
+ I915_WRITE_IMR(engine, ~0);
}
static int
@@ -2674,8 +2577,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->init_context = intel_rcs_ctx_init;
engine->add_request = gen8_render_add_request;
engine->flush = gen8_render_ring_flush;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable = gen8_ring_enable_irq;
+ engine->irq_disable = gen8_ring_disable_irq;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
if (i915_semaphore_is_enabled(dev_priv)) {
WARN_ON(!dev_priv->semaphore_obj);
@@ -2689,8 +2592,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->flush = gen7_render_ring_flush;
if (IS_GEN6(dev_priv))
engine->flush = gen6_render_ring_flush;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable = gen6_ring_enable_irq;
+ engine->irq_disable = gen6_ring_disable_irq;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
engine->irq_seqno_barrier = gen6_seqno_barrier;
if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2717,8 +2620,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
} else if (IS_GEN5(dev_priv)) {
engine->add_request = i9xx_add_request;
engine->flush = gen4_render_ring_flush;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
+ engine->irq_enable = gen5_ring_enable_irq;
+ engine->irq_disable = gen5_ring_disable_irq;
engine->irq_seqno_barrier = gen5_seqno_barrier;
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
} else {
@@ -2728,11 +2631,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
else
engine->flush = gen4_render_ring_flush;
if (IS_GEN2(dev_priv)) {
- engine->irq_get = i8xx_ring_get_irq;
- engine->irq_put = i8xx_ring_put_irq;
+ engine->irq_enable = i8xx_ring_enable_irq;
+ engine->irq_disable = i8xx_ring_disable_irq;
} else {
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
+ engine->irq_enable = i9xx_ring_enable_irq;
+ engine->irq_disable = i9xx_ring_disable_irq;
}
engine->irq_enable_mask = I915_USER_INTERRUPT;
}
@@ -2792,8 +2695,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable = gen8_ring_enable_irq;
+ engine->irq_disable = gen8_ring_disable_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2803,8 +2706,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
}
} else {
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable = gen6_ring_enable_irq;
+ engine->irq_disable = gen6_ring_disable_irq;
engine->dispatch_execbuffer =
gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2828,13 +2731,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->add_request = i9xx_add_request;
if (IS_GEN5(dev_priv)) {
engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
- engine->irq_get = gen5_ring_get_irq;
- engine->irq_put = gen5_ring_put_irq;
+ engine->irq_enable = gen5_ring_enable_irq;
+ engine->irq_disable = gen5_ring_disable_irq;
engine->irq_seqno_barrier = gen5_seqno_barrier;
} else {
engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
- engine->irq_get = i9xx_ring_get_irq;
- engine->irq_put = i9xx_ring_put_irq;
+ engine->irq_enable = i9xx_ring_enable_irq;
+ engine->irq_disable = i9xx_ring_disable_irq;
}
engine->dispatch_execbuffer = i965_dispatch_execbuffer;
}
@@ -2863,8 +2766,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
engine->irq_seqno_barrier = gen6_seqno_barrier;
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable = gen8_ring_enable_irq;
+ engine->irq_disable = gen8_ring_disable_irq;
engine->dispatch_execbuffer =
gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2895,8 +2798,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable = gen8_ring_enable_irq;
+ engine->irq_disable = gen8_ring_disable_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
@@ -2905,8 +2808,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
}
} else {
engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
- engine->irq_get = gen6_ring_get_irq;
- engine->irq_put = gen6_ring_put_irq;
+ engine->irq_enable = gen6_ring_enable_irq;
+ engine->irq_disable = gen6_ring_disable_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.signal = gen6_signal;
@@ -2954,8 +2857,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
if (INTEL_GEN(dev_priv) >= 8) {
engine->irq_enable_mask =
GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
- engine->irq_get = gen8_ring_get_irq;
- engine->irq_put = gen8_ring_put_irq;
+ engine->irq_enable = gen8_ring_enable_irq;
+ engine->irq_disable = gen8_ring_disable_irq;
engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen8_ring_sync;
@@ -2964,8 +2867,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
}
} else {
engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
- engine->irq_get = hsw_vebox_get_irq;
- engine->irq_put = hsw_vebox_put_irq;
+ engine->irq_enable = hsw_vebox_enable_irq;
+ engine->irq_disable = hsw_vebox_disable_irq;
engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
if (i915_semaphore_is_enabled(dev_priv)) {
engine->semaphore.sync_to = gen6_ring_sync;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ca611fe6997e..b591f5dd23cd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -200,11 +200,10 @@ struct intel_engine_cs {
struct intel_hw_status_page status_page;
struct i915_ctx_workarounds wa_ctx;
- unsigned irq_refcount; /* protected by dev_priv->irq_lock */
bool irq_posted;
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- bool __must_check (*irq_get)(struct intel_engine_cs *ring);
- void (*irq_put)(struct intel_engine_cs *ring);
+ void (*irq_enable)(struct intel_engine_cs *ring);
+ void (*irq_disable)(struct intel_engine_cs *ring);
int (*init_hw)(struct intel_engine_cs *ring);
--
2.8.1