[Intel-gfx] [PATCH 08/14] drm/i915: abstract away ring-specific irq_get/put
Daniel Vetter
daniel.vetter at ffwll.ch
Wed Apr 11 22:12:53 CEST 2012
Inspired by Ben Widawsky's patch for gen6+. Now that we've restructured
how the ring vtables and parameters are set up, we can do this properly:
the interrupt enable mask becomes a per-ring parameter. This kills the
bsd-specific get/put_irq functions; they're now the same as the common
i9xx variants.
Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>
---
drivers/gpu/drm/i915/intel_ringbuffer.c | 77 ++++++++++---------------------
1 files changed, 24 insertions(+), 53 deletions(-)
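[Not part of the patch itself: a standalone, simplified sketch of the
pattern the diff below introduces, i.e. one shared irq get/put pair keyed
off a per-ring irq_enable_mask instead of a separate pair per ring. The
struct, field order and helper names here are made up for illustration;
the real code is struct intel_ring_buffer and the i9xx_ring_get_irq/
i9xx_ring_put_irq functions in the hunks below.]

/*
 * Illustrative sketch only: a shared get/put implementation that keys
 * off a per-ring mask.  Types and the enable/disable helpers are
 * simplified stand-ins, not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring {
	const char *name;
	unsigned int irq_refcount;
	unsigned int irq_enable_mask;	/* chosen once at ring init time */
	bool (*irq_get)(struct ring *ring);
	void (*irq_put)(struct ring *ring);
};

static void enable_irq(unsigned int mask)  { printf("enable  0x%x\n", mask); }
static void disable_irq(unsigned int mask) { printf("disable 0x%x\n", mask); }

/* Common implementation: only the mask differs between rings. */
static bool common_ring_get_irq(struct ring *ring)
{
	if (ring->irq_refcount++ == 0)
		enable_irq(ring->irq_enable_mask);
	return true;
}

static void common_ring_put_irq(struct ring *ring)
{
	if (--ring->irq_refcount == 0)
		disable_irq(ring->irq_enable_mask);
}

int main(void)
{
	/* Ring init only has to pick the mask; the vfuncs are shared. */
	struct ring render = { "render", 0, 0x1,
			       common_ring_get_irq, common_ring_put_irq };
	struct ring bsd = { "bsd", 0, 0x2,
			    common_ring_get_irq, common_ring_put_irq };

	render.irq_get(&render);
	bsd.irq_get(&bsd);
	render.irq_put(&render);
	bsd.irq_put(&bsd);
	return 0;
}

With that shape in mind, the hunks below are mostly mechanical: rename
the render variants, drop the duplicated bsd functions, and set the
masks in the ring init functions.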
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 12f9304..6624a22 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -639,7 +639,7 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static bool
-render_ring_get_irq(struct intel_ring_buffer *ring)
+i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -651,9 +651,9 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 	if (ring->irq_refcount++ == 0) {
 		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_enable_irq(dev_priv,
-					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
+					    ring->irq_enable_mask);
 		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+			i915_enable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 
@@ -661,7 +661,7 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
 }
 
 static void
-render_ring_put_irq(struct intel_ring_buffer *ring)
+i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -670,10 +670,9 @@ render_ring_put_irq(struct intel_ring_buffer *ring)
 	if (--ring->irq_refcount == 0) {
 		if (INTEL_INFO(dev)->gen >= 5)
 			ironlake_disable_irq(dev_priv,
-					     GT_USER_INTERRUPT |
-					     GT_PIPE_NOTIFY);
+					     ring->irq_enable_mask);
 		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+			i915_disable_irq(dev_priv, ring->irq_enable_mask);
 	}
 	spin_unlock(&ring->irq_lock);
 }
@@ -789,42 +788,6 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 	gen6_gt_force_wake_put(dev_priv);
 }
 
-static bool
-bsd_ring_get_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (!dev->irq_enabled)
-		return false;
-
-	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (IS_G4X(dev))
-			i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
-
-	return true;
-}
-static void
-bsd_ring_put_irq(struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = ring->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (IS_G4X(dev))
-			i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
-		else
-			ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
-	}
-	spin_unlock(&ring->irq_lock);
-}
-
 static int
 ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
 {
@@ -1332,14 +1295,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
 	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
@@ -1371,14 +1336,16 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = render_ring_get_irq;
-		ring->irq_put = render_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 	ring->write_tail = ring_write_tail;
 	ring->dispatch_execbuffer = render_ring_dispatch_execbuffer;
@@ -1445,8 +1412,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = ring_add_request;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = bsd_ring_get_irq;
-		ring->irq_put = bsd_ring_put_irq;
+		ring->irq_get = i9xx_ring_get_irq;
+		ring->irq_put = i9xx_ring_put_irq;
+		if (IS_GEN5(dev))
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+		else
+			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
 		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
 	}
 	ring->init = init_ring_common;
--
1.7.7.5