drm/i915/ringbuffer: Simplify the ring irq refcounting

... and move it under the spinlock to gain the appropriate memory
barriers.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32752
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
This commit is contained in:
Chris Wilson 2011-01-04 22:22:56 +00:00
parent 9862e600ce
commit 01a03331e5
2 changed files with 25 additions and 39 deletions

View File

@ -521,22 +521,20 @@ static bool
render_ring_get_irq(struct intel_ring_buffer *ring) render_ring_get_irq(struct intel_ring_buffer *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev->irq_enabled) if (!dev->irq_enabled)
return false; return false;
if (atomic_inc_return(&ring->irq_refcount) == 1) { spin_lock(&dev_priv->irq_lock);
drm_i915_private_t *dev_priv = dev->dev_private; if (ring->irq_refcount++ == 0) {
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
ironlake_enable_irq(dev_priv, ironlake_enable_irq(dev_priv,
GT_PIPE_NOTIFY | GT_USER_INTERRUPT); GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT); i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
spin_unlock(&dev_priv->irq_lock);
return true; return true;
} }
@ -545,20 +543,18 @@ static void
render_ring_put_irq(struct intel_ring_buffer *ring) render_ring_put_irq(struct intel_ring_buffer *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
if (atomic_dec_and_test(&ring->irq_refcount)) {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags); spin_lock(&dev_priv->irq_lock);
if (--ring->irq_refcount == 0) {
if (HAS_PCH_SPLIT(dev)) if (HAS_PCH_SPLIT(dev))
ironlake_disable_irq(dev_priv, ironlake_disable_irq(dev_priv,
GT_USER_INTERRUPT | GT_USER_INTERRUPT |
GT_PIPE_NOTIFY); GT_PIPE_NOTIFY);
else else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT); i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
} }
spin_unlock(&dev_priv->irq_lock);
} }
void intel_ring_setup_status_page(struct intel_ring_buffer *ring) void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@ -619,18 +615,15 @@ static bool
ring_get_irq(struct intel_ring_buffer *ring, u32 flag) ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
if (!dev->irq_enabled) if (!dev->irq_enabled)
return false; return false;
if (atomic_inc_return(&ring->irq_refcount) == 1) { spin_lock(&dev_priv->irq_lock);
drm_i915_private_t *dev_priv = dev->dev_private; if (ring->irq_refcount++ == 0)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
ironlake_enable_irq(dev_priv, flag); ironlake_enable_irq(dev_priv, flag);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); spin_unlock(&dev_priv->irq_lock);
}
return true; return true;
} }
@ -639,35 +632,30 @@ static void
/*
 * ring_put_irq - drop a reference on a GT ring interrupt (Ironlake)
 * @ring: the ring whose interrupt reference is released
 * @flag: GT interrupt bit(s) to mask when the last reference drops
 *
 * Counterpart to ring_get_irq(); masks @flag when the refcount
 * reaches zero, under dev_priv->irq_lock.
 */
static void
ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->irq_lock);
	if (--ring->irq_refcount == 0)
		ironlake_disable_irq(dev_priv, flag);
	spin_unlock(&dev_priv->irq_lock);
}
/*
 * gen6_ring_get_irq - take a reference on a ring interrupt (Gen6+)
 * @ring: the ring whose interrupt is requested
 * @gflag: GT interrupt enable bit(s)
 * @rflag: per-ring IMR mask bit(s) to clear
 *
 * First reference (refcount 0 -> 1) clears @rflag from the ring's
 * interrupt mask register and unmasks @gflag at the GT level.  All of
 * it runs under dev_priv->irq_lock so the refcount, the cached
 * ring->irq_mask and the hardware IMR stay consistent.
 *
 * Returns: false if interrupts are disabled device-wide, true otherwise.
 */
static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev->irq_enabled)
		return false;

	spin_lock(&dev_priv->irq_lock);
	if (ring->irq_refcount++ == 0) {
		ring->irq_mask &= ~rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_enable_irq(dev_priv, gflag);
	}
	spin_unlock(&dev_priv->irq_lock);

	return true;
}
@ -676,17 +664,15 @@ static void
/*
 * gen6_ring_put_irq - drop a reference on a ring interrupt (Gen6+)
 * @ring: the ring whose interrupt reference is released
 * @gflag: GT interrupt enable bit(s) to mask on last reference
 * @rflag: per-ring IMR mask bit(s) to set on last reference
 *
 * Counterpart to gen6_ring_get_irq().  When the refcount reaches zero,
 * re-masks @rflag in the ring's IMR and disables @gflag at the GT
 * level, all under dev_priv->irq_lock.
 */
static void
gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->irq_lock);
	if (--ring->irq_refcount == 0) {
		ring->irq_mask |= rflag;
		I915_WRITE_IMR(ring, ring->irq_mask);
		ironlake_disable_irq(dev_priv, gflag);
	}
	spin_unlock(&dev_priv->irq_lock);
}
static bool static bool

View File

@ -55,11 +55,11 @@ struct intel_ring_buffer {
int effective_size; int effective_size;
struct intel_hw_status_page status_page; struct intel_hw_status_page status_page;
u32 irq_refcount;
u32 irq_mask; u32 irq_mask;
u32 irq_seqno; /* last seq seem at irq time */ u32 irq_seqno; /* last seq seem at irq time */
u32 waiting_seqno; u32 waiting_seqno;
u32 sync_seqno[I915_NUM_RINGS-1]; u32 sync_seqno[I915_NUM_RINGS-1];
atomic_t irq_refcount;
bool __must_check (*irq_get)(struct intel_ring_buffer *ring); bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
void (*irq_put)(struct intel_ring_buffer *ring); void (*irq_put)(struct intel_ring_buffer *ring);