drm/i915/ringbuffer: Simplify the ring irq refcounting
... and move it under the spinlock to gain the appropriate memory barriers.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32752
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 9862e600ce
commit 01a03331e5
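For context, the patch applies the same transformation to every ring irq get/put path: the lock-free atomic_t reference count becomes a plain u32 that is only read or written while dev_priv->irq_lock is held, so the count transition and the hardware (un)masking form a single critical section, and the lock's acquire/release semantics supply the memory barriers the commit message refers to. Below is a minimal standalone sketch of the before/after pattern, not the driver's actual code: struct irq_ref and the enable_hw_irq()/disable_hw_irq() stubs are hypothetical stand-ins for dev_priv->irq_lock, ring->irq_refcount, and the ironlake_* and i915_* irq helpers.

#include <linux/spinlock.h>
#include <linux/types.h>

struct irq_ref {
	spinlock_t	lock;		/* stands in for dev_priv->irq_lock */
	u32		refcount;	/* protected by lock */
};

/* Hypothetical stand-ins for the hardware unmask/mask register writes. */
static void enable_hw_irq(struct irq_ref *r) { /* hw unmask elided */ }
static void disable_hw_irq(struct irq_ref *r) { /* hw mask elided */ }

/*
 * Before: refcount was an atomic_t and only the register write was
 * locked, so the 0 -> 1 transition and the unmask were two separate
 * steps. After: both happen inside one critical section, and the
 * lock's acquire/release ordering makes them visible as a unit to
 * other CPUs.
 */
static void irq_ref_get(struct irq_ref *r)
{
	spin_lock(&r->lock);
	if (r->refcount++ == 0)		/* first user: unmask in hardware */
		enable_hw_irq(r);
	spin_unlock(&r->lock);
}

static void irq_ref_put(struct irq_ref *r)
{
	spin_lock(&r->lock);
	if (--r->refcount == 0)		/* last user: mask again */
		disable_hw_irq(r);
	spin_unlock(&r->lock);
}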
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -521,22 +521,20 @@ static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_enable_irq(dev_priv,
 					    GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 
 	return true;
 }
@@ -545,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		if (HAS_PCH_SPLIT(dev))
 			ironlake_disable_irq(dev_priv,
 					     GT_USER_INTERRUPT |
 					     GT_PIPE_NOTIFY);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -619,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0)
 		ironlake_enable_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_unlock(&dev_priv->irq_lock);
 
	return true;
 }
@@ -639,35 +632,30 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0)
 		ironlake_disable_irq(dev_priv, flag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	if (atomic_inc_return(&ring->irq_refcount) == 1) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (ring->irq_refcount++ == 0) {
 		ring->irq_mask &= ~rflag;
 		I915_WRITE_IMR(ring, ring->irq_mask);
 		ironlake_enable_irq(dev_priv, gflag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 
 	return true;
 }
@@ -676,17 +664,15 @@ static void
 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
 	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (atomic_dec_and_test(&ring->irq_refcount)) {
-		drm_i915_private_t *dev_priv = dev->dev_private;
-		unsigned long irqflags;
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+	spin_lock(&dev_priv->irq_lock);
+	if (--ring->irq_refcount == 0) {
 		ring->irq_mask |= rflag;
 		I915_WRITE_IMR(ring, ring->irq_mask);
 		ironlake_disable_irq(dev_priv, gflag);
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 	}
+	spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,11 +55,11 @@ struct intel_ring_buffer {
 	int		effective_size;
 	struct intel_hw_status_page status_page;
 
+	u32		irq_refcount;
 	u32		irq_mask;
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		waiting_seqno;
 	u32		sync_seqno[I915_NUM_RINGS-1];
-	atomic_t	irq_refcount;
 	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
 	void		(*irq_put)(struct intel_ring_buffer *ring);
 
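One consequence visible in the header: irq_get() still returns bool and is marked __must_check, so callers have to handle the case where dev->irq_enabled is unset and the refcount path is never taken. A hypothetical caller sketch (the polling fallback is an assumption, not code from this patch):

/* Hypothetical caller: irq_get() fails when dev->irq_enabled is unset,
 * and __must_check forces the caller to examine the result. */
if (ring->irq_get(ring)) {
	/* ... sleep until the interrupt advances the seqno ... */
	ring->irq_put(ring);
} else {
	/* assumed fallback: poll the status page instead */
}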