drm/i915/gt: execlists->active is serialised by the tasklet

The active/pending execlists is no longer protected by the
engine->active.lock, but is serialised by the tasklet instead. Update
the locking around the debug and stats to follow suit.

v2: local_bh_disable() to prevent recursing into the tasklet in case we
trigger a softirq (Tvrtko)

Fixes: df40306902 ("drm/i915/execlists: Lift process_csb() out of the irq-off spinlock")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191009160906.16195-1-chris@chris-wilson.co.uk
(cherry picked from commit c36eebd9ba)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 749085a213
commit e137d3abdf
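For orientation, here is a minimal sketch (not part of the patch) of the serialisation pattern this commit switches to: a reader of execlists->active brackets its walk with the new execlists_active_lock_bh()/execlists_active_unlock_bh() helpers instead of taking engine->active.lock. The helper and field names follow the diff below; the caller sketch_dump_active() itself is hypothetical.

/* Hypothetical caller, for illustration only: serialise a walk of the
 * execlists->active ports against the submission tasklet rather than
 * engine->active.lock.
 */
static void sketch_dump_active(struct intel_engine_cs *engine)
{
	struct intel_engine_execlists *execlists = &engine->execlists;
	struct i915_request * const *port;
	struct i915_request *rq;

	execlists_active_lock_bh(execlists); /* local_bh_disable() + tasklet_lock() */
	for (port = execlists->active; (rq = *port); port++) {
		/* inspect rq: the ports cannot be rewritten underneath us,
		 * since the tasklet (the only writer) is locked out and
		 * cannot be entered locally while softirqs are disabled.
		 */
	}
	execlists_active_unlock_bh(execlists); /* tasklet_unlock() + local_bh_enable() */
}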
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
 	return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+	local_bh_disable(); /* prevent local softirq and lock recursion */
+	tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+	tasklet_unlock(&execlists->tasklet);
+	local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 					 struct drm_printer *m)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	const struct intel_engine_execlists * const execlists =
-		&engine->execlists;
-	unsigned long flags;
+	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 addr;
 
 	if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				   idx, hws[idx * 2], hws[idx * 2 + 1]);
 		}
 
-		spin_lock_irqsave(&engine->active.lock, flags);
+		execlists_active_lock_bh(execlists);
 		for (port = execlists->active; (rq = *port); port++) {
 			char hdr[80];
 			int len;
@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 				 hwsp_seqno(rq));
 			print_request(m, rq, hdr);
 		}
-		spin_unlock_irqrestore(&engine->active.lock, flags);
+		execlists_active_unlock_bh(execlists);
 	} else if (INTEL_GEN(dev_priv) > 6) {
 		drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
 			   ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	if (!intel_engine_supports_stats(engine))
 		return -ENODEV;
 
-	spin_lock_irqsave(&engine->active.lock, flags);
-	write_seqlock(&engine->stats.lock);
+	execlists_active_lock_bh(execlists);
+	write_seqlock_irqsave(&engine->stats.lock, flags);
 
 	if (unlikely(engine->stats.enabled == ~0)) {
 		err = -EBUSY;
@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
 	}
 
 unlock:
-	write_sequnlock(&engine->stats.lock);
-	spin_unlock_irqrestore(&engine->active.lock, flags);
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+	execlists_active_unlock_bh(execlists);
 
 	return err;
 }
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+	while (!tasklet_trylock(t))
+		cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
 	if (!atomic_fetch_inc(&t->count))