drm/i915/pmu: avoid a maybe-uninitialized warning

Conditional spinlocks make it hard for gcc and for lockdep to
follow the code flow. This one causes a warning with gcc-9 and
later:

In file included from include/linux/irq.h:14,
                 from drivers/gpu/drm/i915/i915_pmu.c:7:
drivers/gpu/drm/i915/i915_pmu.c: In function 'i915_sample':
include/linux/spinlock.h:289:3: error: 'flags' may be used uninitialized in this function [-Werror=maybe-uninitialized]
  289 |   _raw_spin_unlock_irqrestore(lock, flags); \
      |   ^~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/gpu/drm/i915/i915_pmu.c:288:17: note: 'flags' was declared here
  288 |   unsigned long flags;
      |                 ^~~~~
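
A minimal sketch of the kind of pattern gcc trips over (generic
kernel-style code, not the i915 driver; 'maybe_lock' and 'do_work'
are made up for illustration and <linux/spinlock.h> is assumed):

	static void sample(spinlock_t *maybe_lock)
	{
		unsigned long flags;

		if (maybe_lock)
			spin_lock_irqsave(maybe_lock, flags);

		do_work();	/* the part between the two conditional locks */

		if (maybe_lock)
			spin_unlock_irqrestore(maybe_lock, flags);
	}

gcc cannot prove that the unlock branch is only reached after the lock
branch assigned 'flags', so it flags the use in spin_unlock_irqrestore()
as possibly uninitialized.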

Split out the part between the locks into a separate function
for readability and to let the compiler figure out what the
logic actually is.
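
The shape of the fix, sketched with placeholder names (the actual change
to engine_sample()/engines_sample() is in the diff below): the helper does
no locking at all, and the caller keeps lock and unlock unconditionally
paired inside a single branch, so 'flags' is only read where it was written.

	static void sample_one(struct device_state *d)	/* hypothetical types */
	{
		do_work(d);
	}

	static void sample_all(struct device_state *d, bool exclusive,
			       spinlock_t *lock)
	{
		unsigned long flags;

		if (exclusive) {
			spin_lock_irqsave(lock, flags);
			sample_one(d);
			spin_unlock_irqrestore(lock, flags);
		} else {
			sample_one(d);
		}
	}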

Fixes: d79e1bd676 ("drm/i915/pmu: Only use exclusive mmio access for gen7")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20200527140526.1458215-1-arnd@arndb.de
(cherry picked from commit 6ec81b8273)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -269,12 +269,48 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
 	return IS_GEN(i915, 7);
 }
 
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+	struct intel_engine_pmu *pmu = &engine->pmu;
+	bool busy;
+	u32 val;
+
+	val = ENGINE_READ_FW(engine, RING_CTL);
+	if (val == 0) /* powerwell off => engine idle */
+		return;
+
+	if (val & RING_WAIT)
+		add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+	if (val & RING_WAIT_SEMAPHORE)
+		add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+
+	/* No need to sample when busy stats are supported. */
+	if (intel_engine_supports_stats(engine))
+		return;
+
+	/*
+	 * While waiting on a semaphore or event, MI_MODE reports the
+	 * ring as idle. However, previously using the seqno, and with
+	 * execlists sampling, we account for the ring waiting as the
+	 * engine being busy. Therefore, we record the sample as being
+	 * busy if either waiting or !idle.
+	 */
+	busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+	if (!busy) {
+		val = ENGINE_READ_FW(engine, RING_MI_MODE);
+		busy = !(val & MODE_IDLE);
+	}
+
+	if (busy)
+		add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
 static void
 engines_sample(struct intel_gt *gt, unsigned int period_ns)
 {
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	unsigned long flags;
 
 	if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
 		return;
@@ -283,53 +319,17 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 		return;
 
 	for_each_engine(engine, gt, id) {
-		struct intel_engine_pmu *pmu = &engine->pmu;
-		spinlock_t *mmio_lock;
-		unsigned long flags;
-		bool busy;
-		u32 val;
-
 		if (!intel_engine_pm_get_if_awake(engine))
 			continue;
 
-		mmio_lock = NULL;
-		if (exclusive_mmio_access(i915))
-			mmio_lock = &engine->uncore->lock;
-
-		if (unlikely(mmio_lock))
-			spin_lock_irqsave(mmio_lock, flags);
-
-		val = ENGINE_READ_FW(engine, RING_CTL);
-		if (val == 0) /* powerwell off => engine idle */
-			goto skip;
-
-		if (val & RING_WAIT)
-			add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
-		if (val & RING_WAIT_SEMAPHORE)
-			add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
-
-		/* No need to sample when busy stats are supported. */
-		if (intel_engine_supports_stats(engine))
-			goto skip;
-
-		/*
-		 * While waiting on a semaphore or event, MI_MODE reports the
-		 * ring as idle. However, previously using the seqno, and with
-		 * execlists sampling, we account for the ring waiting as the
-		 * engine being busy. Therefore, we record the sample as being
-		 * busy if either waiting or !idle.
-		 */
-		busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
-		if (!busy) {
-			val = ENGINE_READ_FW(engine, RING_MI_MODE);
-			busy = !(val & MODE_IDLE);
+		if (exclusive_mmio_access(i915)) {
+			spin_lock_irqsave(&engine->uncore->lock, flags);
+			engine_sample(engine, period_ns);
+			spin_unlock_irqrestore(&engine->uncore->lock, flags);
+		} else {
+			engine_sample(engine, period_ns);
 		}
 
-		if (busy)
-			add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
-
-skip:
-		if (unlikely(mmio_lock))
-			spin_unlock_irqrestore(mmio_lock, flags);
-
 		intel_engine_pm_put_async(engine);
 	}
 }