drm/i915/gt: Extract busy-stats for ring-scheduler

Lift the busy-stats context-in/out implementation out of intel_lrc, so
that we can reuse it for other scheduler implementations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210115142331.24458-2-chris@chris-wilson.co.uk
Author: Chris Wilson <chris@chris-wilson.co.uk>
Date:   2021-01-15 14:23:28 +0000
Parent: 2c421896ad
Commit: 4fb05a392a

2 changed files with 50 additions and 33 deletions

drivers/gpu/drm/i915/gt/intel_engine_stats.h (new file)

@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_STATS_H__
+#define __INTEL_ENGINE_STATS_H__
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/seqlock.h>
+
+#include "i915_gem.h" /* GEM_BUG_ON */
+#include "intel_engine.h"
+
+static inline void intel_engine_context_in(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	if (atomic_add_unless(&engine->stats.active, 1, 0))
+		return;
+
+	write_seqlock_irqsave(&engine->stats.lock, flags);
+	if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
+		engine->stats.start = ktime_get();
+		atomic_inc(&engine->stats.active);
+	}
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+static inline void intel_engine_context_out(struct intel_engine_cs *engine)
+{
+	unsigned long flags;
+
+	GEM_BUG_ON(!atomic_read(&engine->stats.active));
+
+	if (atomic_add_unless(&engine->stats.active, -1, 1))
+		return;
+
+	write_seqlock_irqsave(&engine->stats.lock, flags);
+	if (atomic_dec_and_test(&engine->stats.active)) {
+		engine->stats.total =
+			ktime_add(engine->stats.total,
+				  ktime_sub(ktime_get(), engine->stats.start));
+	}
+	write_sequnlock_irqrestore(&engine->stats.lock, flags);
+}
+
+#endif /* __INTEL_ENGINE_STATS_H__ */
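
The two helpers pair an atomic fast path with a seqlock-guarded slow path: while the engine already has requests in flight, context-in/out is a plain atomic increment or decrement, and only the 0 <-> 1 transitions, which touch stats.start and stats.total, take the seqlock. A consumer then samples those fields through the read side of the same seqlock. As a hedged sketch of that read side (not part of this patch; in the driver the existing reader is intel_engine_get_busy_time()), a busy-time query could look like:

static ktime_t sketch_busy_time(struct intel_engine_cs *engine)
{
	ktime_t total;
	unsigned int seq;

	do {
		seq = read_seqbegin(&engine->stats.lock);

		/* Busy time accumulated up to the last context-out... */
		total = engine->stats.total;

		/* ...plus the still-open interval, if the engine is active. */
		if (atomic_read(&engine->stats.active))
			total = ktime_add(total,
					  ktime_sub(ktime_get(),
						    engine->stats.start));
	} while (read_seqretry(&engine->stats.lock, seq));

	return total;
}

The retry loop simply re-samples if a context-in/out slow path raced with the read, so the reader never sees a half-updated start/total pair.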

drivers/gpu/drm/i915/gt/intel_execlists_submission.c

@@ -115,6 +115,7 @@
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_pm.h"
#include "intel_engine_stats.h"
#include "intel_execlists_submission.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
@@ -431,39 +432,6 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status)
 				   status, rq);
 }

-static void intel_engine_context_in(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	if (atomic_add_unless(&engine->stats.active, 1, 0))
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (!atomic_add_unless(&engine->stats.active, 1, 0)) {
-		engine->stats.start = ktime_get();
-		atomic_inc(&engine->stats.active);
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
-static void intel_engine_context_out(struct intel_engine_cs *engine)
-{
-	unsigned long flags;
-
-	GEM_BUG_ON(!atomic_read(&engine->stats.active));
-
-	if (atomic_add_unless(&engine->stats.active, -1, 1))
-		return;
-
-	write_seqlock_irqsave(&engine->stats.lock, flags);
-	if (atomic_dec_and_test(&engine->stats.active)) {
-		engine->stats.total =
-			ktime_add(engine->stats.total,
-				  ktime_sub(ktime_get(), engine->stats.start));
-	}
-	write_sequnlock_irqrestore(&engine->stats.lock, flags);
-}
-
 static void reset_active(struct i915_request *rq,
			 struct intel_engine_cs *engine)
 {
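
With the helpers extracted into a standalone header, any submission backend can account engine busyness by bracketing its requests' entry to and exit from the hardware, which is the point of this patch for a future ring scheduler. A minimal sketch of such hooks follows; the function names are illustrative assumptions, not part of this patch:

#include "intel_engine_stats.h"

/* Hypothetical backend hooks; only the helper calls are from this patch. */
static void sketch_schedule_in(struct i915_request *rq)
{
	/* Nests safely: only the 0 -> 1 transition records a start time. */
	intel_engine_context_in(rq->engine);
}

static void sketch_schedule_out(struct i915_request *rq)
{
	/* Only the last context out folds the interval into stats.total. */
	intel_engine_context_out(rq->engine);
}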