mirror of https://mirrors.bfsu.edu.cn/git/linux.git
drm/i915/gt: Prefer local path to runtime powermanagement
Avoid going to the base i915 device when we already have a path from gt to the runtime powermanagement interface. The benefit is that it looks a bit more self-consistent to always be acquiring the gt->uncore->rpm for use with the gt->uncore.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191007154531.1750-1-chris@chris-wilson.co.uk
parent b9dcb97b6c
commit cd6a851385
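Every hunk below swaps a device-level handle (&...->i915->runtime_pm) for the gt/uncore-local one (...->uncore->rpm). The following is a minimal stand-alone sketch, using hypothetical stand-in types rather than the real i915 definitions, of why the two expressions can be treated as interchangeable, assuming the uncore caches a pointer to the device's runtime-PM state at early init:

/*
 * Stand-alone sketch with hypothetical stand-in types (not the real i915
 * definitions): the uncore is assumed to cache a pointer to the device's
 * runtime-PM state at early init, so gt->uncore->rpm and
 * &gt->i915->runtime_pm name the same object, and the former keeps call
 * sites local to the gt/uncore pair they already use.
 */
#include <assert.h>

struct intel_runtime_pm { int wakeref_count; };

struct drm_i915_private { struct intel_runtime_pm runtime_pm; };

struct intel_uncore {
	struct drm_i915_private *i915;
	struct intel_runtime_pm *rpm;	/* cached &i915->runtime_pm */
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
};

/* Hypothetical early-init helper modelling the cached back-pointer. */
static void uncore_init_early(struct intel_uncore *uncore,
			      struct drm_i915_private *i915)
{
	uncore->i915 = i915;
	uncore->rpm = &i915->runtime_pm;
}

int main(void)
{
	struct drm_i915_private i915 = { { 0 } };
	struct intel_uncore uncore;
	struct intel_gt gt = { .i915 = &i915, .uncore = &uncore };

	uncore_init_early(&uncore, &i915);

	/* Old path and new path resolve to the same runtime-PM instance. */
	assert(gt.uncore->rpm == &gt.i915->runtime_pm);
	return 0;
}

Because both paths resolve to the same object, the change is purely about keeping call sites self-consistent with the gt->uncore they already operate on.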
@@ -62,7 +62,7 @@ int __intel_context_do_pin(struct intel_context *ce)
 		}
 
 		err = 0;
-		with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref)
+		with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
 			err = ce->ops->pin(ce);
 		if (err)
 			goto err;
@@ -1458,10 +1458,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 
 	drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
-	wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
 	if (wakeref) {
 		intel_engine_print_registers(engine, m);
-		intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref);
+		intel_runtime_pm_put(engine->uncore->rpm, wakeref);
 	} else {
 		drm_printf(m, "\tDevice is asleep; skipping register dump\n");
 	}
@@ -185,7 +185,7 @@ static const struct intel_wakeref_ops wf_ops = {
 
 void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
-	struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+	struct intel_runtime_pm *rpm = engine->uncore->rpm;
 
 	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
 }
@@ -273,7 +273,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt)
 
 void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 {
-	struct drm_i915_private *i915 = gt->i915;
+	struct intel_uncore *uncore = gt->uncore;
 	intel_wakeref_t wakeref;
 
 	/*
@@ -297,13 +297,12 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
 
 	wmb();
 
-	if (INTEL_INFO(i915)->has_coherent_ggtt)
+	if (INTEL_INFO(gt->i915)->has_coherent_ggtt)
 		return;
 
 	intel_gt_chipset_flush(gt);
 
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		struct intel_uncore *uncore = gt->uncore;
+	with_intel_runtime_pm(uncore->rpm, wakeref) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&uncore->lock, flags);
@@ -94,7 +94,7 @@ static const struct intel_wakeref_ops wf_ops = {
 
 void intel_gt_pm_init_early(struct intel_gt *gt)
 {
-	intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+	intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
 
 	BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
 }
@@ -222,7 +222,7 @@ void intel_gt_suspend(struct intel_gt *gt)
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_idle(gt);
 
-	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
 		intel_rc6_disable(&gt->rc6);
 }
 
@@ -271,7 +271,7 @@ static void hangcheck_elapsed(struct work_struct *work)
 	if (intel_gt_is_wedged(gt))
 		return;
 
-	wakeref = intel_runtime_pm_get_if_in_use(&gt->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
 	if (!wakeref)
 		return;
 
@@ -322,7 +322,7 @@ static void hangcheck_elapsed(struct work_struct *work)
 	if (hung)
 		hangcheck_declare_hang(gt, hung, stuck);
 
-	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 
 	/* Reset timer in case GPU hangs without another request being added */
 	intel_gt_queue_hangcheck(gt);
@@ -811,7 +811,7 @@ void intel_gt_set_wedged(struct intel_gt *gt)
 	intel_wakeref_t wakeref;
 
 	mutex_lock(&gt->reset.mutex);
-	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
 		__intel_gt_set_wedged(gt);
 	mutex_unlock(&gt->reset.mutex);
 }
@@ -1186,7 +1186,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	 * isn't the case at least when we get here by doing a
 	 * simulated reset via debugfs, so get an RPM reference.
 	 */
-	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
 	engine_mask &= INTEL_INFO(gt->i915)->engine_mask;
 
@@ -1246,7 +1246,7 @@ void intel_gt_handle_error(struct intel_gt *gt,
 	wake_up_all(&gt->reset.queue);
 
 out:
-	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 }
 
 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
@@ -1695,14 +1695,14 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	if (intel_gt_is_wedged(gt))
 		return -EIO; /* we're long past hope of a successful reset */
 
-	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 	drain_delayed_work(&gt->hangcheck.work); /* flush param */
 
 	err = intel_gt_live_subtests(tests, gt);
 
 	i915_modparams.enable_hangcheck = saved_hangcheck;
-	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 
 	return err;
 }
@@ -17,7 +17,7 @@ static int igt_global_reset(void *arg)
 	/* Check that we can issue a global GPU reset */
 
 	igt_global_reset_lock(gt);
-	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
 	reset_count = i915_reset_count(&gt->i915->gpu_error);
 
@@ -28,7 +28,7 @@ static int igt_global_reset(void *arg)
 		err = -EINVAL;
 	}
 
-	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 	igt_global_reset_unlock(gt);
 
 	if (intel_gt_is_wedged(gt))
@@ -45,14 +45,14 @@ static int igt_wedged_reset(void *arg)
 	/* Check that we can recover a wedged device with a GPU reset */
 
 	igt_global_reset_lock(gt);
-	wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm);
+	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
 	intel_gt_set_wedged(gt);
 
 	GEM_BUG_ON(!intel_gt_is_wedged(gt));
 	intel_gt_reset(gt, ALL_ENGINES, NULL);
 
-	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
+	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 	igt_global_reset_unlock(gt);
 
 	return intel_gt_is_wedged(gt) ? -EIO : 0;
@@ -256,7 +256,7 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
 	GEM_BUG_ON(IS_ERR(ce));
 
 	rq = ERR_PTR(-ENODEV);
-	with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
 		rq = igt_spinner_create_request(spin, ce, MI_NOOP);
 
 	intel_context_put(ce);
@@ -313,7 +313,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 	if (err)
 		goto out_spin;
 
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(engine->uncore->rpm, wakeref)
 		err = reset(engine);
 
 	igt_spinner_end(&spin);
 
@@ -607,7 +607,6 @@ out_unlock:
 void intel_guc_log_relay_flush(struct intel_guc_log *log)
 {
 	struct intel_guc *guc = log_to_guc(log);
-	struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
 	intel_wakeref_t wakeref;
 
 	/*
@@ -616,7 +615,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
 	 */
 	flush_work(&log->relay.flush_work);
 
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref)
 		guc_action_flush_log(guc);
 
 	/* GuC would have updated log buffer by now, so capture it */
@@ -185,7 +185,7 @@ int intel_huc_check_status(struct intel_huc *huc)
 	if (!intel_huc_is_supported(huc))
 		return -ENODEV;
 
-	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
 		status = intel_uncore_read(gt->uncore, huc->status.reg);
 
 	return (status & huc->status.mask) == huc->status.value;
@@ -587,7 +587,7 @@ void intel_uc_suspend(struct intel_uc *uc)
 	if (!intel_guc_is_running(guc))
 		return;
 
-	with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref)
+	with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
 		intel_uc_runtime_suspend(uc);
 }
 
@@ -412,7 +412,7 @@ void gen9_reset_guc_interrupts(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 
-	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+	assert_rpm_wakelock_held(gt->uncore->rpm);
 
 	spin_lock_irq(&gt->irq_lock);
 	gen6_gt_pm_reset_iir(gt, gt->pm_guc_events);
@@ -423,7 +423,7 @@ void gen9_enable_guc_interrupts(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 
-	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+	assert_rpm_wakelock_held(gt->uncore->rpm);
 
 	spin_lock_irq(&gt->irq_lock);
 	if (!guc->interrupts.enabled) {
@@ -440,7 +440,7 @@ void gen9_disable_guc_interrupts(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
 
-	assert_rpm_wakelock_held(&gt->i915->runtime_pm);
+	assert_rpm_wakelock_held(gt->uncore->rpm);
 
 	spin_lock_irq(&gt->irq_lock);
 	guc->interrupts.enabled = false;