drm/i915: Extract GT render power state management
i915_irq.c is large. One reason for this is that it has a large chunk of
the GT render power management stashed away in it. Extract that logic
out of i915_irq.c and intel_pm.c and put it under one roof.

Based on a patch by Chris Wilson.

Signed-off-by: Andi Shyti <andi.shyti@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20191024211642.7688-1-chris@chris-wilson.co.uk
This commit is contained in:
parent 35865aef05
commit 3e7abf8141
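Most of the diff below is a mechanical migration: the RPS state moves from dev_priv->gt_pm.rps to its new home in struct intel_gt (gt->rps), and helpers such as intel_gpu_freq(), intel_freq_opcode() and intel_get_cagf() now take a struct intel_rps * instead of a struct drm_i915_private *. A minimal sketch of the before/after pattern at a call site (the wrapper functions here are hypothetical, for illustration only):

/* Before this patch: RPS helpers were keyed off the whole device */
static int report_cur_freq_old(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	return intel_gpu_freq(dev_priv, rps->cur_freq); /* MHz */
}

/* After this patch: RPS hangs off struct intel_gt and helpers take it directly */
static int report_cur_freq_new(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt.rps;

	return intel_gpu_freq(rps, rps->cur_freq); /* MHz */
}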
@@ -95,6 +95,7 @@ gt-y += \
 	gt/intel_reset.o \
 	gt/intel_ring.o \
 	gt/intel_ring_submission.o \
+	gt/intel_rps.o \
 	gt/intel_sseu.o \
 	gt/intel_timeline.o \
 	gt/intel_workarounds.o
@@ -55,6 +55,8 @@
 #include "display/intel_tv.h"
 #include "display/intel_vdsc.h"
 
+#include "gt/intel_rps.h"
+
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_acpi.h"
@@ -14944,7 +14946,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
 	 * vblank without our intervention, so leave RPS alone.
 	 */
 	if (!i915_request_started(rq))
-		gen6_rps_boost(rq);
+		intel_rps_boost(rq);
 	i915_request_put(rq);
 
 	drm_crtc_vblank_put(wait->crtc);
@@ -15138,7 +15140,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	 * maximum clocks following a vblank miss (see do_rps_boost()).
 	 */
 	if (!intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, true);
+		intel_rps_mark_interactive(&dev_priv->gt.rps, true);
 		intel_state->rps_interactive = true;
 	}
 
@@ -15163,7 +15165,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
 	struct drm_i915_private *dev_priv = to_i915(plane->dev);
 
 	if (intel_state->rps_interactive) {
-		intel_rps_mark_interactive(dev_priv, false);
+		intel_rps_mark_interactive(&dev_priv->gt.rps, false);
 		intel_state->rps_interactive = false;
 	}
 
@@ -9,6 +9,7 @@
 #include "intel_gt_requests.h"
 #include "intel_mocs.h"
 #include "intel_rc6.h"
+#include "intel_rps.h"
 #include "intel_uncore.h"
 #include "intel_pm.h"
 
@@ -31,9 +32,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 void intel_gt_init_hw_early(struct drm_i915_private *i915)
 {
 	i915->gt.ggtt = &i915->ggtt;
-
-	/* BIOS often leaves RC6 enabled, but disable it for hw init */
-	intel_gt_pm_disable(&i915->gt);
 }
 
 static void init_unused_ring(struct intel_gt *gt, u32 base)
@@ -320,8 +318,7 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
 
 void intel_gt_driver_register(struct intel_gt *gt)
 {
-	if (IS_GEN(gt->i915, 5))
-		intel_gpu_ips_init(gt->i915);
+	intel_rps_driver_register(&gt->rps);
 }
 
 static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -379,20 +376,16 @@ int intel_gt_init(struct intel_gt *gt)
 void intel_gt_driver_remove(struct intel_gt *gt)
 {
 	GEM_BUG_ON(gt->awake);
-	intel_gt_pm_disable(gt);
 }
 
 void intel_gt_driver_unregister(struct intel_gt *gt)
 {
-	intel_gpu_ips_teardown();
+	intel_rps_driver_unregister(&gt->rps);
 }
 
 void intel_gt_driver_release(struct intel_gt *gt)
 {
-	/* Paranoia: make sure we have disabled everything before we exit. */
-	intel_gt_pm_disable(gt);
 	intel_gt_pm_fini(gt);
 
 	intel_gt_fini_scratch(gt);
 }
 
@@ -11,6 +11,7 @@
 #include "intel_gt.h"
 #include "intel_gt_irq.h"
 #include "intel_uncore.h"
+#include "intel_rps.h"
 
 static void guc_irq_handler(struct intel_guc *guc, u16 iir)
 {
@@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
 		return guc_irq_handler(&gt->uc.guc, iir);
 
 	if (instance == OTHER_GTPM_INSTANCE)
-		return gen11_rps_irq_handler(gt, iir);
+		return gen11_rps_irq_handler(&gt->rps, iir);
 
 	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
 		  instance, iir);
@@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
 	}
 
 	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
-		gen6_rps_irq_handler(gt->i915, gt_iir[2]);
+		gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
 		guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
 	}
 }
@@ -12,8 +12,10 @@
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
+#include "intel_llc.h"
 #include "intel_pm.h"
 #include "intel_rc6.h"
+#include "intel_rps.h"
 #include "intel_wakeref.h"
 
 static int __gt_unpark(struct intel_wakeref *wf)
@@ -39,12 +41,7 @@ static int __gt_unpark(struct intel_wakeref *wf)
 	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
 	GEM_BUG_ON(!gt->awake);
 
-	intel_enable_gt_powersave(i915);
-
-	i915_update_gfx_val(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_busy(i915);
-
+	intel_rps_unpark(&gt->rps);
 	i915_pmu_gt_unparked(i915);
 
 	intel_gt_unpark_requests(gt);
@@ -64,8 +61,7 @@ static int __gt_park(struct intel_wakeref *wf)
 
 	i915_vma_parked(gt);
 	i915_pmu_gt_parked(i915);
-	if (INTEL_GEN(i915) >= 6)
-		gen6_rps_idle(i915);
+	intel_rps_park(&gt->rps);
 
 	/* Everything switched off, flush any residual interrupt just in case */
 	intel_synchronize_irq(i915);
@@ -97,6 +93,7 @@ void intel_gt_pm_init(struct intel_gt *gt)
 	 * user.
 	 */
 	intel_rc6_init(&gt->rc6);
+	intel_rps_init(&gt->rps);
 }
 
 static bool reset_engines(struct intel_gt *gt)
@@ -140,12 +137,6 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 		engine->reset.finish(engine);
 }
 
-void intel_gt_pm_disable(struct intel_gt *gt)
-{
-	if (!is_mock_gt(gt))
-		intel_sanitize_gt_powersave(gt->i915);
-}
-
 void intel_gt_pm_fini(struct intel_gt *gt)
 {
 	intel_rc6_fini(&gt->rc6);
@@ -164,9 +155,13 @@ int intel_gt_resume(struct intel_gt *gt)
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
 	intel_gt_pm_get(gt);
+
 	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
 	intel_rc6_sanitize(&gt->rc6);
+
+	intel_rps_enable(&gt->rps);
+	intel_llc_enable(&gt->llc);
 
 	for_each_engine(engine, gt, id) {
 		struct intel_context *ce;
 
@@ -217,8 +212,11 @@ void intel_gt_suspend(struct intel_gt *gt)
 	/* We expect to be idle already; but also want to be independent */
 	wait_for_idle(gt);
 
-	with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
+		intel_rps_disable(&gt->rps);
 		intel_rc6_disable(&gt->rc6);
+		intel_llc_disable(&gt->llc);
+	}
 }
 
 void intel_gt_runtime_suspend(struct intel_gt *gt)
@@ -39,7 +39,6 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
 void intel_gt_pm_init(struct intel_gt *gt);
-void intel_gt_pm_disable(struct intel_gt *gt);
 void intel_gt_pm_fini(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
@@ -20,6 +20,7 @@
 #include "intel_llc_types.h"
 #include "intel_reset_types.h"
 #include "intel_rc6_types.h"
+#include "intel_rps_types.h"
 #include "intel_wakeref.h"
 
 struct drm_i915_private;
@@ -73,6 +74,7 @@ struct intel_gt {
 
 	struct intel_llc llc;
 	struct intel_rc6 rc6;
+	struct intel_rps rps;
 
 	ktime_t last_init_time;
 
@@ -48,7 +48,7 @@ static bool get_ia_constants(struct intel_llc *llc,
 			     struct ia_constants *consts)
 {
 	struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
-	struct intel_rps *rps = &i915->gt_pm.rps;
+	struct intel_rps *rps = &llc_to_gt(llc)->rps;
 
 	if (rps->max_freq <= rps->min_freq)
 		return false;
 
drivers/gpu/drm/i915/gt/intel_rps.c: new file, 1872 lines (diff suppressed because it is too large)
drivers/gpu/drm/i915/gt/intel_rps.h: new file, 37 lines
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_H
+#define INTEL_RPS_H
+
+#include "intel_rps_types.h"
+
+struct i915_request;
+
+void intel_rps_init(struct intel_rps *rps);
+
+void intel_rps_driver_register(struct intel_rps *rps);
+void intel_rps_driver_unregister(struct intel_rps *rps);
+
+void intel_rps_enable(struct intel_rps *rps);
+void intel_rps_disable(struct intel_rps *rps);
+
+void intel_rps_park(struct intel_rps *rps);
+void intel_rps_unpark(struct intel_rps *rps);
+void intel_rps_boost(struct i915_request *rq);
+
+int intel_rps_set(struct intel_rps *rps, u8 val);
+void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive);
+
+int intel_gpu_freq(struct intel_rps *rps, int val);
+int intel_freq_opcode(struct intel_rps *rps, int val);
+u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1);
+
+void gen5_rps_irq_handler(struct intel_rps *rps);
+void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
+
+#endif /* INTEL_RPS_H */
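Taken together with the intel_gt_pm.c hunks above, the new interface is driven over a GT's lifetime roughly as follows. This is a condensed sketch assembled from the changes in this patch, not a verbatim excerpt; the wrapper function is hypothetical:

/* Sketch: how the GT power management code above drives the new API. */
static void gt_rps_lifecycle_sketch(struct intel_gt *gt)
{
	intel_rps_init(&gt->rps);		/* intel_gt_pm_init() */
	intel_rps_driver_register(&gt->rps);	/* intel_gt_driver_register() */

	intel_rps_enable(&gt->rps);		/* intel_gt_resume() */
	intel_rps_unpark(&gt->rps);		/* __gt_unpark(): GT becomes busy */
	intel_rps_park(&gt->rps);		/* __gt_park(): GT goes idle */
	intel_rps_disable(&gt->rps);		/* intel_gt_suspend() */

	intel_rps_driver_unregister(&gt->rps);	/* intel_gt_driver_unregister() */
}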
drivers/gpu/drm/i915/gt/intel_rps_types.h: new file, 93 lines
@@ -0,0 +1,93 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_RPS_TYPES_H
+#define INTEL_RPS_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/ktime.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_ips {
+	u64 last_count1;
+	unsigned long last_time1;
+	unsigned long chipset_power;
+	u64 last_count2;
+	u64 last_time2;
+	unsigned long gfx_power;
+	u8 corr;
+
+	int c, m;
+};
+
+struct intel_rps_ei {
+	ktime_t ktime;
+	u32 render_c0;
+	u32 media_c0;
+};
+
+struct intel_rps {
+	struct mutex lock; /* protects enabling and the worker */
+
+	/*
+	 * work, interrupts_enabled and pm_iir are protected by
+	 * dev_priv->irq_lock
+	 */
+	struct work_struct work;
+	bool enabled;
+	bool active;
+	u32 pm_iir;
+
+	/* PM interrupt bits that should never be masked */
+	u32 pm_intrmsk_mbz;
+	u32 pm_events;
+
+	/* Frequencies are stored in potentially platform dependent multiples.
+	 * In other words, *_freq needs to be multiplied by X to be interesting.
+	 * Soft limits are those which are used for the dynamic reclocking done
+	 * by the driver (raise frequencies under heavy loads, and lower for
+	 * lighter loads). Hard limits are those imposed by the hardware.
+	 *
+	 * A distinction is made for overclocking, which is never enabled by
+	 * default, and is considered to be above the hard limit if it's
+	 * possible at all.
+	 */
+	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
+	u8 last_freq;		/* Last SWREQ frequency */
+	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
+	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
+	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
+	u8 min_freq;		/* AKA RPn. Minimum frequency */
+	u8 boost_freq;		/* Frequency to request when wait boosting */
+	u8 idle_freq;		/* Frequency to request when we are idle */
+	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
+	u8 rp1_freq;		/* "less than" RP0 power/freqency */
+	u8 rp0_freq;		/* Non-overclocked max frequency. */
+	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
+
+	int last_adj;
+
+	struct {
+		struct mutex mutex;
+
+		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
+		unsigned int interactive;
+
+		u8 up_threshold;   /* Current %busy required to uplock */
+		u8 down_threshold; /* Current %busy required to downclock */
+	} power;
+
+	atomic_t num_waiters;
+	atomic_t boosts;
+
+	/* manual wa residency calculations */
+	struct intel_rps_ei ei;
+	struct intel_ips ips;
+};
+
+#endif /* INTEL_RPS_TYPES_H */
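Note the comment in the struct above: the u8 frequency fields are stored in platform-dependent hardware units, not MHz. Every consumer touched by this patch therefore converts through intel_gpu_freq() before reporting, as in this sketch of the pattern used by the debugfs and sysfs hunks below (the helper name is illustrative):

/* Illustrative helper: convert the cached RPS request to MHz for display. */
static int rps_cur_freq_mhz(struct intel_rps *rps)
{
	return intel_gpu_freq(rps, rps->cur_freq); /* hw units -> MHz */
}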
@@ -6,6 +6,7 @@
 
 #include "intel_pm.h" /* intel_gpu_freq() */
 #include "selftest_llc.h"
+#include "intel_rps.h"
 
 static int gen6_verify_ring_freq(struct intel_llc *llc)
 {
@@ -25,6 +26,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 	for (gpu_freq = consts.min_gpu_freq;
 	     gpu_freq <= consts.max_gpu_freq;
 	     gpu_freq++) {
+		struct intel_rps *rps = &llc_to_gt(llc)->rps;
+
 		unsigned int ia_freq, ring_freq, found;
 		u32 val;
 
@@ -44,7 +47,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 		if (found != ia_freq) {
 			pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n",
 			       gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
-			       intel_gpu_freq(i915, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+			       intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
 			       found, ia_freq);
 			err = -EINVAL;
 			break;
@@ -54,7 +57,7 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
 		if (found != ring_freq) {
 			pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n",
 			       gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq,
-			       intel_gpu_freq(i915, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
+			       intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)),
 			       found, ring_freq);
 			err = -EINVAL;
 			break;
 
@@ -1011,7 +1011,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 
 static void guc_interrupts_capture(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->i915->gt_pm.rps;
+	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -1057,7 +1057,7 @@ static void guc_interrupts_capture(struct intel_gt *gt)
 
 static void guc_interrupts_release(struct intel_gt *gt)
 {
-	struct intel_rps *rps = &gt->i915->gt_pm.rps;
+	struct intel_rps *rps = &gt->rps;
 	struct intel_uncore *uncore = gt->uncore;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
@@ -44,6 +44,7 @@
 #include "gt/intel_gt_requests.h"
 #include "gt/intel_reset.h"
 #include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
 #include "gt/uc/intel_guc_submission.h"
 
 #include "i915_debugfs.h"
@@ -791,7 +792,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_uncore *uncore = &dev_priv->uncore;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	intel_wakeref_t wakeref;
 	int ret = 0;
 
@@ -827,23 +828,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
 
 		seq_printf(m, "actual GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+			   intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
 
 		seq_printf(m, "current GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 
 		seq_printf(m, "max GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "min GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 
 		seq_printf(m, "idle GPU freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		u32 rp_state_limits;
 		u32 gt_perf_status;
@@ -877,7 +878,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 			else
 				reqf >>= 25;
 		}
-		reqf = intel_gpu_freq(dev_priv, reqf);
+		reqf = intel_gpu_freq(rps, reqf);
 
 		rpmodectl = I915_READ(GEN6_RP_CONTROL);
 		rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD);
@@ -890,8 +891,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
 		rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
 		rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
-		cagf = intel_gpu_freq(dev_priv,
-				      intel_get_cagf(dev_priv, rpstat));
+		cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat));
 
 		intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
 
@@ -968,37 +968,37 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (rp_state_cap & 0xff00) >> 8;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 
 		max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
 			    rp_state_cap >> 0) & 0xff;
 		max_freq *= (IS_GEN9_BC(dev_priv) ||
 			     INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
 		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, max_freq));
+			   intel_gpu_freq(rps, max_freq));
 		seq_printf(m, "Max overclocked frequency: %dMHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 
 		seq_printf(m, "Current freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->cur_freq));
+			   intel_gpu_freq(rps, rps->cur_freq));
 		seq_printf(m, "Actual freq: %d MHz\n", cagf);
 		seq_printf(m, "Idle freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->idle_freq));
+			   intel_gpu_freq(rps, rps->idle_freq));
 		seq_printf(m, "Min freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->min_freq));
+			   intel_gpu_freq(rps, rps->min_freq));
 		seq_printf(m, "Boost freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->boost_freq));
+			   intel_gpu_freq(rps, rps->boost_freq));
 		seq_printf(m, "Max freq: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->max_freq));
+			   intel_gpu_freq(rps, rps->max_freq));
 		seq_printf(m,
 			   "efficient (RPe) frequency: %d MHz\n",
-			   intel_gpu_freq(dev_priv, rps->efficient_freq));
+			   intel_gpu_freq(rps, rps->efficient_freq));
 	} else {
 		seq_puts(m, "no P-state info available\n");
 	}
@@ -1375,7 +1375,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 static int i915_ring_freq_table(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	unsigned int max_gpu_freq, min_gpu_freq;
 	intel_wakeref_t wakeref;
 	int gpu_freq, ia_freq;
@@ -1400,7 +1400,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 				       GEN6_PCODE_READ_MIN_FREQ_TABLE,
 				       &ia_freq, NULL);
 		seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
-			   intel_gpu_freq(dev_priv, (gpu_freq *
+			   intel_gpu_freq(rps,
+					  (gpu_freq *
 					   (IS_GEN9_BC(dev_priv) ||
 					    INTEL_GEN(dev_priv) >= 10 ?
 					    GEN9_FREQ_SCALER : 1))),
@@ -1631,7 +1632,7 @@ static const char *rps_power_to_str(unsigned int power)
 static int i915_rps_boost_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	u32 act_freq = rps->cur_freq;
 	intel_wakeref_t wakeref;
 
@@ -1643,7 +1644,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 			vlv_punit_put(dev_priv);
 			act_freq = (act_freq >> 8) & 0xff;
 		} else {
-			act_freq = intel_get_cagf(dev_priv,
+			act_freq = intel_get_cagf(rps,
 						  I915_READ(GEN6_RPSTAT1));
 		}
 	}
@@ -1654,17 +1655,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 		   atomic_read(&rps->num_waiters));
 	seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive));
 	seq_printf(m, "Frequency requested %d, actual %d\n",
-		   intel_gpu_freq(dev_priv, rps->cur_freq),
-		   intel_gpu_freq(dev_priv, act_freq));
+		   intel_gpu_freq(rps, rps->cur_freq),
+		   intel_gpu_freq(rps, act_freq));
 	seq_printf(m, "  min hard:%d, soft:%d; max soft:%d, hard:%d\n",
-		   intel_gpu_freq(dev_priv, rps->min_freq),
-		   intel_gpu_freq(dev_priv, rps->min_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq_softlimit),
-		   intel_gpu_freq(dev_priv, rps->max_freq));
+		   intel_gpu_freq(rps, rps->min_freq),
+		   intel_gpu_freq(rps, rps->min_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq_softlimit),
+		   intel_gpu_freq(rps, rps->max_freq));
 	seq_printf(m, "  idle:%d, efficient:%d, boost:%d\n",
-		   intel_gpu_freq(dev_priv, rps->idle_freq),
-		   intel_gpu_freq(dev_priv, rps->efficient_freq),
-		   intel_gpu_freq(dev_priv, rps->boost_freq));
+		   intel_gpu_freq(rps, rps->idle_freq),
+		   intel_gpu_freq(rps, rps->efficient_freq),
+		   intel_gpu_freq(rps, rps->boost_freq));
 
 	seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
 
@@ -1794,7 +1794,6 @@ static int i915_drm_resume(struct drm_device *dev)
 	int ret;
 
 	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
-	intel_gt_pm_disable(&dev_priv->gt);
 
 	i915_gem_sanitize(dev_priv);
 
@@ -1925,8 +1924,6 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_display_power_resume_early(dev_priv);
 
-	intel_gt_pm_disable(&dev_priv->gt);
-
 	intel_power_domains_resume(dev_priv);
 
 	intel_gt_sanitize(&dev_priv->gt, true);
 
@@ -545,94 +545,6 @@ struct i915_suspend_saved_registers {
 
 struct vlv_s0ix_state;
 
-struct intel_rps_ei {
-	ktime_t ktime;
-	u32 render_c0;
-	u32 media_c0;
-};
-
-struct intel_rps {
-	struct mutex lock; /* protects enabling and the worker */
-
-	/*
-	 * work, interrupts_enabled and pm_iir are protected by
-	 * dev_priv->irq_lock
-	 */
-	struct work_struct work;
-	bool interrupts_enabled;
-	u32 pm_iir;
-
-	/* PM interrupt bits that should never be masked */
-	u32 pm_intrmsk_mbz;
-
-	/* Frequencies are stored in potentially platform dependent multiples.
-	 * In other words, *_freq needs to be multiplied by X to be interesting.
-	 * Soft limits are those which are used for the dynamic reclocking done
-	 * by the driver (raise frequencies under heavy loads, and lower for
-	 * lighter loads). Hard limits are those imposed by the hardware.
-	 *
-	 * A distinction is made for overclocking, which is never enabled by
-	 * default, and is considered to be above the hard limit if it's
-	 * possible at all.
-	 */
-	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
-	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
-	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
-	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
-	u8 min_freq;		/* AKA RPn. Minimum frequency */
-	u8 boost_freq;		/* Frequency to request when wait boosting */
-	u8 idle_freq;		/* Frequency to request when we are idle */
-	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
-	u8 rp1_freq;		/* "less than" RP0 power/freqency */
-	u8 rp0_freq;		/* Non-overclocked max frequency. */
-	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
-
-	int last_adj;
-
-	struct {
-		struct mutex mutex;
-
-		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
-		unsigned int interactive;
-
-		u8 up_threshold;   /* Current %busy required to uplock */
-		u8 down_threshold; /* Current %busy required to downclock */
-	} power;
-
-	bool enabled;
-	atomic_t num_waiters;
-	atomic_t boosts;
-
-	/* manual wa residency calculations */
-	struct intel_rps_ei ei;
-};
-
-struct intel_gen6_power_mgmt {
-	struct intel_rps rps;
-};
-
-/* defined intel_pm.c */
-extern spinlock_t mchdev_lock;
-
-struct intel_ilk_power_mgmt {
-	u8 cur_delay;
-	u8 min_delay;
-	u8 max_delay;
-	u8 fmax;
-	u8 fstart;
-
-	u64 last_count1;
-	unsigned long last_time1;
-	unsigned long chipset_power;
-	u64 last_count2;
-	u64 last_time2;
-	unsigned long gfx_power;
-	u8 corr;
-
-	int c_m;
-	int r_t;
-};
-
 #define MAX_L3_SLICES 2
 struct intel_l3_parity {
 	u32 *remap_info[MAX_L3_SLICES];
@@ -1069,7 +981,6 @@ struct drm_i915_private {
 		u32 irq_mask;
 		u32 de_irq_mask[I915_MAX_PIPES];
 	};
-	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
 	struct i915_hotplug hotplug;
@@ -1209,13 +1120,6 @@ struct drm_i915_private {
 	 */
 	u32 edram_size_mb;
 
-	/* gen6+ GT PM state */
-	struct intel_gen6_power_mgmt gt_pm;
-
-	/* ilk-only ips/rps state. Everything in here is protected by the global
-	 * mchdev_lock in intel_pm.c */
-	struct intel_ilk_power_mgmt ips;
-
 	struct i915_power_domains power_domains;
 
 	struct i915_psr psr;
 
@@ -52,6 +52,7 @@
 #include "gt/intel_mocs.h"
 #include "gt/intel_reset.h"
 #include "gt/intel_renderstate.h"
+#include "gt/intel_rps.h"
 #include "gt/intel_workarounds.h"
 
 #include "i915_drv.h"
@@ -1269,8 +1270,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 		goto err_context;
 	}
 
-	intel_init_gt_powersave(dev_priv);
-
 	intel_uc_init(&dev_priv->gt.uc);
 
 	ret = intel_gt_init_hw(&dev_priv->gt);
 
@@ -45,6 +45,7 @@
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm_irq.h"
+#include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_irq.h"
@@ -327,87 +328,6 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
 	return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
 }
 
-void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_gt *gt = &dev_priv->gt;
-
-	spin_lock_irq(&gt->irq_lock);
-
-	while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM))
-		;
-
-	dev_priv->gt_pm.rps.pm_iir = 0;
-
-	spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_gt *gt = &dev_priv->gt;
-
-	spin_lock_irq(&gt->irq_lock);
-	gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS);
-	dev_priv->gt_pm.rps.pm_iir = 0;
-	spin_unlock_irq(&gt->irq_lock);
-}
-
-void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_gt *gt = &dev_priv->gt;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-
-	if (READ_ONCE(rps->interrupts_enabled))
-		return;
-
-	spin_lock_irq(&gt->irq_lock);
-	WARN_ON_ONCE(rps->pm_iir);
-
-	if (INTEL_GEN(dev_priv) >= 11)
-		WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM));
-	else
-		WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
-
-	rps->interrupts_enabled = true;
-	gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events);
-
-	spin_unlock_irq(&gt->irq_lock);
-}
-
-u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask)
-{
-	return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz;
-}
-
-void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	struct intel_gt *gt = &dev_priv->gt;
-
-	if (!READ_ONCE(rps->interrupts_enabled))
-		return;
-
-	spin_lock_irq(&gt->irq_lock);
-	rps->interrupts_enabled = false;
-
-	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));
-
-	gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS);
-
-	spin_unlock_irq(&gt->irq_lock);
-	intel_synchronize_irq(dev_priv);
-
-	/* Now that we will not be generating any more work, flush any
-	 * outstanding tasks. As we are called on the RPS idle path,
-	 * we will reset the GPU to minimum frequencies, so the current
-	 * state of the worker can be discarded.
-	 */
-	cancel_work_sync(&rps->work);
-	if (INTEL_GEN(dev_priv) >= 11)
-		gen11_reset_rps_interrupts(dev_priv);
-	else
-		gen6_reset_rps_interrupts(dev_priv);
-}
-
 void gen9_reset_guc_interrupts(struct intel_guc *guc)
 {
 	struct intel_gt *gt = guc_to_gt(guc);
@@ -1065,199 +985,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
 	return position;
 }
 
-static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
-{
-	struct intel_uncore *uncore = &dev_priv->uncore;
-	u32 busy_up, busy_down, max_avg, min_avg;
-	u8 new_delay;
-
-	spin_lock(&mchdev_lock);
-
-	intel_uncore_write16(uncore,
-			     MEMINTRSTS,
-			     intel_uncore_read(uncore, MEMINTRSTS));
-
-	new_delay = dev_priv->ips.cur_delay;
-
-	intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG);
-	busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG);
-	busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG);
-	max_avg = intel_uncore_read(uncore, RCBMAXAVG);
-	min_avg = intel_uncore_read(uncore, RCBMINAVG);
-
-	/* Handle RCS change request from hw */
-	if (busy_up > max_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.cur_delay - 1;
-		if (new_delay < dev_priv->ips.max_delay)
-			new_delay = dev_priv->ips.max_delay;
-	} else if (busy_down < min_avg) {
-		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.cur_delay + 1;
-		if (new_delay > dev_priv->ips.min_delay)
-			new_delay = dev_priv->ips.min_delay;
-	}
-
-	if (ironlake_set_drps(dev_priv, new_delay))
-		dev_priv->ips.cur_delay = new_delay;
-
-	spin_unlock(&mchdev_lock);
-
-	return;
-}
-
-static void vlv_c0_read(struct drm_i915_private *dev_priv,
-			struct intel_rps_ei *ei)
-{
-	ei->ktime = ktime_get_raw();
-	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
-	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
-}
-
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
-{
-	memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei));
-}
-
-static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	const struct intel_rps_ei *prev = &rps->ei;
-	struct intel_rps_ei now;
-	u32 events = 0;
-
-	if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
-		return 0;
-
-	vlv_c0_read(dev_priv, &now);
-
-	if (prev->ktime) {
-		u64 time, c0;
-		u32 render, media;
-
-		time = ktime_us_delta(now.ktime, prev->ktime);
-
-		time *= dev_priv->czclk_freq;
-
-		/* Workload can be split between render + media,
-		 * e.g. SwapBuffers being blitted in X after being rendered in
-		 * mesa. To account for this we need to combine both engines
-		 * into our activity counter.
-		 */
-		render = now.render_c0 - prev->render_c0;
-		media = now.media_c0 - prev->media_c0;
-		c0 = max(render, media);
-		c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
-
-		if (c0 > time * rps->power.up_threshold)
-			events = GEN6_PM_RP_UP_THRESHOLD;
-		else if (c0 < time * rps->power.down_threshold)
-			events = GEN6_PM_RP_DOWN_THRESHOLD;
-	}
-
-	rps->ei = now;
-	return events;
-}
-
-static void gen6_pm_rps_work(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, struct drm_i915_private, gt_pm.rps.work);
-	struct intel_gt *gt = &dev_priv->gt;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	bool client_boost = false;
-	int new_delay, adj, min, max;
-	u32 pm_iir = 0;
-
-	spin_lock_irq(&gt->irq_lock);
-	if (rps->interrupts_enabled) {
-		pm_iir = fetch_and_zero(&rps->pm_iir);
-		client_boost = atomic_read(&rps->num_waiters);
-	}
-	spin_unlock_irq(&gt->irq_lock);
-
-	/* Make sure we didn't queue anything we're not going to process. */
-	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
-		goto out;
-
-	mutex_lock(&rps->lock);
-
-	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
-
-	adj = rps->last_adj;
-	new_delay = rps->cur_freq;
-	min = rps->min_freq_softlimit;
-	max = rps->max_freq_softlimit;
-	if (client_boost)
-		max = rps->max_freq;
-	if (client_boost && new_delay < rps->boost_freq) {
-		new_delay = rps->boost_freq;
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
-		if (adj > 0)
-			adj *= 2;
-		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
-
-		if (new_delay >= rps->max_freq_softlimit)
-			adj = 0;
-	} else if (client_boost) {
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
-		if (rps->cur_freq > rps->efficient_freq)
-			new_delay = rps->efficient_freq;
-		else if (rps->cur_freq > rps->min_freq_softlimit)
-			new_delay = rps->min_freq_softlimit;
-		adj = 0;
-	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
-		if (adj < 0)
-			adj *= 2;
-		else /* CHV needs even encode values */
-			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
-
-		if (new_delay <= rps->min_freq_softlimit)
-			adj = 0;
-	} else { /* unknown event */
-		adj = 0;
-	}
-
-	rps->last_adj = adj;
-
-	/*
-	 * Limit deboosting and boosting to keep ourselves at the extremes
-	 * when in the respective power modes (i.e. slowly decrease frequencies
-	 * while in the HIGH_POWER zone and slowly increase frequencies while
-	 * in the LOW_POWER zone). On idle, we will hit the timeout and drop
-	 * to the next level quickly, and conversely if busy we expect to
-	 * hit a waitboost and rapidly switch into max power.
-	 */
-	if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
-	    (adj > 0 && rps->power.mode == LOW_POWER))
-		rps->last_adj = 0;
-
-	/* sysfs frequency interfaces may have snuck in while servicing the
-	 * interrupt
-	 */
-	new_delay += adj;
-	new_delay = clamp_t(int, new_delay, min, max);
-
-	if (intel_set_rps(dev_priv, new_delay)) {
-		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
-		rps->last_adj = 0;
-	}
-
-	mutex_unlock(&rps->lock);
-
-out:
-	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
-	spin_lock_irq(&gt->irq_lock);
-	if (rps->interrupts_enabled)
-		gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events);
-	spin_unlock_irq(&gt->irq_lock);
-}
-
 /**
  * ivybridge_parity_work - Workqueue called when a parity error interrupt
  * occurred.
@@ -1631,54 +1358,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
 			 res1, res2);
 }
 
-/* The RPS events need forcewake, so we add them to a work queue and mask their
- * IMR bits until the work is done. Other interrupts can be processed without
- * the work queue. */
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir)
-{
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_rps *rps = &i915->gt_pm.rps;
-	const u32 events = i915->pm_rps_events & pm_iir;
-
-	lockdep_assert_held(&gt->irq_lock);
-
-	if (unlikely(!events))
-		return;
-
-	gen6_gt_pm_mask_irq(gt, events);
-
-	if (!rps->interrupts_enabled)
-		return;
-
-	rps->pm_iir |= events;
-	schedule_work(&rps->work);
-}
-
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
-{
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	struct intel_gt *gt = &dev_priv->gt;
-
-	if (pm_iir & dev_priv->pm_rps_events) {
-		spin_lock(&gt->irq_lock);
-		gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events);
-		if (rps->interrupts_enabled) {
-			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
-			schedule_work(&rps->work);
-		}
-		spin_unlock(&gt->irq_lock);
-	}
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		return;
-
-	if (pm_iir & PM_VEBOX_USER_INTERRUPT)
-		intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
-
-	if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
-		DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
-}
-
 static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
 {
 	enum pipe pipe;
@@ -1989,7 +1668,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
 		if (gt_iir)
 			gen6_gt_irq_handler(&dev_priv->gt, gt_iir);
 		if (pm_iir)
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
 
 		if (hotplug_status)
 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -2393,7 +2072,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
 	}
 
 	if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT)
-		ironlake_rps_change_irq_handler(dev_priv);
+		gen5_rps_irq_handler(&dev_priv->gt.rps);
 }
 
 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
@@ -2498,7 +2177,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
 		if (pm_iir) {
 			I915_WRITE(GEN6_PMIIR, pm_iir);
 			ret = IRQ_HANDLED;
-			gen6_rps_irq_handler(dev_priv, pm_iir);
+			gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir);
 		}
 	}
 
@@ -4281,13 +3960,10 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 void intel_irq_init(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
 	int i;
 
 	intel_hpd_init_work(dev_priv);
 
-	INIT_WORK(&rps->work, gen6_pm_rps_work);
-
 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
 	for (i = 0; i < MAX_L3_SLICES; ++i)
 		dev_priv->l3_parity.remap_info[i] = NULL;
@@ -4296,33 +3972,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 	if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
 		dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
 
-	/* Let's track the enabled rps events */
-	if (IS_VALLEYVIEW(dev_priv))
-		/* WaGsvRC0ResidencyMethod:vlv */
-		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
-	else
-		dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD |
-					   GEN6_PM_RP_DOWN_THRESHOLD |
-					   GEN6_PM_RP_DOWN_TIMEOUT);
-
-	/* We share the register with other engine */
-	if (INTEL_GEN(dev_priv) > 9)
-		GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
-
-	rps->pm_intrmsk_mbz = 0;
-
-	/*
-	 * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer
-	 * if GEN6_PM_UP_EI_EXPIRED is masked.
-	 *
-	 * TODO: verify if this can be reproduced on VLV,CHV.
-	 */
-	if (INTEL_GEN(dev_priv) <= 7)
-		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
-
-	if (INTEL_GEN(dev_priv) >= 8)
-		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
-
 	dev->vblank_disable_immediate = true;
 
 	/* Most platforms treat the display irq block as an always-on
@@ -22,9 +22,6 @@ struct intel_gt;
 struct intel_guc;
 struct intel_uncore;
 
-void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir);
-void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
-
 void intel_irq_init(struct drm_i915_private *dev_priv);
 void intel_irq_fini(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
@@ -12,6 +12,7 @@
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_pmu.h"
@@ -358,25 +359,26 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 	struct drm_i915_private *i915 = gt->i915;
 	struct intel_uncore *uncore = gt->uncore;
 	struct i915_pmu *pmu = &i915->pmu;
+	struct intel_rps *rps = &gt->rps;
 
 	if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) {
 		u32 val;
 
-		val = i915->gt_pm.rps.cur_freq;
+		val = rps->cur_freq;
 		if (intel_gt_pm_get_if_awake(gt)) {
 			val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
-			val = intel_get_cagf(i915, val);
+			val = intel_get_cagf(rps, val);
 			intel_gt_pm_put(gt);
 		}
 
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
-				intel_gpu_freq(i915, val),
+				intel_gpu_freq(rps, val),
 				period_ns / 1000);
 	}
 
 	if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) {
 		add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
-				intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq),
+				intel_gpu_freq(rps, rps->cur_freq),
 				period_ns / 1000);
 	}
 }
@@ -32,6 +32,7 @@
 #include "gem/i915_gem_context.h"
 #include "gt/intel_context.h"
 #include "gt/intel_ring.h"
+#include "gt/intel_rps.h"
 
 #include "i915_active.h"
 #include "i915_drv.h"
@@ -258,8 +259,8 @@ bool i915_request_retire(struct i915_request *rq)
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
 		i915_request_cancel_breadcrumb(rq);
 	if (i915_request_has_waitboost(rq)) {
-		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
-		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+		GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters));
+		atomic_dec(&rq->engine->gt->rps.num_waiters);
 	}
 	if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) {
 		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
@@ -1467,7 +1468,7 @@ long i915_request_wait(struct i915_request *rq,
 	 */
 	if (flags & I915_WAIT_PRIORITY) {
 		if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
-			gen6_rps_boost(rq);
+			intel_rps_boost(rq);
 		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
 	}
 
@@ -31,6 +31,7 @@
 #include <linux/sysfs.h>
 
 #include "gt/intel_rc6.h"
+#include "gt/intel_rps.h"
 
 #include "i915_drv.h"
 #include "i915_sysfs.h"
@@ -259,6 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	intel_wakeref_t wakeref;
 	u32 freq;
 
@@ -271,31 +273,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
 
 		freq = (freq >> 8) & 0xff;
 	} else {
-		freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1));
+		freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1));
 	}
 
 	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq));
+	return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq));
 }
 
 static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
 				    struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.cur_freq));
+			intel_gpu_freq(rps, rps->cur_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.boost_freq));
+			intel_gpu_freq(rps, rps->boost_freq));
 }
 
 static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
@@ -303,7 +305,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 				       const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	bool boost = false;
 	ssize_t ret;
 	u32 val;
@@ -313,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
 		return ret;
 
 	/* Validate against (static) hardware limits */
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq || val > rps->max_freq)
 		return -EINVAL;
 
@@ -333,19 +335,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 				     struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.efficient_freq));
+			intel_gpu_freq(rps, rps->efficient_freq));
 }
 
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.max_freq_softlimit));
+			intel_gpu_freq(rps, rps->max_freq_softlimit));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -353,19 +355,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	intel_wakeref_t wakeref;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	mutex_lock(&rps->lock);
 
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val < rps->min_freq_softlimit) {
@@ -375,7 +375,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 
 	if (val > rps->rp0_freq)
 		DRM_DEBUG("User requested overclocking to %d\n",
-			  intel_gpu_freq(dev_priv, val));
+			  intel_gpu_freq(rps, val));
 
 	rps->max_freq_softlimit = val;
 
@@ -383,14 +383,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
 				  rps->min_freq_softlimit,
 				  rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new max_delay and
+	/*
+	 * We still need *_set_rps to process the new max_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
 unlock:
 	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 	return ret ?: count;
 }
@@ -398,10 +399,10 @@ unlock:
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+	struct intel_rps *rps = &dev_priv->gt.rps;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-			intel_gpu_freq(dev_priv,
-				       dev_priv->gt_pm.rps.min_freq_softlimit));
+			intel_gpu_freq(rps, rps->min_freq_softlimit));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
@@ -409,19 +410,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				     const char *buf, size_t count)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
-	intel_wakeref_t wakeref;
-	u32 val;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	ssize_t ret;
+	u32 val;
 
 	ret = kstrtou32(buf, 0, &val);
 	if (ret)
 		return ret;
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
 	mutex_lock(&rps->lock);
 
-	val = intel_freq_opcode(dev_priv, val);
+	val = intel_freq_opcode(rps, val);
 	if (val < rps->min_freq ||
 	    val > rps->max_freq ||
 	    val > rps->max_freq_softlimit) {
@@ -435,14 +434,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
 				  rps->min_freq_softlimit,
 				  rps->max_freq_softlimit);
 
-	/* We still need *_set_rps to process the new min_delay and
+	/*
+	 * We still need *_set_rps to process the new min_delay and
 	 * update the interrupt limits and PMINTRMSK even though
-	 * frequency request may be unchanged. */
-	ret = intel_set_rps(dev_priv, val);
+	 * frequency request may be unchanged.
+	 */
+	intel_rps_set(rps, val);
 
 unlock:
 	mutex_unlock(&rps->lock);
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
 
 	return ret ?: count;
 }
@@ -464,15 +464,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
 	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-	struct intel_rps *rps = &dev_priv->gt_pm.rps;
+	struct intel_rps *rps = &dev_priv->gt.rps;
 	u32 val;
 
 	if (attr == &dev_attr_gt_RP0_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp0_freq);
+		val = intel_gpu_freq(rps, rps->rp0_freq);
 	else if (attr == &dev_attr_gt_RP1_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->rp1_freq);
+		val = intel_gpu_freq(rps, rps->rp1_freq);
 	else if (attr == &dev_attr_gt_RPn_freq_mhz)
-		val = intel_gpu_freq(dev_priv, rps->min_freq);
+		val = intel_gpu_freq(rps, rps->min_freq);
 	else
 		BUG();
 
File diff suppressed because it is too large
@@ -29,15 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc);
 void intel_init_pm(struct drm_i915_private *dev_priv);
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
 void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq);
 void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
 void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
@@ -67,19 +58,6 @@ bool ilk_disable_lp_wm(struct drm_device *dev);
 void intel_init_ipc(struct drm_i915_private *dev_priv);
 void intel_enable_ipc(struct drm_i915_private *dev_priv);
 
-int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
-int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
-
-u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);
-
-unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
-unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
-unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
-void i915_update_gfx_val(struct drm_i915_private *dev_priv);
-
-bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
-int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
-void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive);
 bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable);
 
 #endif /* __INTEL_PM_H__ */