mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-25 12:04:46 +08:00
aed5ed5959
There is a chance if a frequent switch of the governor done in a loop result in timer list corruption where timer cancel being done from two place one from cancel_delayed_work_sync() and followed by expire_timers() can be seen from the traces[1]. while true do echo "simple_ondemand" > /sys/class/devfreq/1d84000.ufshc/governor echo "performance" > /sys/class/devfreq/1d84000.ufshc/governor done It looks to be issue with devfreq driver where device_monitor_[start/stop] need to synchronized so that delayed work should get corrupted while it is either being queued or running or being cancelled. Let's use polling flag and devfreq lock to synchronize the queueing the timer instance twice and work data being corrupted. [1] ... .. <idle>-0 [003] 9436.209662: timer_cancel timer=0xffffff80444f0428 <idle>-0 [003] 9436.209664: timer_expire_entry timer=0xffffff80444f0428 now=0x10022da1c function=__typeid__ZTSFvP10timer_listE_global_addr baseclk=0x10022da1c <idle>-0 [003] 9436.209718: timer_expire_exit timer=0xffffff80444f0428 kworker/u16:6-14217 [003] 9436.209863: timer_start timer=0xffffff80444f0428 function=__typeid__ZTSFvP10timer_listE_global_addr expires=0x10022da2b now=0x10022da1c flags=182452227 vendor.xxxyyy.ha-1593 [004] 9436.209888: timer_cancel timer=0xffffff80444f0428 vendor.xxxyyy.ha-1593 [004] 9436.216390: timer_init timer=0xffffff80444f0428 vendor.xxxyyy.ha-1593 [004] 9436.216392: timer_start timer=0xffffff80444f0428 function=__typeid__ZTSFvP10timer_listE_global_addr expires=0x10022da2c now=0x10022da1d flags=186646532 vendor.xxxyyy.ha-1593 [005] 9436.220992: timer_cancel timer=0xffffff80444f0428 xxxyyyTraceManag-7795 [004] 9436.261641: timer_cancel timer=0xffffff80444f0428 [2] 9436.261653][ C4] Unable to handle kernel paging request at virtual address dead00000000012a [ 9436.261664][ C4] Mem abort info: [ 9436.261666][ C4] ESR = 0x96000044 [ 9436.261669][ C4] EC = 0x25: DABT (current EL), IL = 32 bits [ 9436.261671][ C4] SET = 0, FnV = 0 [ 9436.261673][ C4] EA = 0, S1PTW = 0 [ 9436.261675][ C4] Data abort info: [ 9436.261677][ C4] ISV = 0, ISS = 0x00000044 [ 9436.261680][ C4] CM = 0, WnR = 1 [ 9436.261682][ C4] [dead00000000012a] address between user and kernel address ranges [ 9436.261685][ C4] Internal error: Oops: 96000044 [#1] PREEMPT SMP [ 9436.261701][ C4] Skip md ftrace buffer dump for: 0x3a982d0 ... [ 9436.262138][ C4] CPU: 4 PID: 7795 Comm: TraceManag Tainted: G S W O 5.10.149-android12-9-o-g17f915d29d0c #1 [ 9436.262141][ C4] Hardware name: Qualcomm Technologies, Inc. 
(DT) [ 9436.262144][ C4] pstate: 22400085 (nzCv daIf +PAN -UAO +TCO BTYPE=--) [ 9436.262161][ C4] pc : expire_timers+0x9c/0x438 [ 9436.262164][ C4] lr : expire_timers+0x2a4/0x438 [ 9436.262168][ C4] sp : ffffffc010023dd0 [ 9436.262171][ C4] x29: ffffffc010023df0 x28: ffffffd0636fdc18 [ 9436.262178][ C4] x27: ffffffd063569dd0 x26: ffffffd063536008 [ 9436.262182][ C4] x25: 0000000000000001 x24: ffffff88f7c69280 [ 9436.262185][ C4] x23: 00000000000000e0 x22: dead000000000122 [ 9436.262188][ C4] x21: 000000010022da29 x20: ffffff8af72b4e80 [ 9436.262191][ C4] x19: ffffffc010023e50 x18: ffffffc010025038 [ 9436.262195][ C4] x17: 0000000000000240 x16: 0000000000000201 [ 9436.262199][ C4] x15: ffffffffffffffff x14: ffffff889f3c3100 [ 9436.262203][ C4] x13: ffffff889f3c3100 x12: 00000000049f56b8 [ 9436.262207][ C4] x11: 00000000049f56b8 x10: 00000000ffffffff [ 9436.262212][ C4] x9 : ffffffc010023e50 x8 : dead000000000122 [ 9436.262216][ C4] x7 : ffffffffffffffff x6 : ffffffc0100239d8 [ 9436.262220][ C4] x5 : 0000000000000000 x4 : 0000000000000101 [ 9436.262223][ C4] x3 : 0000000000000080 x2 : ffffff889edc155c [ 9436.262227][ C4] x1 : ffffff8001005200 x0 : ffffff80444f0428 [ 9436.262232][ C4] Call trace: [ 9436.262236][ C4] expire_timers+0x9c/0x438 [ 9436.262240][ C4] __run_timers+0x1f0/0x330 [ 9436.262245][ C4] run_timer_softirq+0x28/0x58 [ 9436.262255][ C4] efi_header_end+0x168/0x5ec [ 9436.262265][ C4] __irq_exit_rcu+0x108/0x124 [ 9436.262274][ C4] __handle_domain_irq+0x118/0x1e4 [ 9436.262282][ C4] gic_handle_irq.30369+0x6c/0x2bc [ 9436.262286][ C4] el0_irq_naked+0x60/0x6c Link: https://lore.kernel.org/all/1700860318-4025-1-git-send-email-quic_mojha@quicinc.com/ Reported-by: Joyyoung Huang <huangzaiyang@oppo.com> Acked-by: MyungJoo Ham <myungjoo.ham@samsung.com> Signed-off-by: Mukesh Ojha <quic_mojha@quicinc.com> Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
2294 lines
58 KiB
C
2294 lines
58 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
|
|
* for Non-CPU Devices.
|
|
*
|
|
* Copyright (C) 2011 Samsung Electronics
|
|
* MyungJoo Ham <myungjoo.ham@samsung.com>
|
|
*/
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/kmod.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/debugfs.h>
|
|
#include <linux/devfreq_cooling.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/err.h>
|
|
#include <linux/init.h>
|
|
#include <linux/export.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/stat.h>
|
|
#include <linux/pm_opp.h>
|
|
#include <linux/devfreq.h>
|
|
#include <linux/workqueue.h>
|
|
#include <linux/platform_device.h>
|
|
#include <linux/list.h>
|
|
#include <linux/printk.h>
|
|
#include <linux/hrtimer.h>
|
|
#include <linux/of.h>
|
|
#include <linux/pm_qos.h>
|
|
#include <linux/units.h>
|
|
#include "governor.h"
|
|
|
|
#define CREATE_TRACE_POINTS
|
|
#include <trace/events/devfreq.h>
|
|
|
|
#define IS_SUPPORTED_FLAG(f, name) ((f & DEVFREQ_GOV_FLAG_##name) ? true : false)
|
|
#define IS_SUPPORTED_ATTR(f, name) ((f & DEVFREQ_GOV_ATTR_##name) ? true : false)
|
|
|
|
static struct class *devfreq_class;
|
|
static struct dentry *devfreq_debugfs;
|
|
|
|
/*
|
|
* devfreq core provides delayed work based load monitoring helper
|
|
* functions. Governors can use these or can implement their own
|
|
* monitoring mechanism.
|
|
*/
|
|
static struct workqueue_struct *devfreq_wq;
|
|
|
|
/* The list of all device-devfreq governors */
|
|
static LIST_HEAD(devfreq_governor_list);
|
|
/* The list of all device-devfreq */
|
|
static LIST_HEAD(devfreq_list);
|
|
static DEFINE_MUTEX(devfreq_list_lock);
|
|
|
|
static const char timer_name[][DEVFREQ_NAME_LEN] = {
|
|
[DEVFREQ_TIMER_DEFERRABLE] = { "deferrable" },
|
|
[DEVFREQ_TIMER_DELAYED] = { "delayed" },
|
|
};
|
|
|
|
/**
|
|
* find_device_devfreq() - find devfreq struct using device pointer
|
|
* @dev: device pointer used to lookup device devfreq.
|
|
*
|
|
* Search the list of device devfreqs and return the matched device's
|
|
* devfreq info. devfreq_list_lock should be held by the caller.
|
|
*/
|
|
static struct devfreq *find_device_devfreq(struct device *dev)
|
|
{
|
|
struct devfreq *tmp_devfreq;
|
|
|
|
lockdep_assert_held(&devfreq_list_lock);
|
|
|
|
if (IS_ERR_OR_NULL(dev)) {
|
|
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
|
|
if (tmp_devfreq->dev.parent == dev)
|
|
return tmp_devfreq;
|
|
}
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
|
|
static unsigned long find_available_min_freq(struct devfreq *devfreq)
|
|
{
|
|
struct dev_pm_opp *opp;
|
|
unsigned long min_freq = 0;
|
|
|
|
opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &min_freq, 0);
|
|
if (IS_ERR(opp))
|
|
min_freq = 0;
|
|
else
|
|
dev_pm_opp_put(opp);
|
|
|
|
return min_freq;
|
|
}
|
|
|
|
static unsigned long find_available_max_freq(struct devfreq *devfreq)
|
|
{
|
|
struct dev_pm_opp *opp;
|
|
unsigned long max_freq = ULONG_MAX;
|
|
|
|
opp = dev_pm_opp_find_freq_floor_indexed(devfreq->dev.parent, &max_freq, 0);
|
|
if (IS_ERR(opp))
|
|
max_freq = 0;
|
|
else
|
|
dev_pm_opp_put(opp);
|
|
|
|
return max_freq;
|
|
}
|
|
|
|
/**
|
|
* devfreq_get_freq_range() - Get the current freq range
|
|
* @devfreq: the devfreq instance
|
|
* @min_freq: the min frequency
|
|
* @max_freq: the max frequency
|
|
*
|
|
* This takes into consideration all constraints.
|
|
*/
|
|
void devfreq_get_freq_range(struct devfreq *devfreq,
|
|
unsigned long *min_freq,
|
|
unsigned long *max_freq)
|
|
{
|
|
unsigned long *freq_table = devfreq->freq_table;
|
|
s32 qos_min_freq, qos_max_freq;
|
|
|
|
lockdep_assert_held(&devfreq->lock);
|
|
|
|
/*
|
|
* Initialize minimum/maximum frequency from freq table.
|
|
* The devfreq drivers can initialize this in either ascending or
|
|
* descending order and devfreq core supports both.
|
|
*/
|
|
if (freq_table[0] < freq_table[devfreq->max_state - 1]) {
|
|
*min_freq = freq_table[0];
|
|
*max_freq = freq_table[devfreq->max_state - 1];
|
|
} else {
|
|
*min_freq = freq_table[devfreq->max_state - 1];
|
|
*max_freq = freq_table[0];
|
|
}
|
|
|
|
/* Apply constraints from PM QoS */
|
|
qos_min_freq = dev_pm_qos_read_value(devfreq->dev.parent,
|
|
DEV_PM_QOS_MIN_FREQUENCY);
|
|
qos_max_freq = dev_pm_qos_read_value(devfreq->dev.parent,
|
|
DEV_PM_QOS_MAX_FREQUENCY);
|
|
*min_freq = max(*min_freq, (unsigned long)HZ_PER_KHZ * qos_min_freq);
|
|
if (qos_max_freq != PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE)
|
|
*max_freq = min(*max_freq,
|
|
(unsigned long)HZ_PER_KHZ * qos_max_freq);
|
|
|
|
/* Apply constraints from OPP interface */
|
|
*min_freq = max(*min_freq, devfreq->scaling_min_freq);
|
|
*max_freq = min(*max_freq, devfreq->scaling_max_freq);
|
|
|
|
if (*min_freq > *max_freq)
|
|
*min_freq = *max_freq;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_get_freq_range);
|
|
|
|
/**
|
|
* devfreq_get_freq_level() - Lookup freq_table for the frequency
|
|
* @devfreq: the devfreq instance
|
|
* @freq: the target frequency
|
|
*/
|
|
static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
|
|
{
|
|
int lev;
|
|
|
|
for (lev = 0; lev < devfreq->max_state; lev++)
|
|
if (freq == devfreq->freq_table[lev])
|
|
return lev;
|
|
|
|
return -EINVAL;
|
|
}
|
|
|
|
static int set_freq_table(struct devfreq *devfreq)
|
|
{
|
|
struct dev_pm_opp *opp;
|
|
unsigned long freq;
|
|
int i, count;
|
|
|
|
/* Initialize the freq_table from OPP table */
|
|
count = dev_pm_opp_get_opp_count(devfreq->dev.parent);
|
|
if (count <= 0)
|
|
return -EINVAL;
|
|
|
|
devfreq->max_state = count;
|
|
devfreq->freq_table = devm_kcalloc(devfreq->dev.parent,
|
|
devfreq->max_state,
|
|
sizeof(*devfreq->freq_table),
|
|
GFP_KERNEL);
|
|
if (!devfreq->freq_table)
|
|
return -ENOMEM;
|
|
|
|
for (i = 0, freq = 0; i < devfreq->max_state; i++, freq++) {
|
|
opp = dev_pm_opp_find_freq_ceil_indexed(devfreq->dev.parent, &freq, 0);
|
|
if (IS_ERR(opp)) {
|
|
devm_kfree(devfreq->dev.parent, devfreq->freq_table);
|
|
return PTR_ERR(opp);
|
|
}
|
|
dev_pm_opp_put(opp);
|
|
devfreq->freq_table[i] = freq;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
/**
|
|
* devfreq_update_status() - Update statistics of devfreq behavior
|
|
* @devfreq: the devfreq instance
|
|
* @freq: the update target frequency
|
|
*/
|
|
int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
|
|
{
|
|
int lev, prev_lev, ret = 0;
|
|
u64 cur_time;
|
|
|
|
lockdep_assert_held(&devfreq->lock);
|
|
cur_time = get_jiffies_64();
|
|
|
|
/* Immediately exit if previous_freq is not initialized yet. */
|
|
if (!devfreq->previous_freq)
|
|
goto out;
|
|
|
|
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
|
|
if (prev_lev < 0) {
|
|
ret = prev_lev;
|
|
goto out;
|
|
}
|
|
|
|
devfreq->stats.time_in_state[prev_lev] +=
|
|
cur_time - devfreq->stats.last_update;
|
|
|
|
lev = devfreq_get_freq_level(devfreq, freq);
|
|
if (lev < 0) {
|
|
ret = lev;
|
|
goto out;
|
|
}
|
|
|
|
if (lev != prev_lev) {
|
|
devfreq->stats.trans_table[
|
|
(prev_lev * devfreq->max_state) + lev]++;
|
|
devfreq->stats.total_trans++;
|
|
}
|
|
|
|
out:
|
|
devfreq->stats.last_update = cur_time;
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_update_status);
|
|
|
|
/**
|
|
* find_devfreq_governor() - find devfreq governor from name
|
|
* @name: name of the governor
|
|
*
|
|
* Search the list of devfreq governors and return the matched
|
|
* governor's pointer. devfreq_list_lock should be held by the caller.
|
|
*/
|
|
static struct devfreq_governor *find_devfreq_governor(const char *name)
|
|
{
|
|
struct devfreq_governor *tmp_governor;
|
|
|
|
lockdep_assert_held(&devfreq_list_lock);
|
|
|
|
if (IS_ERR_OR_NULL(name)) {
|
|
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
|
|
if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
|
|
return tmp_governor;
|
|
}
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
|
|
/**
|
|
* try_then_request_governor() - Try to find the governor and request the
|
|
* module if is not found.
|
|
* @name: name of the governor
|
|
*
|
|
* Search the list of devfreq governors and request the module and try again
|
|
* if is not found. This can happen when both drivers (the governor driver
|
|
* and the driver that call devfreq_add_device) are built as modules.
|
|
* devfreq_list_lock should be held by the caller. Returns the matched
|
|
* governor's pointer or an error pointer.
|
|
*/
|
|
static struct devfreq_governor *try_then_request_governor(const char *name)
|
|
{
|
|
struct devfreq_governor *governor;
|
|
int err = 0;
|
|
|
|
lockdep_assert_held(&devfreq_list_lock);
|
|
|
|
if (IS_ERR_OR_NULL(name)) {
|
|
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
governor = find_devfreq_governor(name);
|
|
if (IS_ERR(governor)) {
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
if (!strncmp(name, DEVFREQ_GOV_SIMPLE_ONDEMAND,
|
|
DEVFREQ_NAME_LEN))
|
|
err = request_module("governor_%s", "simpleondemand");
|
|
else
|
|
err = request_module("governor_%s", name);
|
|
/* Restore previous state before return */
|
|
mutex_lock(&devfreq_list_lock);
|
|
if (err)
|
|
return (err < 0) ? ERR_PTR(err) : ERR_PTR(-EINVAL);
|
|
|
|
governor = find_devfreq_governor(name);
|
|
}
|
|
|
|
return governor;
|
|
}
|
|
|
|
static int devfreq_notify_transition(struct devfreq *devfreq,
|
|
struct devfreq_freqs *freqs, unsigned int state)
|
|
{
|
|
if (!devfreq)
|
|
return -EINVAL;
|
|
|
|
switch (state) {
|
|
case DEVFREQ_PRECHANGE:
|
|
srcu_notifier_call_chain(&devfreq->transition_notifier_list,
|
|
DEVFREQ_PRECHANGE, freqs);
|
|
break;
|
|
|
|
case DEVFREQ_POSTCHANGE:
|
|
srcu_notifier_call_chain(&devfreq->transition_notifier_list,
|
|
DEVFREQ_POSTCHANGE, freqs);
|
|
break;
|
|
default:
|
|
return -EINVAL;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
|
|
u32 flags)
|
|
{
|
|
struct devfreq_freqs freqs;
|
|
unsigned long cur_freq;
|
|
int err = 0;
|
|
|
|
if (devfreq->profile->get_cur_freq)
|
|
devfreq->profile->get_cur_freq(devfreq->dev.parent, &cur_freq);
|
|
else
|
|
cur_freq = devfreq->previous_freq;
|
|
|
|
freqs.old = cur_freq;
|
|
freqs.new = new_freq;
|
|
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
|
|
|
|
err = devfreq->profile->target(devfreq->dev.parent, &new_freq, flags);
|
|
if (err) {
|
|
freqs.new = cur_freq;
|
|
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
|
|
return err;
|
|
}
|
|
|
|
/*
|
|
* Print devfreq_frequency trace information between DEVFREQ_PRECHANGE
|
|
* and DEVFREQ_POSTCHANGE because for showing the correct frequency
|
|
* change order of between devfreq device and passive devfreq device.
|
|
*/
|
|
if (trace_devfreq_frequency_enabled() && new_freq != cur_freq)
|
|
trace_devfreq_frequency(devfreq, new_freq, cur_freq);
|
|
|
|
freqs.new = new_freq;
|
|
devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
|
|
|
|
if (devfreq_update_status(devfreq, new_freq))
|
|
dev_warn(&devfreq->dev,
|
|
"Couldn't update frequency transition information.\n");
|
|
|
|
devfreq->previous_freq = new_freq;
|
|
|
|
if (devfreq->suspend_freq)
|
|
devfreq->resume_freq = new_freq;
|
|
|
|
return err;
|
|
}
|
|
|
|
/**
|
|
* devfreq_update_target() - Reevaluate the device and configure frequency
|
|
* on the final stage.
|
|
* @devfreq: the devfreq instance.
|
|
* @freq: the new frequency of parent device. This argument
|
|
* is only used for devfreq device using passive governor.
|
|
*
|
|
* Note: Lock devfreq->lock before calling devfreq_update_target. This function
|
|
* should be only used by both update_devfreq() and devfreq governors.
|
|
*/
|
|
int devfreq_update_target(struct devfreq *devfreq, unsigned long freq)
|
|
{
|
|
unsigned long min_freq, max_freq;
|
|
int err = 0;
|
|
u32 flags = 0;
|
|
|
|
lockdep_assert_held(&devfreq->lock);
|
|
|
|
if (!devfreq->governor)
|
|
return -EINVAL;
|
|
|
|
/* Reevaluate the proper frequency */
|
|
err = devfreq->governor->get_target_freq(devfreq, &freq);
|
|
if (err)
|
|
return err;
|
|
devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
|
|
|
|
if (freq < min_freq) {
|
|
freq = min_freq;
|
|
flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
|
|
}
|
|
if (freq > max_freq) {
|
|
freq = max_freq;
|
|
flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
|
|
}
|
|
|
|
return devfreq_set_target(devfreq, freq, flags);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_update_target);
|
|
|
|
/* Load monitoring helper functions for governors use */
|
|
|
|
/**
|
|
* update_devfreq() - Reevaluate the device and configure frequency.
|
|
* @devfreq: the devfreq instance.
|
|
*
|
|
* Note: Lock devfreq->lock before calling update_devfreq
|
|
* This function is exported for governors.
|
|
*/
|
|
int update_devfreq(struct devfreq *devfreq)
|
|
{
|
|
return devfreq_update_target(devfreq, 0L);
|
|
}
|
|
EXPORT_SYMBOL(update_devfreq);
|
|
|
|
/**
|
|
* devfreq_monitor() - Periodically poll devfreq objects.
|
|
* @work: the work struct used to run devfreq_monitor periodically.
|
|
*
|
|
*/
|
|
static void devfreq_monitor(struct work_struct *work)
|
|
{
|
|
int err;
|
|
struct devfreq *devfreq = container_of(work,
|
|
struct devfreq, work.work);
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
err = update_devfreq(devfreq);
|
|
if (err)
|
|
dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
|
|
|
|
if (devfreq->stop_polling)
|
|
goto out;
|
|
|
|
queue_delayed_work(devfreq_wq, &devfreq->work,
|
|
msecs_to_jiffies(devfreq->profile->polling_ms));
|
|
|
|
out:
|
|
mutex_unlock(&devfreq->lock);
|
|
trace_devfreq_monitor(devfreq);
|
|
}
|
|
|
|
/**
|
|
* devfreq_monitor_start() - Start load monitoring of devfreq instance
|
|
* @devfreq: the devfreq instance.
|
|
*
|
|
* Helper function for starting devfreq device load monitoring. By default,
|
|
* deferrable timer is used for load monitoring. But the users can change this
|
|
* behavior using the "timer" type in devfreq_dev_profile. This function will be
|
|
* called by devfreq governor in response to the DEVFREQ_GOV_START event
|
|
* generated while adding a device to the devfreq framework.
|
|
*/
|
|
void devfreq_monitor_start(struct devfreq *devfreq)
|
|
{
|
|
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
|
|
return;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
if (delayed_work_pending(&devfreq->work))
|
|
goto out;
|
|
|
|
switch (devfreq->profile->timer) {
|
|
case DEVFREQ_TIMER_DEFERRABLE:
|
|
INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
|
|
break;
|
|
case DEVFREQ_TIMER_DELAYED:
|
|
INIT_DELAYED_WORK(&devfreq->work, devfreq_monitor);
|
|
break;
|
|
default:
|
|
goto out;
|
|
}
|
|
|
|
if (devfreq->profile->polling_ms)
|
|
queue_delayed_work(devfreq_wq, &devfreq->work,
|
|
msecs_to_jiffies(devfreq->profile->polling_ms));
|
|
|
|
out:
|
|
devfreq->stop_polling = false;
|
|
mutex_unlock(&devfreq->lock);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_monitor_start);
|
|
|
|
/**
|
|
* devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
|
|
* @devfreq: the devfreq instance.
|
|
*
|
|
* Helper function to stop devfreq device load monitoring. Function
|
|
* to be called from governor in response to DEVFREQ_GOV_STOP
|
|
* event when device is removed from devfreq framework.
|
|
*/
|
|
void devfreq_monitor_stop(struct devfreq *devfreq)
|
|
{
|
|
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
|
|
return;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
if (devfreq->stop_polling) {
|
|
mutex_unlock(&devfreq->lock);
|
|
return;
|
|
}
|
|
|
|
devfreq->stop_polling = true;
|
|
mutex_unlock(&devfreq->lock);
|
|
cancel_delayed_work_sync(&devfreq->work);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_monitor_stop);
|
|
|
|
/**
|
|
* devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
|
|
* @devfreq: the devfreq instance.
|
|
*
|
|
* Helper function to suspend devfreq device load monitoring. Function
|
|
* to be called from governor in response to DEVFREQ_GOV_SUSPEND
|
|
* event or when polling interval is set to zero.
|
|
*
|
|
* Note: Though this function is same as devfreq_monitor_stop(),
|
|
* intentionally kept separate to provide hooks for collecting
|
|
* transition statistics.
|
|
*/
|
|
void devfreq_monitor_suspend(struct devfreq *devfreq)
|
|
{
|
|
mutex_lock(&devfreq->lock);
|
|
if (devfreq->stop_polling) {
|
|
mutex_unlock(&devfreq->lock);
|
|
return;
|
|
}
|
|
|
|
devfreq_update_status(devfreq, devfreq->previous_freq);
|
|
devfreq->stop_polling = true;
|
|
mutex_unlock(&devfreq->lock);
|
|
|
|
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
|
|
return;
|
|
|
|
cancel_delayed_work_sync(&devfreq->work);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_monitor_suspend);
|
|
|
|
/**
|
|
* devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
|
|
* @devfreq: the devfreq instance.
|
|
*
|
|
* Helper function to resume devfreq device load monitoring. Function
|
|
* to be called from governor in response to DEVFREQ_GOV_RESUME
|
|
* event or when polling interval is set to non-zero.
|
|
*/
|
|
void devfreq_monitor_resume(struct devfreq *devfreq)
|
|
{
|
|
unsigned long freq;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
|
|
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
|
|
goto out_update;
|
|
|
|
if (!devfreq->stop_polling)
|
|
goto out;
|
|
|
|
if (!delayed_work_pending(&devfreq->work) &&
|
|
devfreq->profile->polling_ms)
|
|
queue_delayed_work(devfreq_wq, &devfreq->work,
|
|
msecs_to_jiffies(devfreq->profile->polling_ms));
|
|
|
|
out_update:
|
|
devfreq->stats.last_update = get_jiffies_64();
|
|
devfreq->stop_polling = false;
|
|
|
|
if (devfreq->profile->get_cur_freq &&
|
|
!devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
|
|
devfreq->previous_freq = freq;
|
|
|
|
out:
|
|
mutex_unlock(&devfreq->lock);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_monitor_resume);
|
|
|
|
/**
|
|
* devfreq_update_interval() - Update device devfreq monitoring interval
|
|
* @devfreq: the devfreq instance.
|
|
* @delay: new polling interval to be set.
|
|
*
|
|
* Helper function to set new load monitoring polling interval. Function
|
|
* to be called from governor in response to DEVFREQ_GOV_UPDATE_INTERVAL event.
|
|
*/
|
|
void devfreq_update_interval(struct devfreq *devfreq, unsigned int *delay)
|
|
{
|
|
unsigned int cur_delay = devfreq->profile->polling_ms;
|
|
unsigned int new_delay = *delay;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
devfreq->profile->polling_ms = new_delay;
|
|
|
|
if (IS_SUPPORTED_FLAG(devfreq->governor->flags, IRQ_DRIVEN))
|
|
goto out;
|
|
|
|
if (devfreq->stop_polling)
|
|
goto out;
|
|
|
|
/* if new delay is zero, stop polling */
|
|
if (!new_delay) {
|
|
mutex_unlock(&devfreq->lock);
|
|
cancel_delayed_work_sync(&devfreq->work);
|
|
return;
|
|
}
|
|
|
|
/* if current delay is zero, start polling with new delay */
|
|
if (!cur_delay) {
|
|
queue_delayed_work(devfreq_wq, &devfreq->work,
|
|
msecs_to_jiffies(devfreq->profile->polling_ms));
|
|
goto out;
|
|
}
|
|
|
|
/* if current delay is greater than new delay, restart polling */
|
|
if (cur_delay > new_delay) {
|
|
mutex_unlock(&devfreq->lock);
|
|
cancel_delayed_work_sync(&devfreq->work);
|
|
mutex_lock(&devfreq->lock);
|
|
if (!devfreq->stop_polling)
|
|
queue_delayed_work(devfreq_wq, &devfreq->work,
|
|
msecs_to_jiffies(devfreq->profile->polling_ms));
|
|
}
|
|
out:
|
|
mutex_unlock(&devfreq->lock);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_update_interval);
|
|
|
|
/**
|
|
* devfreq_notifier_call() - Notify that the device frequency requirements
|
|
* has been changed out of devfreq framework.
|
|
* @nb: the notifier_block (supposed to be devfreq->nb)
|
|
* @type: not used
|
|
* @devp: not used
|
|
*
|
|
* Called by a notifier that uses devfreq->nb.
|
|
*/
|
|
static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
|
|
void *devp)
|
|
{
|
|
struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
|
|
int err = -EINVAL;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
|
|
devfreq->scaling_min_freq = find_available_min_freq(devfreq);
|
|
if (!devfreq->scaling_min_freq)
|
|
goto out;
|
|
|
|
devfreq->scaling_max_freq = find_available_max_freq(devfreq);
|
|
if (!devfreq->scaling_max_freq) {
|
|
devfreq->scaling_max_freq = ULONG_MAX;
|
|
goto out;
|
|
}
|
|
|
|
err = update_devfreq(devfreq);
|
|
|
|
out:
|
|
mutex_unlock(&devfreq->lock);
|
|
if (err)
|
|
dev_err(devfreq->dev.parent,
|
|
"failed to update frequency from OPP notifier (%d)\n",
|
|
err);
|
|
|
|
return NOTIFY_OK;
|
|
}
|
|
|
|
/**
|
|
* qos_notifier_call() - Common handler for QoS constraints.
|
|
* @devfreq: the devfreq instance.
|
|
*/
|
|
static int qos_notifier_call(struct devfreq *devfreq)
|
|
{
|
|
int err;
|
|
|
|
mutex_lock(&devfreq->lock);
|
|
err = update_devfreq(devfreq);
|
|
mutex_unlock(&devfreq->lock);
|
|
if (err)
|
|
dev_err(devfreq->dev.parent,
|
|
"failed to update frequency from PM QoS (%d)\n",
|
|
err);
|
|
|
|
return NOTIFY_OK;
|
|
}
|
|
|
|
/**
|
|
* qos_min_notifier_call() - Callback for QoS min_freq changes.
|
|
* @nb: Should be devfreq->nb_min
|
|
* @val: not used
|
|
* @ptr: not used
|
|
*/
|
|
static int qos_min_notifier_call(struct notifier_block *nb,
|
|
unsigned long val, void *ptr)
|
|
{
|
|
return qos_notifier_call(container_of(nb, struct devfreq, nb_min));
|
|
}
|
|
|
|
/**
|
|
* qos_max_notifier_call() - Callback for QoS max_freq changes.
|
|
* @nb: Should be devfreq->nb_max
|
|
* @val: not used
|
|
* @ptr: not used
|
|
*/
|
|
static int qos_max_notifier_call(struct notifier_block *nb,
|
|
unsigned long val, void *ptr)
|
|
{
|
|
return qos_notifier_call(container_of(nb, struct devfreq, nb_max));
|
|
}
|
|
|
|
/**
|
|
* devfreq_dev_release() - Callback for struct device to release the device.
|
|
* @dev: the devfreq device
|
|
*
|
|
* Remove devfreq from the list and release its resources.
|
|
*/
|
|
static void devfreq_dev_release(struct device *dev)
|
|
{
|
|
struct devfreq *devfreq = to_devfreq(dev);
|
|
int err;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
list_del(&devfreq->node);
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_max,
|
|
DEV_PM_QOS_MAX_FREQUENCY);
|
|
if (err && err != -ENOENT)
|
|
dev_warn(dev->parent,
|
|
"Failed to remove max_freq notifier: %d\n", err);
|
|
err = dev_pm_qos_remove_notifier(devfreq->dev.parent, &devfreq->nb_min,
|
|
DEV_PM_QOS_MIN_FREQUENCY);
|
|
if (err && err != -ENOENT)
|
|
dev_warn(dev->parent,
|
|
"Failed to remove min_freq notifier: %d\n", err);
|
|
|
|
if (dev_pm_qos_request_active(&devfreq->user_max_freq_req)) {
|
|
err = dev_pm_qos_remove_request(&devfreq->user_max_freq_req);
|
|
if (err < 0)
|
|
dev_warn(dev->parent,
|
|
"Failed to remove max_freq request: %d\n", err);
|
|
}
|
|
if (dev_pm_qos_request_active(&devfreq->user_min_freq_req)) {
|
|
err = dev_pm_qos_remove_request(&devfreq->user_min_freq_req);
|
|
if (err < 0)
|
|
dev_warn(dev->parent,
|
|
"Failed to remove min_freq request: %d\n", err);
|
|
}
|
|
|
|
if (devfreq->profile->exit)
|
|
devfreq->profile->exit(devfreq->dev.parent);
|
|
|
|
if (devfreq->opp_table)
|
|
dev_pm_opp_put_opp_table(devfreq->opp_table);
|
|
|
|
mutex_destroy(&devfreq->lock);
|
|
srcu_cleanup_notifier_head(&devfreq->transition_notifier_list);
|
|
kfree(devfreq);
|
|
}
|
|
|
|
static void create_sysfs_files(struct devfreq *devfreq,
|
|
const struct devfreq_governor *gov);
|
|
static void remove_sysfs_files(struct devfreq *devfreq,
|
|
const struct devfreq_governor *gov);
|
|
|
|
/**
|
|
* devfreq_add_device() - Add devfreq feature to the device
|
|
* @dev: the device to add devfreq feature.
|
|
* @profile: device-specific profile to run devfreq.
|
|
* @governor_name: name of the policy to choose frequency.
|
|
* @data: devfreq driver pass to governors, governor should not change it.
|
|
*/
|
|
struct devfreq *devfreq_add_device(struct device *dev,
|
|
struct devfreq_dev_profile *profile,
|
|
const char *governor_name,
|
|
void *data)
|
|
{
|
|
struct devfreq *devfreq;
|
|
struct devfreq_governor *governor;
|
|
unsigned long min_freq, max_freq;
|
|
int err = 0;
|
|
|
|
if (!dev || !profile || !governor_name) {
|
|
dev_err(dev, "%s: Invalid parameters.\n", __func__);
|
|
return ERR_PTR(-EINVAL);
|
|
}
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
devfreq = find_device_devfreq(dev);
|
|
mutex_unlock(&devfreq_list_lock);
|
|
if (!IS_ERR(devfreq)) {
|
|
dev_err(dev, "%s: devfreq device already exists!\n",
|
|
__func__);
|
|
err = -EINVAL;
|
|
goto err_out;
|
|
}
|
|
|
|
devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
|
|
if (!devfreq) {
|
|
err = -ENOMEM;
|
|
goto err_out;
|
|
}
|
|
|
|
mutex_init(&devfreq->lock);
|
|
mutex_lock(&devfreq->lock);
|
|
devfreq->dev.parent = dev;
|
|
devfreq->dev.class = devfreq_class;
|
|
devfreq->dev.release = devfreq_dev_release;
|
|
INIT_LIST_HEAD(&devfreq->node);
|
|
devfreq->profile = profile;
|
|
devfreq->previous_freq = profile->initial_freq;
|
|
devfreq->last_status.current_frequency = profile->initial_freq;
|
|
devfreq->data = data;
|
|
devfreq->nb.notifier_call = devfreq_notifier_call;
|
|
|
|
if (devfreq->profile->timer < 0
|
|
|| devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = -EINVAL;
|
|
goto err_dev;
|
|
}
|
|
|
|
if (!devfreq->profile->max_state || !devfreq->profile->freq_table) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = set_freq_table(devfreq);
|
|
if (err < 0)
|
|
goto err_dev;
|
|
mutex_lock(&devfreq->lock);
|
|
} else {
|
|
devfreq->freq_table = devfreq->profile->freq_table;
|
|
devfreq->max_state = devfreq->profile->max_state;
|
|
}
|
|
|
|
devfreq->scaling_min_freq = find_available_min_freq(devfreq);
|
|
if (!devfreq->scaling_min_freq) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = -EINVAL;
|
|
goto err_dev;
|
|
}
|
|
|
|
devfreq->scaling_max_freq = find_available_max_freq(devfreq);
|
|
if (!devfreq->scaling_max_freq) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = -EINVAL;
|
|
goto err_dev;
|
|
}
|
|
|
|
devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
|
|
|
|
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
|
|
devfreq->opp_table = dev_pm_opp_get_opp_table(dev);
|
|
if (IS_ERR(devfreq->opp_table))
|
|
devfreq->opp_table = NULL;
|
|
|
|
atomic_set(&devfreq->suspend_count, 0);
|
|
|
|
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
|
|
err = device_register(&devfreq->dev);
|
|
if (err) {
|
|
mutex_unlock(&devfreq->lock);
|
|
put_device(&devfreq->dev);
|
|
goto err_out;
|
|
}
|
|
|
|
devfreq->stats.trans_table = devm_kzalloc(&devfreq->dev,
|
|
array3_size(sizeof(unsigned int),
|
|
devfreq->max_state,
|
|
devfreq->max_state),
|
|
GFP_KERNEL);
|
|
if (!devfreq->stats.trans_table) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = -ENOMEM;
|
|
goto err_devfreq;
|
|
}
|
|
|
|
devfreq->stats.time_in_state = devm_kcalloc(&devfreq->dev,
|
|
devfreq->max_state,
|
|
sizeof(*devfreq->stats.time_in_state),
|
|
GFP_KERNEL);
|
|
if (!devfreq->stats.time_in_state) {
|
|
mutex_unlock(&devfreq->lock);
|
|
err = -ENOMEM;
|
|
goto err_devfreq;
|
|
}
|
|
|
|
devfreq->stats.total_trans = 0;
|
|
devfreq->stats.last_update = get_jiffies_64();
|
|
|
|
srcu_init_notifier_head(&devfreq->transition_notifier_list);
|
|
|
|
mutex_unlock(&devfreq->lock);
|
|
|
|
err = dev_pm_qos_add_request(dev, &devfreq->user_min_freq_req,
|
|
DEV_PM_QOS_MIN_FREQUENCY, 0);
|
|
if (err < 0)
|
|
goto err_devfreq;
|
|
err = dev_pm_qos_add_request(dev, &devfreq->user_max_freq_req,
|
|
DEV_PM_QOS_MAX_FREQUENCY,
|
|
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
|
|
if (err < 0)
|
|
goto err_devfreq;
|
|
|
|
devfreq->nb_min.notifier_call = qos_min_notifier_call;
|
|
err = dev_pm_qos_add_notifier(dev, &devfreq->nb_min,
|
|
DEV_PM_QOS_MIN_FREQUENCY);
|
|
if (err)
|
|
goto err_devfreq;
|
|
|
|
devfreq->nb_max.notifier_call = qos_max_notifier_call;
|
|
err = dev_pm_qos_add_notifier(dev, &devfreq->nb_max,
|
|
DEV_PM_QOS_MAX_FREQUENCY);
|
|
if (err)
|
|
goto err_devfreq;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
|
|
governor = try_then_request_governor(governor_name);
|
|
if (IS_ERR(governor)) {
|
|
dev_err(dev, "%s: Unable to find governor for the device\n",
|
|
__func__);
|
|
err = PTR_ERR(governor);
|
|
goto err_init;
|
|
}
|
|
|
|
devfreq->governor = governor;
|
|
err = devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_START,
|
|
NULL);
|
|
if (err) {
|
|
dev_err_probe(dev, err,
|
|
"%s: Unable to start governor for the device\n",
|
|
__func__);
|
|
goto err_init;
|
|
}
|
|
create_sysfs_files(devfreq, devfreq->governor);
|
|
|
|
list_add(&devfreq->node, &devfreq_list);
|
|
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
if (devfreq->profile->is_cooling_device) {
|
|
devfreq->cdev = devfreq_cooling_em_register(devfreq, NULL);
|
|
if (IS_ERR(devfreq->cdev))
|
|
devfreq->cdev = NULL;
|
|
}
|
|
|
|
return devfreq;
|
|
|
|
err_init:
|
|
mutex_unlock(&devfreq_list_lock);
|
|
err_devfreq:
|
|
devfreq_remove_device(devfreq);
|
|
devfreq = NULL;
|
|
err_dev:
|
|
kfree(devfreq);
|
|
err_out:
|
|
return ERR_PTR(err);
|
|
}
|
|
EXPORT_SYMBOL(devfreq_add_device);
|
|
|
|
/**
|
|
* devfreq_remove_device() - Remove devfreq feature from a device.
|
|
* @devfreq: the devfreq instance to be removed
|
|
*
|
|
* The opposite of devfreq_add_device().
|
|
*/
|
|
int devfreq_remove_device(struct devfreq *devfreq)
|
|
{
|
|
if (!devfreq)
|
|
return -EINVAL;
|
|
|
|
devfreq_cooling_unregister(devfreq->cdev);
|
|
|
|
if (devfreq->governor) {
|
|
devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_STOP, NULL);
|
|
remove_sysfs_files(devfreq, devfreq->governor);
|
|
}
|
|
|
|
device_unregister(&devfreq->dev);
|
|
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_remove_device);
|
|
|
|
static int devm_devfreq_dev_match(struct device *dev, void *res, void *data)
|
|
{
|
|
struct devfreq **r = res;
|
|
|
|
if (WARN_ON(!r || !*r))
|
|
return 0;
|
|
|
|
return *r == data;
|
|
}
|
|
|
|
static void devm_devfreq_dev_release(struct device *dev, void *res)
|
|
{
|
|
devfreq_remove_device(*(struct devfreq **)res);
|
|
}
|
|
|
|
/**
|
|
* devm_devfreq_add_device() - Resource-managed devfreq_add_device()
|
|
* @dev: the device to add devfreq feature.
|
|
* @profile: device-specific profile to run devfreq.
|
|
* @governor_name: name of the policy to choose frequency.
|
|
* @data: devfreq driver pass to governors, governor should not change it.
|
|
*
|
|
* This function manages automatically the memory of devfreq device using device
|
|
* resource management and simplify the free operation for memory of devfreq
|
|
* device.
|
|
*/
|
|
struct devfreq *devm_devfreq_add_device(struct device *dev,
|
|
struct devfreq_dev_profile *profile,
|
|
const char *governor_name,
|
|
void *data)
|
|
{
|
|
struct devfreq **ptr, *devfreq;
|
|
|
|
ptr = devres_alloc(devm_devfreq_dev_release, sizeof(*ptr), GFP_KERNEL);
|
|
if (!ptr)
|
|
return ERR_PTR(-ENOMEM);
|
|
|
|
devfreq = devfreq_add_device(dev, profile, governor_name, data);
|
|
if (IS_ERR(devfreq)) {
|
|
devres_free(ptr);
|
|
return devfreq;
|
|
}
|
|
|
|
*ptr = devfreq;
|
|
devres_add(dev, ptr);
|
|
|
|
return devfreq;
|
|
}
|
|
EXPORT_SYMBOL(devm_devfreq_add_device);
|
|
|
|
#ifdef CONFIG_OF
|
|
/*
|
|
* devfreq_get_devfreq_by_node - Get the devfreq device from devicetree
|
|
* @node - pointer to device_node
|
|
*
|
|
* return the instance of devfreq device
|
|
*/
|
|
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
|
|
{
|
|
struct devfreq *devfreq;
|
|
|
|
if (!node)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
list_for_each_entry(devfreq, &devfreq_list, node) {
|
|
if (devfreq->dev.parent
|
|
&& device_match_of_node(devfreq->dev.parent, node)) {
|
|
mutex_unlock(&devfreq_list_lock);
|
|
return devfreq;
|
|
}
|
|
}
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
|
|
/*
|
|
* devfreq_get_devfreq_by_phandle - Get the devfreq device from devicetree
|
|
* @dev - instance to the given device
|
|
* @phandle_name - name of property holding a phandle value
|
|
* @index - index into list of devfreq
|
|
*
|
|
* return the instance of devfreq device
|
|
*/
|
|
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
|
|
const char *phandle_name, int index)
|
|
{
|
|
struct device_node *node;
|
|
struct devfreq *devfreq;
|
|
|
|
if (!dev || !phandle_name)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
if (!dev->of_node)
|
|
return ERR_PTR(-EINVAL);
|
|
|
|
node = of_parse_phandle(dev->of_node, phandle_name, index);
|
|
if (!node)
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
devfreq = devfreq_get_devfreq_by_node(node);
|
|
of_node_put(node);
|
|
|
|
return devfreq;
|
|
}
|
|
|
|
#else
|
|
struct devfreq *devfreq_get_devfreq_by_node(struct device_node *node)
|
|
{
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
|
|
struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
|
|
const char *phandle_name, int index)
|
|
{
|
|
return ERR_PTR(-ENODEV);
|
|
}
|
|
#endif /* CONFIG_OF */
|
|
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_node);
|
|
EXPORT_SYMBOL_GPL(devfreq_get_devfreq_by_phandle);
|
|
|
|
/**
|
|
* devm_devfreq_remove_device() - Resource-managed devfreq_remove_device()
|
|
* @dev: the device from which to remove devfreq feature.
|
|
* @devfreq: the devfreq instance to be removed
|
|
*/
|
|
void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq)
|
|
{
|
|
WARN_ON(devres_release(dev, devm_devfreq_dev_release,
|
|
devm_devfreq_dev_match, devfreq));
|
|
}
|
|
EXPORT_SYMBOL(devm_devfreq_remove_device);
|
|
|
|
/**
|
|
* devfreq_suspend_device() - Suspend devfreq of a device.
|
|
* @devfreq: the devfreq instance to be suspended
|
|
*
|
|
* This function is intended to be called by the pm callbacks
|
|
* (e.g., runtime_suspend, suspend) of the device driver that
|
|
* holds the devfreq.
|
|
*/
|
|
int devfreq_suspend_device(struct devfreq *devfreq)
|
|
{
|
|
int ret;
|
|
|
|
if (!devfreq)
|
|
return -EINVAL;
|
|
|
|
if (atomic_inc_return(&devfreq->suspend_count) > 1)
|
|
return 0;
|
|
|
|
if (devfreq->governor) {
|
|
ret = devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_SUSPEND, NULL);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
if (devfreq->suspend_freq) {
|
|
mutex_lock(&devfreq->lock);
|
|
ret = devfreq_set_target(devfreq, devfreq->suspend_freq, 0);
|
|
mutex_unlock(&devfreq->lock);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_suspend_device);
|
|
|
|
/**
|
|
* devfreq_resume_device() - Resume devfreq of a device.
|
|
* @devfreq: the devfreq instance to be resumed
|
|
*
|
|
* This function is intended to be called by the pm callbacks
|
|
* (e.g., runtime_resume, resume) of the device driver that
|
|
* holds the devfreq.
|
|
*/
|
|
int devfreq_resume_device(struct devfreq *devfreq)
|
|
{
|
|
int ret;
|
|
|
|
if (!devfreq)
|
|
return -EINVAL;
|
|
|
|
if (atomic_dec_return(&devfreq->suspend_count) >= 1)
|
|
return 0;
|
|
|
|
if (devfreq->resume_freq) {
|
|
mutex_lock(&devfreq->lock);
|
|
ret = devfreq_set_target(devfreq, devfreq->resume_freq, 0);
|
|
mutex_unlock(&devfreq->lock);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
if (devfreq->governor) {
|
|
ret = devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_RESUME, NULL);
|
|
if (ret)
|
|
return ret;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_resume_device);
|
|
|
|
/**
|
|
* devfreq_suspend() - Suspend devfreq governors and devices
|
|
*
|
|
* Called during system wide Suspend/Hibernate cycles for suspending governors
|
|
* and devices preserving the state for resume. On some platforms the devfreq
|
|
* device must have precise state (frequency) after resume in order to provide
|
|
* fully operating setup.
|
|
*/
|
|
void devfreq_suspend(void)
|
|
{
|
|
struct devfreq *devfreq;
|
|
int ret;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
list_for_each_entry(devfreq, &devfreq_list, node) {
|
|
ret = devfreq_suspend_device(devfreq);
|
|
if (ret)
|
|
dev_err(&devfreq->dev,
|
|
"failed to suspend devfreq device\n");
|
|
}
|
|
mutex_unlock(&devfreq_list_lock);
|
|
}
|
|
|
|
/**
|
|
* devfreq_resume() - Resume devfreq governors and devices
|
|
*
|
|
* Called during system wide Suspend/Hibernate cycle for resuming governors and
|
|
* devices that are suspended with devfreq_suspend().
|
|
*/
|
|
void devfreq_resume(void)
|
|
{
|
|
struct devfreq *devfreq;
|
|
int ret;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
list_for_each_entry(devfreq, &devfreq_list, node) {
|
|
ret = devfreq_resume_device(devfreq);
|
|
if (ret)
|
|
dev_warn(&devfreq->dev,
|
|
"failed to resume devfreq device\n");
|
|
}
|
|
mutex_unlock(&devfreq_list_lock);
|
|
}
|
|
|
|
/**
|
|
* devfreq_add_governor() - Add devfreq governor
|
|
* @governor: the devfreq governor to be added
|
|
*/
|
|
int devfreq_add_governor(struct devfreq_governor *governor)
|
|
{
|
|
struct devfreq_governor *g;
|
|
struct devfreq *devfreq;
|
|
int err = 0;
|
|
|
|
if (!governor) {
|
|
pr_err("%s: Invalid parameters.\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
g = find_devfreq_governor(governor->name);
|
|
if (!IS_ERR(g)) {
|
|
pr_err("%s: governor %s already registered\n", __func__,
|
|
g->name);
|
|
err = -EINVAL;
|
|
goto err_out;
|
|
}
|
|
|
|
list_add(&governor->node, &devfreq_governor_list);
|
|
|
|
list_for_each_entry(devfreq, &devfreq_list, node) {
|
|
int ret = 0;
|
|
struct device *dev = devfreq->dev.parent;
|
|
|
|
if (!strncmp(devfreq->governor->name, governor->name,
|
|
DEVFREQ_NAME_LEN)) {
|
|
/* The following should never occur */
|
|
if (devfreq->governor) {
|
|
dev_warn(dev,
|
|
"%s: Governor %s already present\n",
|
|
__func__, devfreq->governor->name);
|
|
ret = devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_STOP, NULL);
|
|
if (ret) {
|
|
dev_warn(dev,
|
|
"%s: Governor %s stop = %d\n",
|
|
__func__,
|
|
devfreq->governor->name, ret);
|
|
}
|
|
/* Fall through */
|
|
}
|
|
devfreq->governor = governor;
|
|
ret = devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_START, NULL);
|
|
if (ret) {
|
|
dev_warn(dev, "%s: Governor %s start=%d\n",
|
|
__func__, devfreq->governor->name,
|
|
ret);
|
|
}
|
|
}
|
|
}
|
|
|
|
err_out:
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_add_governor);
|
|
|
|
static void devm_devfreq_remove_governor(void *governor)
|
|
{
|
|
WARN_ON(devfreq_remove_governor(governor));
|
|
}
|
|
|
|
/**
|
|
* devm_devfreq_add_governor() - Add devfreq governor
|
|
* @dev: device which adds devfreq governor
|
|
* @governor: the devfreq governor to be added
|
|
*
|
|
* This is a resource-managed variant of devfreq_add_governor().
|
|
*/
|
|
int devm_devfreq_add_governor(struct device *dev,
|
|
struct devfreq_governor *governor)
|
|
{
|
|
int err;
|
|
|
|
err = devfreq_add_governor(governor);
|
|
if (err)
|
|
return err;
|
|
|
|
return devm_add_action_or_reset(dev, devm_devfreq_remove_governor,
|
|
governor);
|
|
}
|
|
EXPORT_SYMBOL(devm_devfreq_add_governor);
|
|
|
|
/**
|
|
* devfreq_remove_governor() - Remove devfreq feature from a device.
|
|
* @governor: the devfreq governor to be removed
|
|
*/
|
|
int devfreq_remove_governor(struct devfreq_governor *governor)
|
|
{
|
|
struct devfreq_governor *g;
|
|
struct devfreq *devfreq;
|
|
int err = 0;
|
|
|
|
if (!governor) {
|
|
pr_err("%s: Invalid parameters.\n", __func__);
|
|
return -EINVAL;
|
|
}
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
g = find_devfreq_governor(governor->name);
|
|
if (IS_ERR(g)) {
|
|
pr_err("%s: governor %s not registered\n", __func__,
|
|
governor->name);
|
|
err = PTR_ERR(g);
|
|
goto err_out;
|
|
}
|
|
list_for_each_entry(devfreq, &devfreq_list, node) {
|
|
int ret;
|
|
struct device *dev = devfreq->dev.parent;
|
|
|
|
if (!strncmp(devfreq->governor->name, governor->name,
|
|
DEVFREQ_NAME_LEN)) {
|
|
/* we should have a devfreq governor! */
|
|
if (!devfreq->governor) {
|
|
dev_warn(dev, "%s: Governor %s NOT present\n",
|
|
__func__, governor->name);
|
|
continue;
|
|
/* Fall through */
|
|
}
|
|
ret = devfreq->governor->event_handler(devfreq,
|
|
DEVFREQ_GOV_STOP, NULL);
|
|
if (ret) {
|
|
dev_warn(dev, "%s: Governor %s stop=%d\n",
|
|
__func__, devfreq->governor->name,
|
|
ret);
|
|
}
|
|
devfreq->governor = NULL;
|
|
}
|
|
}
|
|
|
|
list_del(&governor->node);
|
|
err_out:
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
return err;
|
|
}
|
|
EXPORT_SYMBOL(devfreq_remove_governor);
|
|
|
|
static ssize_t name_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
return sprintf(buf, "%s\n", dev_name(df->dev.parent));
|
|
}
|
|
static DEVICE_ATTR_RO(name);
|
|
|
|
static ssize_t governor_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
|
|
if (!df->governor)
|
|
return -EINVAL;
|
|
|
|
return sprintf(buf, "%s\n", df->governor->name);
|
|
}
|
|
|
|
static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
int ret;
|
|
char str_governor[DEVFREQ_NAME_LEN + 1];
|
|
const struct devfreq_governor *governor, *prev_governor;
|
|
|
|
if (!df->governor)
|
|
return -EINVAL;
|
|
|
|
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
|
|
if (ret != 1)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
governor = try_then_request_governor(str_governor);
|
|
if (IS_ERR(governor)) {
|
|
ret = PTR_ERR(governor);
|
|
goto out;
|
|
}
|
|
if (df->governor == governor) {
|
|
ret = 0;
|
|
goto out;
|
|
} else if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE)
|
|
|| IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE)) {
|
|
ret = -EINVAL;
|
|
goto out;
|
|
}
|
|
|
|
/*
|
|
* Stop the current governor and remove the specific sysfs files
|
|
* which depend on current governor.
|
|
*/
|
|
ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
|
|
if (ret) {
|
|
dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
|
|
__func__, df->governor->name, ret);
|
|
goto out;
|
|
}
|
|
remove_sysfs_files(df, df->governor);
|
|
|
|
/*
|
|
* Start the new governor and create the specific sysfs files
|
|
* which depend on the new governor.
|
|
*/
|
|
prev_governor = df->governor;
|
|
df->governor = governor;
|
|
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
|
|
if (ret) {
|
|
dev_warn(dev, "%s: Governor %s not started(%d)\n",
|
|
__func__, df->governor->name, ret);
|
|
|
|
/* Restore previous governor */
|
|
df->governor = prev_governor;
|
|
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
|
|
if (ret) {
|
|
dev_err(dev,
|
|
"%s: reverting to Governor %s failed (%d)\n",
|
|
__func__, prev_governor->name, ret);
|
|
df->governor = NULL;
|
|
goto out;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* Create the sysfs files for the new governor. But if failed to start
|
|
* the new governor, restore the sysfs files of previous governor.
|
|
*/
|
|
create_sysfs_files(df, df->governor);
|
|
|
|
out:
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
if (!ret)
|
|
ret = count;
|
|
return ret;
|
|
}
|
|
static DEVICE_ATTR_RW(governor);
|
|
|
|
static ssize_t available_governors_show(struct device *d,
|
|
struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(d);
|
|
ssize_t count = 0;
|
|
|
|
if (!df->governor)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&devfreq_list_lock);
|
|
|
|
/*
|
|
* The devfreq with immutable governor (e.g., passive) shows
|
|
* only own governor.
|
|
*/
|
|
if (IS_SUPPORTED_FLAG(df->governor->flags, IMMUTABLE)) {
|
|
count = scnprintf(&buf[count], DEVFREQ_NAME_LEN,
|
|
"%s ", df->governor->name);
|
|
/*
|
|
* The devfreq device shows the registered governor except for
|
|
* immutable governors such as passive governor .
|
|
*/
|
|
} else {
|
|
struct devfreq_governor *governor;
|
|
|
|
list_for_each_entry(governor, &devfreq_governor_list, node) {
|
|
if (IS_SUPPORTED_FLAG(governor->flags, IMMUTABLE))
|
|
continue;
|
|
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
|
|
"%s ", governor->name);
|
|
}
|
|
}
|
|
|
|
mutex_unlock(&devfreq_list_lock);
|
|
|
|
/* Truncate the trailing space */
|
|
if (count)
|
|
count--;
|
|
|
|
count += sprintf(&buf[count], "\n");
|
|
|
|
return count;
|
|
}
|
|
static DEVICE_ATTR_RO(available_governors);
|
|
|
|
static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
unsigned long freq;
|
|
struct devfreq *df = to_devfreq(dev);
|
|
|
|
if (!df->profile)
|
|
return -EINVAL;
|
|
|
|
if (df->profile->get_cur_freq &&
|
|
!df->profile->get_cur_freq(df->dev.parent, &freq))
|
|
return sprintf(buf, "%lu\n", freq);
|
|
|
|
return sprintf(buf, "%lu\n", df->previous_freq);
|
|
}
|
|
static DEVICE_ATTR_RO(cur_freq);
|
|
|
|
static ssize_t target_freq_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
|
|
return sprintf(buf, "%lu\n", df->previous_freq);
|
|
}
|
|
static DEVICE_ATTR_RO(target_freq);
|
|
|
|
static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
unsigned long value;
|
|
int ret;
|
|
|
|
/*
|
|
* Protect against theoretical sysfs writes between
|
|
* device_add and dev_pm_qos_add_request
|
|
*/
|
|
if (!dev_pm_qos_request_active(&df->user_min_freq_req))
|
|
return -EAGAIN;
|
|
|
|
ret = sscanf(buf, "%lu", &value);
|
|
if (ret != 1)
|
|
return -EINVAL;
|
|
|
|
/* Round down to kHz for PM QoS */
|
|
ret = dev_pm_qos_update_request(&df->user_min_freq_req,
|
|
value / HZ_PER_KHZ);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
return count;
|
|
}
|
|
|
|
static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
unsigned long min_freq, max_freq;
|
|
|
|
mutex_lock(&df->lock);
|
|
devfreq_get_freq_range(df, &min_freq, &max_freq);
|
|
mutex_unlock(&df->lock);
|
|
|
|
return sprintf(buf, "%lu\n", min_freq);
|
|
}
|
|
static DEVICE_ATTR_RW(min_freq);
|
|
|
|
static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
unsigned long value;
|
|
int ret;
|
|
|
|
/*
|
|
* Protect against theoretical sysfs writes between
|
|
* device_add and dev_pm_qos_add_request
|
|
*/
|
|
if (!dev_pm_qos_request_active(&df->user_max_freq_req))
|
|
return -EINVAL;
|
|
|
|
ret = sscanf(buf, "%lu", &value);
|
|
if (ret != 1)
|
|
return -EINVAL;
|
|
|
|
/*
|
|
* PM QoS frequencies are in kHz so we need to convert. Convert by
|
|
* rounding upwards so that the acceptable interval never shrinks.
|
|
*
|
|
* For example if the user writes "666666666" to sysfs this value will
|
|
* be converted to 666667 kHz and back to 666667000 Hz before an OPP
|
|
* lookup, this ensures that an OPP of 666666666Hz is still accepted.
|
|
*
|
|
* A value of zero means "no limit".
|
|
*/
|
|
if (value)
|
|
value = DIV_ROUND_UP(value, HZ_PER_KHZ);
|
|
else
|
|
value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
|
|
|
|
ret = dev_pm_qos_update_request(&df->user_max_freq_req, value);
|
|
if (ret < 0)
|
|
return ret;
|
|
|
|
return count;
|
|
}
|
|
|
|
static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
unsigned long min_freq, max_freq;
|
|
|
|
mutex_lock(&df->lock);
|
|
devfreq_get_freq_range(df, &min_freq, &max_freq);
|
|
mutex_unlock(&df->lock);
|
|
|
|
return sprintf(buf, "%lu\n", max_freq);
|
|
}
|
|
static DEVICE_ATTR_RW(max_freq);
|
|
|
|
static ssize_t available_frequencies_show(struct device *d,
|
|
struct device_attribute *attr,
|
|
char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(d);
|
|
ssize_t count = 0;
|
|
int i;
|
|
|
|
if (!df->profile)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&df->lock);
|
|
|
|
for (i = 0; i < df->max_state; i++)
|
|
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
|
|
"%lu ", df->freq_table[i]);
|
|
|
|
mutex_unlock(&df->lock);
|
|
/* Truncate the trailing space */
|
|
if (count)
|
|
count--;
|
|
|
|
count += sprintf(&buf[count], "\n");
|
|
|
|
return count;
|
|
}
|
|
static DEVICE_ATTR_RO(available_frequencies);
|
|
|
|
static ssize_t trans_stat_show(struct device *dev,
|
|
struct device_attribute *attr, char *buf)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
ssize_t len = 0;
|
|
int i, j;
|
|
unsigned int max_state;
|
|
|
|
if (!df->profile)
|
|
return -EINVAL;
|
|
max_state = df->max_state;
|
|
|
|
if (max_state == 0)
|
|
return sysfs_emit(buf, "Not Supported.\n");
|
|
|
|
mutex_lock(&df->lock);
|
|
if (!df->stop_polling &&
|
|
devfreq_update_status(df, df->previous_freq)) {
|
|
mutex_unlock(&df->lock);
|
|
return 0;
|
|
}
|
|
mutex_unlock(&df->lock);
|
|
|
|
len += sysfs_emit_at(buf, len, " From : To\n");
|
|
len += sysfs_emit_at(buf, len, " :");
|
|
for (i = 0; i < max_state; i++) {
|
|
if (len >= PAGE_SIZE - 1)
|
|
break;
|
|
len += sysfs_emit_at(buf, len, "%10lu",
|
|
df->freq_table[i]);
|
|
}
|
|
|
|
if (len >= PAGE_SIZE - 1)
|
|
return PAGE_SIZE - 1;
|
|
len += sysfs_emit_at(buf, len, " time(ms)\n");
|
|
|
|
for (i = 0; i < max_state; i++) {
|
|
if (len >= PAGE_SIZE - 1)
|
|
break;
|
|
if (df->freq_table[2] == df->previous_freq)
|
|
len += sysfs_emit_at(buf, len, "*");
|
|
else
|
|
len += sysfs_emit_at(buf, len, " ");
|
|
if (len >= PAGE_SIZE - 1)
|
|
break;
|
|
len += sysfs_emit_at(buf, len, "%10lu:", df->freq_table[i]);
|
|
for (j = 0; j < max_state; j++) {
|
|
if (len >= PAGE_SIZE - 1)
|
|
break;
|
|
len += sysfs_emit_at(buf, len, "%10u",
|
|
df->stats.trans_table[(i * max_state) + j]);
|
|
}
|
|
if (len >= PAGE_SIZE - 1)
|
|
break;
|
|
len += sysfs_emit_at(buf, len, "%10llu\n", (u64)
|
|
jiffies64_to_msecs(df->stats.time_in_state[i]));
|
|
}
|
|
|
|
if (len < PAGE_SIZE - 1)
|
|
len += sysfs_emit_at(buf, len, "Total transition : %u\n",
|
|
df->stats.total_trans);
|
|
if (len >= PAGE_SIZE - 1) {
|
|
pr_warn_once("devfreq transition table exceeds PAGE_SIZE. Disabling\n");
|
|
return -EFBIG;
|
|
}
|
|
|
|
return len;
|
|
}
|
|
|
|
static ssize_t trans_stat_store(struct device *dev,
|
|
struct device_attribute *attr,
|
|
const char *buf, size_t count)
|
|
{
|
|
struct devfreq *df = to_devfreq(dev);
|
|
int err, value;
|
|
|
|
if (!df->profile)
|
|
return -EINVAL;
|
|
|
|
if (df->max_state == 0)
|
|
return count;
|
|
|
|
err = kstrtoint(buf, 10, &value);
|
|
if (err || value != 0)
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&df->lock);
|
|
memset(df->stats.time_in_state, 0, (df->max_state *
|
|
sizeof(*df->stats.time_in_state)));
|
|
memset(df->stats.trans_table, 0, array3_size(sizeof(unsigned int),
|
|
df->max_state,
|
|
df->max_state));
|
|
df->stats.total_trans = 0;
|
|
df->stats.last_update = get_jiffies_64();
|
|
mutex_unlock(&df->lock);
|
|
|
|
return count;
|
|
}
|
|
static DEVICE_ATTR_RW(trans_stat);

static struct attribute *devfreq_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_governor.attr,
	&dev_attr_available_governors.attr,
	&dev_attr_cur_freq.attr,
	&dev_attr_available_frequencies.attr,
	&dev_attr_target_freq.attr,
	&dev_attr_min_freq.attr,
	&dev_attr_max_freq.attr,
	&dev_attr_trans_stat.attr,
	NULL,
};
ATTRIBUTE_GROUPS(devfreq);

static ssize_t polling_interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	if (!df->profile)
		return -EINVAL;

	return sprintf(buf, "%d\n", df->profile->polling_ms);
}

static ssize_t polling_interval_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	unsigned int value;
	int ret;

	if (!df->governor)
		return -EINVAL;

	ret = sscanf(buf, "%u", &value);
	if (ret != 1)
		return -EINVAL;

	df->governor->event_handler(df, DEVFREQ_GOV_UPDATE_INTERVAL, &value);
	ret = count;

	return ret;
}
static DEVICE_ATTR_RW(polling_interval);

static ssize_t timer_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct devfreq *df = to_devfreq(dev);

	if (!df->profile)
		return -EINVAL;

	return sprintf(buf, "%s\n", timer_name[df->profile->timer]);
}

static ssize_t timer_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct devfreq *df = to_devfreq(dev);
	char str_timer[DEVFREQ_NAME_LEN + 1];
	int timer = -1;
	int ret = 0, i;

	if (!df->governor || !df->profile)
		return -EINVAL;

	ret = sscanf(buf, "%16s", str_timer);
	if (ret != 1)
		return -EINVAL;

	for (i = 0; i < DEVFREQ_TIMER_NUM; i++) {
		if (!strncmp(timer_name[i], str_timer, DEVFREQ_NAME_LEN)) {
			timer = i;
			break;
		}
	}

	if (timer < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (df->profile->timer == timer) {
		ret = 0;
		goto out;
	}

	mutex_lock(&df->lock);
	df->profile->timer = timer;
	mutex_unlock(&df->lock);

	ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
	if (ret) {
		dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
			 __func__, df->governor->name, ret);
		goto out;
	}

	ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
	if (ret)
		dev_warn(dev, "%s: Governor %s not started(%d)\n",
			 __func__, df->governor->name, ret);
out:
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(timer);
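
/*
 * Usage note: writing one of the names listed in timer_name[] to this
 * attribute switches the profile's timer type and restarts the governor
 * (DEVFREQ_GOV_STOP followed by DEVFREQ_GOV_START), as implemented in
 * timer_store() above.
 */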

#define CREATE_SYSFS_FILE(df, name)					\
{									\
	int ret;							\
	ret = sysfs_create_file(&df->dev.kobj, &dev_attr_##name.attr);	\
	if (ret < 0) {							\
		dev_warn(&df->dev,					\
			 "Unable to create attr(%s)\n", "##name");	\
	}								\
}									\

/* Create the specific sysfs files which depend on each governor. */
static void create_sysfs_files(struct devfreq *devfreq,
			       const struct devfreq_governor *gov)
{
	if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL))
		CREATE_SYSFS_FILE(devfreq, polling_interval);
	if (IS_SUPPORTED_ATTR(gov->attrs, TIMER))
		CREATE_SYSFS_FILE(devfreq, timer);
}

/* Remove the specific sysfs files which depend on each governor. */
static void remove_sysfs_files(struct devfreq *devfreq,
			       const struct devfreq_governor *gov)
{
	if (IS_SUPPORTED_ATTR(gov->attrs, POLLING_INTERVAL))
		sysfs_remove_file(&devfreq->dev.kobj,
				  &dev_attr_polling_interval.attr);
	if (IS_SUPPORTED_ATTR(gov->attrs, TIMER))
		sysfs_remove_file(&devfreq->dev.kobj, &dev_attr_timer.attr);
}

/**
 * devfreq_summary_show() - Show the summary of the devfreq devices
 * @s:		seq_file instance to show the summary of devfreq devices
 * @data:	not used
 *
 * Show the summary of the devfreq devices via 'devfreq_summary' debugfs file.
 * It helps users to get detailed information about the devfreq devices.
 *
 * Return 0 always because it shows the information without any data change.
 */
static int devfreq_summary_show(struct seq_file *s, void *data)
{
	struct devfreq *devfreq;
	struct devfreq *p_devfreq = NULL;
	unsigned long cur_freq, min_freq, max_freq;
	unsigned int polling_ms;
	unsigned int timer;

	seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n",
			"dev",
			"parent_dev",
			"governor",
			"timer",
			"polling_ms",
			"cur_freq_Hz",
			"min_freq_Hz",
			"max_freq_Hz");
	seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n",
			"------------------------------",
			"------------------------------",
			"---------------",
			"----------",
			"----------",
			"------------",
			"------------",
			"------------");

	mutex_lock(&devfreq_list_lock);

	list_for_each_entry_reverse(devfreq, &devfreq_list, node) {
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
		if (!strncmp(devfreq->governor->name, DEVFREQ_GOV_PASSIVE,
			     DEVFREQ_NAME_LEN)) {
			struct devfreq_passive_data *data = devfreq->data;

			if (data)
				p_devfreq = data->parent;
		} else {
			p_devfreq = NULL;
		}
#endif

		mutex_lock(&devfreq->lock);
		cur_freq = devfreq->previous_freq;
		devfreq_get_freq_range(devfreq, &min_freq, &max_freq);
		timer = devfreq->profile->timer;

		if (IS_SUPPORTED_ATTR(devfreq->governor->attrs, POLLING_INTERVAL))
			polling_ms = devfreq->profile->polling_ms;
		else
			polling_ms = 0;
		mutex_unlock(&devfreq->lock);

		seq_printf(s,
			   "%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n",
			   dev_name(&devfreq->dev),
			   p_devfreq ? dev_name(&p_devfreq->dev) : "null",
			   devfreq->governor->name,
			   polling_ms ? timer_name[timer] : "null",
			   polling_ms,
			   cur_freq,
			   min_freq,
			   max_freq);
	}

	mutex_unlock(&devfreq_list_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(devfreq_summary);
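
/*
 * Usage note: this summary is exposed through the "devfreq" debugfs directory
 * created in devfreq_init() below, i.e. <debugfs>/devfreq/devfreq_summary
 * (typically /sys/kernel/debug/devfreq/devfreq_summary when debugfs is
 * mounted at its usual location).
 */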

static int __init devfreq_init(void)
{
	devfreq_class = class_create("devfreq");
	if (IS_ERR(devfreq_class)) {
		pr_err("%s: couldn't create class\n", __FILE__);
		return PTR_ERR(devfreq_class);
	}

	devfreq_wq = create_freezable_workqueue("devfreq_wq");
	if (!devfreq_wq) {
		class_destroy(devfreq_class);
		pr_err("%s: couldn't create workqueue\n", __FILE__);
		return -ENOMEM;
	}
	devfreq_class->dev_groups = devfreq_groups;

	devfreq_debugfs = debugfs_create_dir("devfreq", NULL);
	debugfs_create_file("devfreq_summary", 0444,
			    devfreq_debugfs, NULL,
			    &devfreq_summary_fops);

	return 0;
}
subsys_initcall(devfreq_init);

/*
 * The following are helper functions for devfreq user device drivers with
 * OPP framework.
 */

/**
 * devfreq_recommended_opp() - Helper function to get proper OPP for the
 *			       freq value given to target callback.
 * @dev:	The devfreq user device. (parent of devfreq)
 * @freq:	The frequency given to target function
 * @flags:	Flags handed from devfreq framework.
 *
 * The callers are required to call dev_pm_opp_put() for the returned OPP after
 * use.
 */
struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
					   unsigned long *freq,
					   u32 flags)
{
	struct dev_pm_opp *opp;

	if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
		/* The freq is an upper bound. opp should be lower */
		opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0);
	} else {
		/* The freq is a lower bound. opp should be higher */
		opp = dev_pm_opp_find_freq_ceil_indexed(dev, freq, 0);

		/* If not available, use the closest opp */
		if (opp == ERR_PTR(-ERANGE))
			opp = dev_pm_opp_find_freq_floor_indexed(dev, freq, 0);
	}

	return opp;
}
EXPORT_SYMBOL(devfreq_recommended_opp);
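
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): a
 * devfreq driver's target() callback would normally use
 * devfreq_recommended_opp() as below, remembering to drop the OPP reference
 * with dev_pm_opp_put() as required by the kernel-doc above. The names
 * foo_devfreq_target() and foo_set_rate() are assumptions.
 *
 *	static int foo_devfreq_target(struct device *dev, unsigned long *freq,
 *				      u32 flags)
 *	{
 *		struct dev_pm_opp *opp;
 *
 *		opp = devfreq_recommended_opp(dev, freq, flags);
 *		if (IS_ERR(opp))
 *			return PTR_ERR(opp);
 *		dev_pm_opp_put(opp);
 *
 *		return foo_set_rate(dev, *freq);
 *	}
 */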

/**
 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
 *				      of any changes in the OPP availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_register_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_register_opp_notifier);

/**
 * devfreq_unregister_opp_notifier() - Helper function to stop getting devfreq
 *					notified of any changes in the OPP
 *					availability
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 *
 * This must be called from the exit() callback of devfreq_dev_profile if
 * devfreq_recommended_opp() is used.
 */
int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
{
	return dev_pm_opp_unregister_notifier(dev, &devfreq->nb);
}
EXPORT_SYMBOL(devfreq_unregister_opp_notifier);
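
/*
 * Illustrative sketch (hypothetical driver code): the usual pairing of the
 * two helpers above. Registration happens once the devfreq device exists
 * (e.g. in probe(), after devfreq_add_device()), and the matching
 * unregistration goes into the profile's exit() callback as noted in the
 * kernel-doc above. struct foo_priv and foo_devfreq_exit() are assumptions.
 *
 *	err = devfreq_register_opp_notifier(dev, foo->devfreq);
 *
 *	static void foo_devfreq_exit(struct device *dev)
 *	{
 *		struct foo_priv *foo = dev_get_drvdata(dev);
 *
 *		devfreq_unregister_opp_notifier(dev, foo->devfreq);
 *	}
 */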

static void devm_devfreq_opp_release(struct device *dev, void *res)
{
	devfreq_unregister_opp_notifier(dev, *(struct devfreq **)res);
}

/**
 * devm_devfreq_register_opp_notifier() - Resource-managed
 *					   devfreq_register_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
int devm_devfreq_register_opp_notifier(struct device *dev,
				       struct devfreq *devfreq)
{
	struct devfreq **ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_opp_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_opp_notifier(dev, devfreq);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	*ptr = devfreq;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_opp_notifier);
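
/*
 * Illustrative sketch (hypothetical driver code): with the resource-managed
 * variant above the explicit unregistration can be dropped; devres calls
 * devm_devfreq_opp_release(), and thus devfreq_unregister_opp_notifier(),
 * automatically when the registering device is unbound.
 *
 *	err = devm_devfreq_register_opp_notifier(dev, foo->devfreq);
 *	if (err)
 *		return err;
 */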

/**
 * devm_devfreq_unregister_opp_notifier() - Resource-managed
 *					     devfreq_unregister_opp_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 */
void devm_devfreq_unregister_opp_notifier(struct device *dev,
					   struct devfreq *devfreq)
{
	WARN_ON(devres_release(dev, devm_devfreq_opp_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_opp_notifier);

/**
 * devfreq_register_notifier() - Register a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_register_notifier(struct devfreq *devfreq,
			      struct notifier_block *nb,
			      unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_register(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_register_notifier);
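
/*
 * Illustrative sketch (hypothetical consumer code, assuming the
 * DEVFREQ_TRANSITION_NOTIFIER payload is struct devfreq_freqs and the
 * DEVFREQ_PRECHANGE/DEVFREQ_POSTCHANGE events declared in
 * include/linux/devfreq.h). foo_transition_notifier() and foo_nb are
 * assumptions.
 *
 *	static int foo_transition_notifier(struct notifier_block *nb,
 *					   unsigned long event, void *data)
 *	{
 *		struct devfreq_freqs *freqs = data;
 *
 *		if (event == DEVFREQ_POSTCHANGE)
 *			pr_debug("devfreq: %lu Hz -> %lu Hz\n",
 *				 freqs->old, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	err = devfreq_register_notifier(df, &foo_nb, DEVFREQ_TRANSITION_NOTIFIER);
 */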

/*
 * devfreq_unregister_notifier() - Unregister a driver with devfreq
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devfreq_unregister_notifier(struct devfreq *devfreq,
				struct notifier_block *nb,
				unsigned int list)
{
	int ret = 0;

	if (!devfreq)
		return -EINVAL;

	switch (list) {
	case DEVFREQ_TRANSITION_NOTIFIER:
		ret = srcu_notifier_chain_unregister(
				&devfreq->transition_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(devfreq_unregister_notifier);

struct devfreq_notifier_devres {
	struct devfreq *devfreq;
	struct notifier_block *nb;
	unsigned int list;
};

static void devm_devfreq_notifier_release(struct device *dev, void *res)
{
	struct devfreq_notifier_devres *this = res;

	devfreq_unregister_notifier(this->devfreq, this->nb, this->list);
}

/**
 * devm_devfreq_register_notifier()
 *	- Resource-managed devfreq_register_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to register.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
int devm_devfreq_register_notifier(struct device *dev,
				   struct devfreq *devfreq,
				   struct notifier_block *nb,
				   unsigned int list)
{
	struct devfreq_notifier_devres *ptr;
	int ret;

	ptr = devres_alloc(devm_devfreq_notifier_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = devfreq_register_notifier(devfreq, nb, list);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	ptr->devfreq = devfreq;
	ptr->nb = nb;
	ptr->list = list;
	devres_add(dev, ptr);

	return 0;
}
EXPORT_SYMBOL(devm_devfreq_register_notifier);
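
/*
 * Usage note: with devm_devfreq_register_notifier() the matching
 * devfreq_unregister_notifier() call is issued automatically from
 * devm_devfreq_notifier_release() above when the registering device is
 * unbound; devm_devfreq_unregister_notifier() below is only needed to drop
 * the notifier earlier than that.
 */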

/**
 * devm_devfreq_unregister_notifier()
 *	- Resource-managed devfreq_unregister_notifier()
 * @dev:	The devfreq user device. (parent of devfreq)
 * @devfreq:	The devfreq object.
 * @nb:		The notifier block to be unregistered.
 * @list:	DEVFREQ_TRANSITION_NOTIFIER.
 */
void devm_devfreq_unregister_notifier(struct device *dev,
				      struct devfreq *devfreq,
				      struct notifier_block *nb,
				      unsigned int list)
{
	WARN_ON(devres_release(dev, devm_devfreq_notifier_release,
			       devm_devfreq_dev_match, devfreq));
}
EXPORT_SYMBOL(devm_devfreq_unregister_notifier);