
Merge branch 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm

Pull ARM cpufreq driver changes for v5.15 from Viresh Kumar:

"This contains:

 - Update cpufreq-dt blocklist with more platforms (Bjorn Andersson).

 - Allow freq changes from any CPU for qcom-hw driver (Taniya Das).

 - Add DCVS interrupt support for qcom-hw driver (Thara Gopinath).

 - A new callback (->register_em()) to register with the energy model (EM)
   at a more convenient point in time."
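
For illustration, a minimal sketch of the conversion pattern this series applies to the drivers below; the foo_* names are hypothetical, while cpufreq_register_em_with_opp() is the generic helper added in this series:

    #include <linux/cpufreq.h>

    /* After the conversion: ->init() no longer calls
     * dev_pm_opp_of_register_em() by hand; the cpufreq core invokes
     * ->register_em() from cpufreq_online() instead, after ->init()
     * but before the governor is started.
     */
    static int foo_cpufreq_init(struct cpufreq_policy *policy)
    {
            /* ... clocks, OPP and frequency table setup ... */
            return 0;
    }

    static struct cpufreq_driver foo_cpufreq_driver = {
            .name        = "foo-cpufreq",
            .init        = foo_cpufreq_init,
            .register_em = cpufreq_register_em_with_opp, /* generic OPP helper */
    };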

* 'cpufreq/arm/linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm:
  cpufreq: qcom-hw: Set dvfs_possible_from_any_cpu cpufreq driver flag
  cpufreq: blocklist more Qualcomm platforms in cpufreq-dt-platdev
  cpufreq: qcom-cpufreq-hw: Add dcvs interrupt support
  cpufreq: scmi: Use .register_em() to register with energy model
  cpufreq: vexpress: Use .register_em() to register with energy model
  cpufreq: scpi: Use .register_em() to register with energy model
  cpufreq: qcom-cpufreq-hw: Use .register_em() to register with energy model
  cpufreq: omap: Use .register_em() to register with energy model
  cpufreq: mediatek: Use .register_em() to register with energy model
  cpufreq: imx6q: Use .register_em() to register with energy model
  cpufreq: dt: Use .register_em() to register with energy model
  cpufreq: Add callback to register with energy model
  cpufreq: vexpress: Set CPUFREQ_IS_COOLING_DEV flag
Rafael J. Wysocki, 2021-08-31 14:02:16 +02:00
commit b2a6181e27
12 changed files with 233 additions and 54 deletions

drivers/base/arch_topology.c

@@ -149,6 +149,7 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
}
DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
{
@@ -165,6 +166,7 @@ void topology_set_thermal_pressure(const struct cpumask *cpus,
for_each_cpu(cpu, cpus)
WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
}
EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
static ssize_t cpu_capacity_show(struct device *dev,
struct device_attribute *attr,

drivers/cpufreq/cpufreq-dt-platdev.c

@@ -137,11 +137,15 @@ static const struct of_device_id blocklist[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
{ .compatible = "qcom,qcs404", },
{ .compatible = "qcom,sa8155p" },
{ .compatible = "qcom,sc7180", },
{ .compatible = "qcom,sc7280", },
{ .compatible = "qcom,sc8180x", },
{ .compatible = "qcom,sdm845", },
{ .compatible = "qcom,sm6350", },
{ .compatible = "qcom,sm8150", },
{ .compatible = "qcom,sm8250", },
{ .compatible = "qcom,sm8350", },
{ .compatible = "st,stih407", },
{ .compatible = "st,stih410", },

drivers/cpufreq/cpufreq-dt.c

@@ -143,8 +143,6 @@ static int cpufreq_init(struct cpufreq_policy *policy)
cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
}
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
out_clk_put:
@@ -184,6 +182,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
.exit = cpufreq_exit,
.online = cpufreq_online,
.offline = cpufreq_offline,
.register_em = cpufreq_register_em_with_opp,
.name = "cpufreq-dt",
.attr = cpufreq_dt_attr,
.suspend = cpufreq_generic_suspend,

drivers/cpufreq/cpufreq.c

@@ -1491,6 +1491,19 @@ static int cpufreq_online(unsigned int cpu)
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
/*
* Register with the energy model before
* sched_cpufreq_governor_change() is called, which will result
* in rebuilding of the sched domains, which should only be done
* once the energy model is properly initialized for the policy
* first.
*
* Also, this should be called before the policy is registered
* with cooling framework.
*/
if (cpufreq_driver->register_em)
cpufreq_driver->register_em(policy);
}
ret = cpufreq_init_policy(policy);
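
For orientation, a simplified sketch of the resulting call order in cpufreq_online() per the hunk above (surrounding steps elided):

    cpufreq_driver->init(policy);         /* driver fills in the policy */
    /* ... sysfs setup, policy list, ... */
    cpufreq_driver->register_em(policy);  /* new hook: EM is ready first */
    cpufreq_init_policy(policy);          /* governor starts and may rebuild
                                             the sched domains; cooling-device
                                             registration follows later */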

drivers/cpufreq/imx6q-cpufreq.c

@@ -192,7 +192,6 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy)
policy->clk = clks[ARM].clk;
cpufreq_generic_init(policy, freq_table, transition_latency);
policy->suspend_freq = max_freq;
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
}
@@ -204,6 +203,7 @@ static struct cpufreq_driver imx6q_cpufreq_driver = {
.target_index = imx6q_set_target,
.get = cpufreq_generic_get,
.init = imx6q_cpufreq_init,
.register_em = cpufreq_register_em_with_opp,
.name = "imx6q-cpufreq",
.attr = cpufreq_generic_attr,
.suspend = cpufreq_generic_suspend,

drivers/cpufreq/mediatek-cpufreq.c

@@ -448,8 +448,6 @@ static int mtk_cpufreq_init(struct cpufreq_policy *policy)
policy->driver_data = info;
policy->clk = info->cpu_clk;
dev_pm_opp_of_register_em(info->cpu_dev, policy->cpus);
return 0;
}
@@ -471,6 +469,7 @@ static struct cpufreq_driver mtk_cpufreq_driver = {
.get = cpufreq_generic_get,
.init = mtk_cpufreq_init,
.exit = mtk_cpufreq_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "mtk-cpufreq",
.attr = cpufreq_generic_attr,
};

drivers/cpufreq/omap-cpufreq.c

@@ -131,7 +131,6 @@ static int omap_cpu_init(struct cpufreq_policy *policy)
/* FIXME: what's the actual transition time? */
cpufreq_generic_init(policy, freq_table, 300 * 1000);
dev_pm_opp_of_register_em(mpu_dev, policy->cpus);
return 0;
}
@@ -150,6 +149,7 @@ static struct cpufreq_driver omap_driver = {
.get = cpufreq_generic_get,
.init = omap_cpu_init,
.exit = omap_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.name = "omap",
.attr = cpufreq_generic_attr,
};

drivers/cpufreq/qcom-cpufreq-hw.c

@@ -7,12 +7,14 @@
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define LUT_MAX_ENTRIES 40U
#define LUT_SRC GENMASK(31, 30)
@@ -22,10 +24,13 @@
#define CLK_HW_DIV 2
#define LUT_TURBO_IND 1
#define HZ_PER_KHZ 1000
struct qcom_cpufreq_soc_data {
u32 reg_enable;
u32 reg_freq_lut;
u32 reg_volt_lut;
u32 reg_current_vote;
u32 reg_perf_state;
u8 lut_row_size;
};
@@ -34,6 +39,16 @@ struct qcom_cpufreq_data {
void __iomem *base;
struct resource *res;
const struct qcom_cpufreq_soc_data *soc_data;
/*
* Mutex to synchronize between de-init sequence and re-starting LMh
* polling/interrupts
*/
struct mutex throttle_lock;
int throttle_irq;
bool cancel_throttle;
struct delayed_work throttle_work;
struct cpufreq_policy *policy;
};
static unsigned long cpu_hw_rate, xo_rate;
@@ -251,10 +266,92 @@ static void qcom_get_related_cpus(int index, struct cpumask *m)
}
}
static unsigned int qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
unsigned int val = readl_relaxed(data->base + data->soc_data->reg_current_vote);
return (val & 0x3FF) * 19200;
}
static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
unsigned long max_capacity, capacity, freq_hz, throttled_freq;
struct cpufreq_policy *policy = data->policy;
int cpu = cpumask_first(policy->cpus);
struct device *dev = get_cpu_device(cpu);
struct dev_pm_opp *opp;
unsigned int freq;
/*
* Get the h/w throttled frequency, normalize it using the
* registered opp table and use it to calculate thermal pressure.
*/
freq = qcom_lmh_get_throttle_freq(data);
freq_hz = freq * HZ_PER_KHZ;
opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
dev_pm_opp_find_freq_ceil(dev, &freq_hz);
throttled_freq = freq_hz / HZ_PER_KHZ;
/* Update thermal pressure */
max_capacity = arch_scale_cpu_capacity(cpu);
capacity = mult_frac(max_capacity, throttled_freq, policy->cpuinfo.max_freq);
/* Don't pass boost capacity to scheduler */
if (capacity > max_capacity)
capacity = max_capacity;
arch_set_thermal_pressure(policy->cpus, max_capacity - capacity);
/*
* In the unlikely case policy is unregistered do not enable
* polling or h/w interrupt
*/
mutex_lock(&data->throttle_lock);
if (data->cancel_throttle)
goto out;
/*
* If h/w throttled frequency is higher than what cpufreq has requested
* for, then stop polling and switch back to interrupt mechanism.
*/
if (throttled_freq >= qcom_cpufreq_hw_get(cpu))
enable_irq(data->throttle_irq);
else
mod_delayed_work(system_highpri_wq, &data->throttle_work,
msecs_to_jiffies(10));
out:
mutex_unlock(&data->throttle_lock);
}
static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
struct qcom_cpufreq_data *data;
data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
qcom_lmh_dcvs_notify(data);
}
static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
struct qcom_cpufreq_data *c_data = data;
/* Disable interrupt and enable polling */
disable_irq_nosync(c_data->throttle_irq);
qcom_lmh_dcvs_notify(c_data);
return 0;
}
static const struct qcom_cpufreq_soc_data qcom_soc_data = {
.reg_enable = 0x0,
.reg_freq_lut = 0x110,
.reg_volt_lut = 0x114,
.reg_current_vote = 0x704,
.reg_perf_state = 0x920,
.lut_row_size = 32,
};
@@ -274,6 +371,51 @@ static const struct of_device_id qcom_cpufreq_hw_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);
static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
struct qcom_cpufreq_data *data = policy->driver_data;
struct platform_device *pdev = cpufreq_get_driver_data();
char irq_name[15];
int ret;
/*
* Look for LMh interrupt. If no interrupt line is specified /
* if there is an error, allow cpufreq to be enabled as usual.
*/
data->throttle_irq = platform_get_irq(pdev, index);
if (data->throttle_irq <= 0)
return data->throttle_irq == -EPROBE_DEFER ? -EPROBE_DEFER : 0;
data->cancel_throttle = false;
data->policy = policy;
mutex_init(&data->throttle_lock);
INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);
snprintf(irq_name, sizeof(irq_name), "dcvsh-irq-%u", policy->cpu);
ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
IRQF_ONESHOT, irq_name, data);
if (ret) {
dev_err(&pdev->dev, "Error registering %s: %d\n", irq_name, ret);
return 0;
}
return 0;
}
static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
if (data->throttle_irq <= 0)
return;
mutex_lock(&data->throttle_lock);
data->cancel_throttle = true;
mutex_unlock(&data->throttle_lock);
cancel_delayed_work_sync(&data->throttle_work);
free_irq(data->throttle_irq, data);
}
static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
struct platform_device *pdev = cpufreq_get_driver_data();
@@ -348,6 +490,7 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
}
policy->driver_data = data;
policy->dvfs_possible_from_any_cpu = true;
ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
if (ret) {
@@ -362,14 +505,16 @@ static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
goto error;
}
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
if (policy_has_boost_freq(policy)) {
ret = cpufreq_enable_boost_support();
if (ret)
dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
}
ret = qcom_cpufreq_hw_lmh_init(policy, index);
if (ret)
goto error;
return 0;
error:
kfree(data);
@@ -389,6 +534,7 @@ static int qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
dev_pm_opp_remove_all_dynamic(cpu_dev);
dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
qcom_cpufreq_hw_lmh_exit(data);
kfree(policy->freq_table);
kfree(data);
iounmap(base);
@@ -412,6 +558,7 @@ static struct cpufreq_driver cpufreq_qcom_hw_driver = {
.get = qcom_cpufreq_hw_get,
.init = qcom_cpufreq_hw_cpu_init,
.exit = qcom_cpufreq_hw_cpu_exit,
.register_em = cpufreq_register_em_with_opp,
.fast_switch = qcom_cpufreq_hw_fast_switch,
.name = "qcom-cpufreq-hw",
.attr = qcom_cpufreq_hw_attr,
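
A note on the LMh decode above: qcom_lmh_get_throttle_freq() keeps the low 10 bits of the vote register and multiplies by 19200, so the register counts in units of 19.2 MHz and the result is in kHz; a hypothetical register value of 100 decodes to 100 * 19200 = 1920000 kHz, i.e. 1.92 GHz. Once that throttled frequency climbs back to or above the frequency cpufreq last requested, the polling path re-enables the interrupt and stops rescheduling the delayed work.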

drivers/cpufreq/scmi-cpufreq.c

@@ -22,7 +22,9 @@
struct scmi_data {
int domain_id;
int nr_opp;
struct device *cpu_dev;
cpumask_var_t opp_shared_cpus;
};
static struct scmi_protocol_handle *ph;
@@ -123,9 +125,6 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
struct device *cpu_dev;
struct scmi_data *priv;
struct cpufreq_frequency_table *freq_table;
struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
cpumask_var_t opp_shared_cpus;
bool power_scale_mw;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -133,9 +132,15 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
return -ENODEV;
}
if (!zalloc_cpumask_var(&opp_shared_cpus, GFP_KERNEL))
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
ret = -ENOMEM;
goto out_free_priv;
}
/* Obtain CPUs that share SCMI performance controls */
ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
if (ret) {
@@ -148,14 +153,14 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
* The OPP 'sharing cpus' info may come from DT through an empty opp
* table and opp-shared.
*/
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, opp_shared_cpus);
if (ret || !cpumask_weight(opp_shared_cpus)) {
ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
/*
* Either opp-table is not set or no opp-shared was found.
* Use the CPU mask from SCMI to designate CPUs sharing an OPP
* table.
*/
cpumask_copy(opp_shared_cpus, policy->cpus);
cpumask_copy(priv->opp_shared_cpus, policy->cpus);
}
/*
@@ -180,7 +185,7 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, opp_shared_cpus);
ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
if (ret) {
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
__func__, ret);
@@ -188,21 +193,13 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
power_scale_mw = perf_ops->power_scale_mw_get(ph);
em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb,
opp_shared_cpus, power_scale_mw);
}
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
goto out_free_opp;
priv->nr_opp = nr_opp;
}
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
goto out_free_priv;
goto out_free_opp;
}
priv->cpu_dev = cpu_dev;
@@ -223,17 +220,16 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible =
perf_ops->fast_switch_possible(ph, cpu_dev);
free_cpumask_var(opp_shared_cpus);
return 0;
out_free_priv:
kfree(priv);
out_free_opp:
dev_pm_opp_remove_all_dynamic(cpu_dev);
out_free_cpumask:
free_cpumask_var(opp_shared_cpus);
free_cpumask_var(priv->opp_shared_cpus);
out_free_priv:
kfree(priv);
return ret;
}
@@ -244,11 +240,33 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
free_cpumask_var(priv->opp_shared_cpus);
kfree(priv);
return 0;
}
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
struct scmi_data *priv = policy->driver_data;
/*
* This callback will be called for each policy, but we don't need to
* register with EM every time. Despite not being part of the same
* policy, some CPUs may still share their perf-domains, and a CPU from
* another policy may already have registered with EM on behalf of CPUs
* of this policy.
*/
if (!priv->nr_opp)
return;
em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
&em_cb, priv->opp_shared_cpus,
power_scale_mw);
}
static struct cpufreq_driver scmi_cpufreq_driver = {
.name = "scmi",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -261,6 +279,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
.get = scmi_cpufreq_get_rate,
.init = scmi_cpufreq_init,
.exit = scmi_cpufreq_exit,
.register_em = scmi_cpufreq_register_em,
};
static int scmi_cpufreq_probe(struct scmi_device *sdev)

drivers/cpufreq/scpi-cpufreq.c

@@ -163,8 +163,6 @@ static int scpi_cpufreq_init(struct cpufreq_policy *policy)
policy->fast_switch_possible = false;
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
return 0;
out_free_cpufreq_table:
@@ -200,6 +198,7 @@ static struct cpufreq_driver scpi_cpufreq_driver = {
.init = scpi_cpufreq_init,
.exit = scpi_cpufreq_exit,
.target_index = scpi_cpufreq_set_target,
.register_em = cpufreq_register_em_with_opp,
};
static int scpi_cpufreq_probe(struct platform_device *pdev)

drivers/cpufreq/vexpress-spc-cpufreq.c

@@ -15,7 +15,6 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -47,7 +46,6 @@ static bool bL_switching_enabled;
#define ACTUAL_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq) ((cluster == A7_CLUSTER) ? freq >> 1 : freq)
static struct thermal_cooling_device *cdev[MAX_CLUSTERS];
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];
@@ -442,8 +440,6 @@ static int ve_spc_cpufreq_init(struct cpufreq_policy *policy)
policy->freq_table = freq_table[cur_cluster];
policy->cpuinfo.transition_latency = 1000000; /* 1 ms */
dev_pm_opp_of_register_em(cpu_dev, policy->cpus);
if (is_bL_switching_enabled())
per_cpu(cpu_last_req_freq, policy->cpu) =
clk_get_cpu_rate(policy->cpu);
@@ -457,11 +453,6 @@ static int ve_spc_cpufreq_exit(struct cpufreq_policy *policy)
struct device *cpu_dev;
int cur_cluster = cpu_to_cluster(policy->cpu);
if (cur_cluster < MAX_CLUSTERS) {
cpufreq_cooling_unregister(cdev[cur_cluster]);
cdev[cur_cluster] = NULL;
}
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
pr_err("%s: failed to get cpu%d device\n", __func__,
@@ -473,17 +464,6 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
return 0;
}
static void ve_spc_cpufreq_ready(struct cpufreq_policy *policy)
{
int cur_cluster = cpu_to_cluster(policy->cpu);
/* Do not register a cpu_cooling device if we are in IKS mode */
if (cur_cluster >= MAX_CLUSTERS)
return;
cdev[cur_cluster] = of_cpufreq_cooling_register(policy);
}
static struct cpufreq_driver ve_spc_cpufreq_driver = {
.name = "vexpress-spc",
.flags = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
@@ -493,7 +473,7 @@ static struct cpufreq_driver ve_spc_cpufreq_driver = {
.get = ve_spc_cpufreq_get_rate,
.init = ve_spc_cpufreq_init,
.exit = ve_spc_cpufreq_exit,
.ready = ve_spc_cpufreq_ready,
.register_em = cpufreq_register_em_with_opp,
.attr = cpufreq_generic_attr,
};
@@ -553,6 +533,9 @@ static int ve_spc_cpufreq_probe(struct platform_device *pdev)
for (i = 0; i < MAX_CLUSTERS; i++)
mutex_init(&cluster_lock[i]);
if (!is_bL_switching_enabled())
ve_spc_cpufreq_driver.flags |= CPUFREQ_IS_COOLING_DEV;
ret = cpufreq_register_driver(&ve_spc_cpufreq_driver);
if (ret) {
pr_info("%s: Failed registering platform driver: %s, err: %d\n",

include/linux/cpufreq.h

@@ -9,10 +9,12 @@
#define _LINUX_CPUFREQ_H
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/sysfs.h>
@@ -373,6 +375,12 @@ struct cpufreq_driver {
/* platform specific boost support code */
bool boost_enabled;
int (*set_boost)(struct cpufreq_policy *policy, int state);
/*
* Set by drivers that want to register with the energy model after the
* policy is properly initialized, but before the governor is started.
*/
void (*register_em)(struct cpufreq_policy *policy);
};
/* flags */
@@ -1046,4 +1054,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu);
void cpufreq_generic_init(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table,
unsigned int transition_latency);
static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy)
{
dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
policy->related_cpus);
}
#endif /* _LINUX_CPUFREQ_H */