Power management fix for 5.5-rc3

Fix a problem related to CPU offline/online and cpufreq governors that
in some system configurations may lead to a system-wide deadlock during
CPU online.

-----BEGIN PGP SIGNATURE-----
iQJGBAABCAAwFiEE4fcc61cGeeHD/fCwgsRv/nhiVHEFAl37lO4SHHJqd0Byand5
c29ja2kubmV0AAoJEILEb/54YlRxrUoP+wfiXQ8k3GncyD8NXY1/GhEmqB95v/f4
clbn0xNu2WaQB3UdO/LkouL0+IaVw/i8PAt0cdeuEjKSgbPT8HHCkN28J0oia02H
HD7JzdiUZh7ONG1eq9Z/7ckSXBflZaUIjzTi6C1axX8reEzGVVuy5LNhc+0iWjsh
+mr9hRymgsRcGHPTN+CKi8Qhb29PPvVRt4YbghL0moQUDYewYENb/JBYJIjhgChG
vWpHX6Kra99uveTMkAN5GVcgZP5b/RiM5E+cCpLEZDTSUnCIuTPM38ATGDTpadpW
DSDuu+vEEmFu7RHO/lheN92n2fnTgjGpl5d6L5qwGCSzm0GeYZNo84RDEFCWwXZh
5sY8oz+1wA2MIXV3f1bXYTDMWWQSitSVQ3A9OeKLlprGcZhG/66T2QB7aTut/D/R
devyNt+xjMoqKcA7AaeVZ6XqUSHMTSCak88okXbKapJq6qkA6QkVsga+LArlRa0c
xdA6lma2ICPG7Q2ta2G4nHekHd9mDSaR7aFkcKoApOkIDKUY9j47pI3KWSgVFCu3
D6by7F7CCWHfp0Vw22eGuCQokBsLvhMsa7qwFlxKoxC6iJADANzBVkRzaH70wu2w
QP2Xu9+WndyRJrrmIQS5iTrClUfgverOgXTJ5OH2jFm+Oi4r6quTKF83rturnDBr
J8OK4odeh6E9
=+MQE
-----END PGP SIGNATURE-----

Merge tag 'pm-5.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fix from Rafael Wysocki:
 "Fix a problem related to CPU offline/online and cpufreq governors
  that in some system configurations may lead to a system-wide deadlock
  during CPU online"

* tag 'pm-5.5-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  cpufreq: Avoid leaving stale IRQ work items during CPU offline
commit 5f096c0ecd
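For context, the update_util hooks touched by this patch are how the scheduler drives cpufreq governors: a governor registers a per-CPU callback with cpufreq_add_update_util_hook(), and that callback typically defers the actual frequency change to IRQ work or a kthread. The problem fixed here is that a CPU on its way offline could still queue such IRQ work, which can be left behind stale and, in some configurations, deadlock a later CPU online. Below is a minimal governor-side sketch, not taken from the patch, showing how a callback would use cpufreq_this_cpu_can_update() (as relocated by this commit) to avoid that; the my_gov_* names are hypothetical, while the cpufreq and irq_work calls are the real kernel APIs involved.

/*
 * Hypothetical governor-side sketch (not part of the patch): one
 * update_util hook per CPU of the policy, whose callback refuses to
 * queue IRQ work when the local CPU may no longer update the policy.
 */
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/sched/cpufreq.h>

struct my_gov_cpu {
	struct update_util_data update_util;	/* per-CPU scheduler hook */
	struct cpufreq_policy *policy;
	struct irq_work *work;			/* per-policy deferred update */
};

static DEFINE_PER_CPU(struct my_gov_cpu, my_gov_cpu);

static void my_gov_update(struct update_util_data *data, u64 time,
			  unsigned int flags)
{
	struct my_gov_cpu *gc = container_of(data, struct my_gov_cpu,
					     update_util);

	/*
	 * After this fix the check matters on slow-switching platforms
	 * too: a CPU going offline must not queue IRQ work that nobody
	 * will flush.
	 */
	if (!cpufreq_this_cpu_can_update(gc->policy))
		return;

	irq_work_queue(gc->work);
}

/* @work must have been set up by the caller with init_irq_work(). */
static void my_gov_start(struct cpufreq_policy *policy, struct irq_work *work)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus) {
		struct my_gov_cpu *gc = &per_cpu(my_gov_cpu, cpu);

		gc->policy = policy;
		gc->work = work;
		cpufreq_add_update_util_hook(cpu, &gc->update_util,
					     my_gov_update);
	}
}

static void my_gov_stop(struct cpufreq_policy *policy)
{
	unsigned int cpu;

	for_each_cpu(cpu, policy->cpus)
		cpufreq_remove_update_util_hook(cpu);
}

schedutil itself follows this pattern; in the hunk for kernel/sched/cpufreq_schedutil.c below, the check is now applied on slow-switching platforms as well, instead of only when fast switching is enabled.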
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -595,17 +595,6 @@ struct governor_attr {
 			 size_t count);
 };
 
-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
-{
-	/*
-	 * Allow remote callbacks if:
-	 * - dvfs_possible_from_any_cpu flag is set
-	 * - the local and remote CPUs share cpufreq policy
-	 */
-	return policy->dvfs_possible_from_any_cpu ||
-		cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
  *********************************************************************/
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -12,6 +12,8 @@
 #define SCHED_CPUFREQ_MIGRATION	(1U << 1)
 
 #ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
 struct update_util_data {
 	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
 };
@@ -20,6 +22,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
 				     unsigned int flags));
 void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
 
 static inline unsigned long map_util_freq(unsigned long util,
 					unsigned long freq, unsigned long cap)
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -5,6 +5,8 @@
  * Copyright (C) 2016, Intel Corporation
  * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
  */
+#include <linux/cpufreq.h>
+
 #include "sched.h"
 
 DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
@@ -57,3 +59,19 @@ void cpufreq_remove_update_util_hook(int cpu)
 	rcu_assign_pointer(per_cpu(cpufreq_update_util_data, cpu), NULL);
 }
 EXPORT_SYMBOL_GPL(cpufreq_remove_update_util_hook);
+
+/**
+ * cpufreq_this_cpu_can_update - Check if cpufreq policy can be updated.
+ * @policy: cpufreq policy to check.
+ *
+ * Return 'true' if:
+ * - the local and remote CPUs share @policy,
+ * - dvfs_possible_from_any_cpu is set in @policy and the local CPU is not going
+ *   offline (in which case it is not expected to run cpufreq updates any more).
+ */
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
+{
+	return cpumask_test_cpu(smp_processor_id(), policy->cpus) ||
+		(policy->dvfs_possible_from_any_cpu &&
+		 rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
+}
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -82,12 +82,10 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
 	 * by the hardware, as calculating the frequency is pointless if
 	 * we cannot in fact act on it.
 	 *
-	 * For the slow switching platforms, the kthread is always scheduled on
-	 * the right set of CPUs and any CPU can find the next frequency and
-	 * schedule the kthread.
+	 * This is needed on the slow switching platforms too to prevent CPUs
+	 * going offline from leaving stale IRQ work items behind.
 	 */
-	if (sg_policy->policy->fast_switch_enabled &&
-	    !cpufreq_this_cpu_can_update(sg_policy->policy))
+	if (!cpufreq_this_cpu_can_update(sg_policy->policy))
 		return false;
 
 	if (unlikely(sg_policy->limits_changed)) {