Merge branch 'sched-idle-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull sched/idle changes from Ingo Molnar:
 "More idle code reorganization, to prepare for more integration.

  (Sent separately because it depended on pending timer work, which is
  now upstream)"

* 'sched-idle-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/idle: Add more comments to the code
  sched/idle: Move idle conditions in cpuidle_idle main function
  sched/idle: Reorganize the idle loop
  cpuidle/idle: Move the cpuidle_idle_call function to idle.c
  idle/cpuidle: Split cpuidle_idle_call main function into smaller functions
commit 05bf58ca4b
@@ -64,6 +64,26 @@ int cpuidle_play_dead(void)
         return -ENODEV;
 }
 
+/**
+ * cpuidle_enabled - check if the cpuidle framework is ready
+ * @dev: cpuidle device for this cpu
+ * @drv: cpuidle driver for this cpu
+ *
+ * Return 0 on success, otherwise:
+ * -NODEV : the cpuidle framework is not available
+ * -EBUSY : the cpuidle framework is not initialized
+ */
+int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+{
+        if (off || !initialized)
+                return -ENODEV;
+
+        if (!drv || !dev || !dev->enabled)
+                return -EBUSY;
+
+        return 0;
+}
+
 /**
  * cpuidle_enter_state - enter the state and update stats
  * @dev: cpuidle device for this cpu
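The hunk above factors the framework-readiness checks out of the old monolithic idle call into cpuidle_enabled(). A minimal userspace sketch of the caller pattern this enables follows: probe once, fall back to a default idle routine on failure. The struct layouts, the off/initialized flags and the main() harness are illustrative stand-ins, not the kernel's definitions.

/* Illustrative userspace mock-up of the cpuidle_enabled() caller pattern;
 * the structs and the 'off'/'initialized' globals are stand-ins, not the
 * kernel's definitions. Build with: cc -o demo demo.c
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

struct cpuidle_device { bool enabled; };
struct cpuidle_driver { const char *name; };

static bool off;                /* mock: framework disabled on the command line */
static bool initialized = true; /* mock: a driver has been registered */

static int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        if (off || !initialized)
                return -ENODEV;
        if (!drv || !dev || !dev->enabled)
                return -EBUSY;
        return 0;
}

static void arch_cpu_idle(void) { puts("default arch idle"); }

int main(void)
{
        struct cpuidle_driver drv = { .name = "mock" };
        struct cpuidle_device dev = { .enabled = true };

        /* The caller probes once per idle entry and falls back on failure. */
        if (cpuidle_enabled(&drv, &dev))
                arch_cpu_idle();
        else
                puts("cpuidle framework ready, governor may pick a state");
        return 0;
}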
@@ -109,63 +129,48 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 }
 
 /**
- * cpuidle_idle_call - the main idle loop
+ * cpuidle_select - ask the cpuidle framework to choose an idle state
  *
- * NOTE: no locks or semaphores should be used here
- * return non-zero on failure
+ * @drv: the cpuidle driver
+ * @dev: the cpuidle device
+ *
+ * Returns the index of the idle state.
  */
-int cpuidle_idle_call(void)
+int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
-        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
-        struct cpuidle_driver *drv;
-        int next_state, entered_state;
-        bool broadcast;
-
-        if (off || !initialized)
-                return -ENODEV;
-
-        /* check if the device is ready */
-        if (!dev || !dev->enabled)
-                return -EBUSY;
-
-        drv = cpuidle_get_cpu_driver(dev);
-
-        /* ask the governor for the next state */
-        next_state = cpuidle_curr_governor->select(drv, dev);
-        if (need_resched()) {
-                dev->last_residency = 0;
-                /* give the governor an opportunity to reflect on the outcome */
-                if (cpuidle_curr_governor->reflect)
-                        cpuidle_curr_governor->reflect(dev, next_state);
-                local_irq_enable();
-                return 0;
+        return cpuidle_curr_governor->select(drv, dev);
 }
 
-        broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+/**
+ * cpuidle_enter - enter into the specified idle state
+ *
+ * @drv: the cpuidle driver tied with the cpu
+ * @dev: the cpuidle device
+ * @index: the index in the idle state table
+ *
+ * Returns the index in the idle state, < 0 in case of error.
+ * The error code depends on the backend driver
+ */
+int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
+                  int index)
+{
+        if (cpuidle_state_is_coupled(dev, drv, index))
+                return cpuidle_enter_state_coupled(dev, drv, index);
+        return cpuidle_enter_state(dev, drv, index);
+}
 
-        if (broadcast &&
-            clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
-                return -EBUSY;
-
-        trace_cpu_idle_rcuidle(next_state, dev->cpu);
-
-        if (cpuidle_state_is_coupled(dev, drv, next_state))
-                entered_state = cpuidle_enter_state_coupled(dev, drv,
-                                                            next_state);
-        else
-                entered_state = cpuidle_enter_state(dev, drv, next_state);
-
-        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
-
-        if (broadcast)
-                clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
-
-        /* give the governor an opportunity to reflect on the outcome */
+/**
+ * cpuidle_reflect - tell the underlying governor what was the state
+ * we were in
+ *
+ * @dev : the cpuidle device
+ * @index: the index in the idle state table
+ *
+ */
+void cpuidle_reflect(struct cpuidle_device *dev, int index)
+{
         if (cpuidle_curr_governor->reflect)
-                cpuidle_curr_governor->reflect(dev, entered_state);
-
-        return 0;
+                cpuidle_curr_governor->reflect(dev, index);
 }
 
 /**
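After this hunk, a consumer drives idle in three explicit phases: ask the governor for a state (cpuidle_select), enter it (cpuidle_enter), then report the outcome back (cpuidle_reflect). The userspace sketch below mirrors that select -> enter -> reflect sequence; the latency-threshold "governor" and the state table are invented for illustration and bear no relation to the kernel's menu or ladder governors.

/* Userspace mock-up of the select -> enter -> reflect sequence implied by
 * the split-out API; the "governor" and "states" here are toy stand-ins.
 */
#include <stdio.h>

#define NR_STATES 3

static const unsigned int exit_latency_us[NR_STATES] = { 1, 50, 400 };
static unsigned long hits[NR_STATES];

static int mock_select(unsigned int predicted_sleep_us)
{
        /* pick the deepest state whose exit latency fits the prediction */
        int i, best = 0;

        for (i = 0; i < NR_STATES; i++)
                if (exit_latency_us[i] <= predicted_sleep_us)
                        best = i;
        return best;
}

static int mock_enter(int index)
{
        printf("entering state %d\n", index);
        return index;                /* the state actually reached */
}

static void mock_reflect(int entered)
{
        hits[entered]++;             /* feed the outcome back to the governor */
}

int main(void)
{
        unsigned int sleeps[] = { 5, 1000, 80 };

        for (unsigned int i = 0; i < sizeof(sleeps) / sizeof(*sleeps); i++) {
                int next = mock_select(sleeps[i]);   /* cpuidle_select()  */
                int got  = mock_enter(next);         /* cpuidle_enter()   */
                mock_reflect(got);                   /* cpuidle_reflect() */
        }
        printf("state hits: %lu %lu %lu\n", hits[0], hits[1], hits[2]);
        return 0;
}

Splitting the phases this way lets the caller (ultimately the scheduler's idle loop) decide what happens between them, for instance bailing out if a reschedule became pending after the selection.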
@@ -119,7 +119,15 @@ struct cpuidle_driver {
 
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
-extern int cpuidle_idle_call(void);
+
+extern int cpuidle_enabled(struct cpuidle_driver *drv,
+                           struct cpuidle_device *dev);
+extern int cpuidle_select(struct cpuidle_driver *drv,
+                          struct cpuidle_device *dev);
+extern int cpuidle_enter(struct cpuidle_driver *drv,
+                         struct cpuidle_device *dev, int index);
+extern void cpuidle_reflect(struct cpuidle_device *dev, int index);
+
 extern int cpuidle_register_driver(struct cpuidle_driver *drv);
 extern struct cpuidle_driver *cpuidle_get_driver(void);
 extern struct cpuidle_driver *cpuidle_driver_ref(void);
@@ -141,7 +149,16 @@ extern int cpuidle_play_dead(void);
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
-static inline int cpuidle_idle_call(void) { return -ENODEV; }
+static inline int cpuidle_enabled(struct cpuidle_driver *drv,
+                                  struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_select(struct cpuidle_driver *drv,
+                                 struct cpuidle_device *dev)
+{return -ENODEV; }
+static inline int cpuidle_enter(struct cpuidle_driver *drv,
+                                struct cpuidle_device *dev, int index)
+{return -ENODEV; }
+static inline void cpuidle_reflect(struct cpuidle_device *dev, int index) { }
 static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
 {return -ENODEV; }
 static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
@@ -163,6 +180,8 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
+        struct cpuidle_device *dev) {return NULL; }
 #endif
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
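All three header hunks follow the usual kernel configuration idiom: real prototypes when CONFIG_CPU_IDLE is set, and static inline stubs that return -ENODEV (or do nothing) otherwise, so call sites compile unchanged either way. A self-contained illustration of the idiom, with MOCK_CONFIG_CPU_IDLE standing in for the real config symbol:

/* Minimal illustration of the config-stub header idiom used above.
 * Comment out the #define to exercise the stub path.
 */
#include <stdio.h>
#include <errno.h>

#define MOCK_CONFIG_CPU_IDLE 1

#ifdef MOCK_CONFIG_CPU_IDLE
/* "real" implementation available when the feature is configured in */
static int cpuidle_select(void) { return 2; /* some state index */ }
#else
/* stub keeps call sites compiling when the feature is configured out */
static inline int cpuidle_select(void) { return -ENODEV; }
#endif

int main(void)
{
        int state = cpuidle_select();

        if (state < 0)
                puts("cpuidle configured out, using default idle");
        else
                printf("governor chose state %d\n", state);
        return 0;
}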
@@ -63,6 +63,136 @@ void __weak arch_cpu_idle(void)
         local_irq_enable();
 }
 
+/**
+ * cpuidle_idle_call - the main idle function
+ *
+ * NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
+ */
+static int cpuidle_idle_call(void)
+{
+        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+        int next_state, entered_state, ret;
+        bool broadcast;
+
+        /*
+         * Check if the idle task must be rescheduled. If it is the
+         * case, exit the function after re-enabling the local irq and
+         * set again the polling flag
+         */
+        if (current_clr_polling_and_test()) {
+                local_irq_enable();
+                __current_set_polling();
+                return 0;
+        }
+
+        /*
+         * During the idle period, stop measuring the disabled irqs
+         * critical sections latencies
+         */
+        stop_critical_timings();
+
+        /*
+         * Tell the RCU framework we are entering an idle section,
+         * so no more rcu read side critical sections and one more
+         * step to the grace period
+         */
+        rcu_idle_enter();
+
+        /*
+         * Check if the cpuidle framework is ready, otherwise fallback
+         * to the default arch specific idle method
+         */
+        ret = cpuidle_enabled(drv, dev);
+
+        if (!ret) {
+                /*
+                 * Ask the governor to choose an idle state it thinks
+                 * it is convenient to go to. There is *always* a
+                 * convenient idle state
+                 */
+                next_state = cpuidle_select(drv, dev);
+
+                /*
+                 * The idle task must be scheduled, it is pointless to
+                 * go to idle, just update no idle residency and get
+                 * out of this function
+                 */
+                if (current_clr_polling_and_test()) {
+                        dev->last_residency = 0;
+                        entered_state = next_state;
+                        local_irq_enable();
+                } else {
+                        broadcast = !!(drv->states[next_state].flags &
+                                       CPUIDLE_FLAG_TIMER_STOP);
+
+                        if (broadcast)
+                                /*
+                                 * Tell the time framework to switch
+                                 * to a broadcast timer because our
+                                 * local timer will be shutdown. If a
+                                 * local timer is used from another
+                                 * cpu as a broadcast timer, this call
+                                 * may fail if it is not available
+                                 */
+                                ret = clockevents_notify(
+                                        CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+                                        &dev->cpu);
+
+                        if (!ret) {
+                                trace_cpu_idle_rcuidle(next_state, dev->cpu);
+
+                                /*
+                                 * Enter the idle state previously
+                                 * returned by the governor
+                                 * decision. This function will block
+                                 * until an interrupt occurs and will
+                                 * take care of re-enabling the local
+                                 * interrupts
+                                 */
+                                entered_state = cpuidle_enter(drv, dev,
+                                                              next_state);
+
+                                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT,
+                                                       dev->cpu);
+
+                                if (broadcast)
+                                        clockevents_notify(
+                                                CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+                                                &dev->cpu);
+
+                                /*
+                                 * Give the governor an opportunity to reflect on the
+                                 * outcome
+                                 */
+                                cpuidle_reflect(dev, entered_state);
+                        }
+                }
+        }
+
+        /*
+         * We can't use the cpuidle framework, let's use the default
+         * idle routine
+         */
+        if (ret)
+                arch_cpu_idle();
+
+        __current_set_polling();
+
+        /*
+         * It is up to the idle functions to enable back the local
+         * interrupt
+         */
+        if (WARN_ON_ONCE(irqs_disabled()))
+                local_irq_enable();
+
+        rcu_idle_exit();
+        start_critical_timings();
+
+        return 0;
+}
 
 /*
  * Generic idle loop implementation
  */
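The relocated cpuidle_idle_call() is easier to follow with the RCU, tracing and clockevents plumbing stripped away. The sketch below compresses its decision ladder (reschedule check, framework probe, governor selection, broadcast-timer handoff, enter/reflect, arch fallback) into runnable userspace C; every function and flag in it is a mock, including the simulated broadcast failure.

/* Compressed userspace sketch of the decision ladder in the new
 * cpuidle_idle_call(); all functions are mocks, and the "broadcast"
 * failure path is simulated rather than a real clockevents call.
 */
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static bool need_resched_now;      /* mock current_clr_polling_and_test() */
static bool framework_ready = true;
static bool broadcast_setup_fails; /* mock BROADCAST_ENTER notify failure */

static int  mock_select(void)    { return 1; }
static int  mock_enter(int s)    { printf("idle in state %d\n", s); return s; }
static void mock_reflect(int s)  { printf("reflect state %d\n", s); }
static void mock_arch_idle(void) { puts("default arch idle"); }

static int idle_call(void)
{
        int ret, next, entered;

        if (need_resched_now)
                return 0;                    /* work pending: skip idle */

        ret = framework_ready ? 0 : -ENODEV;
        if (!ret) {
                next = mock_select();        /* governor decision */

                /* the broadcast-timer handoff may fail; fall through if so */
                ret = broadcast_setup_fails ? -EBUSY : 0;
                if (!ret) {
                        entered = mock_enter(next);
                        mock_reflect(entered);
                }
        }

        if (ret)                             /* any failure: default idle */
                mock_arch_idle();
        return 0;
}

int main(void)
{
        idle_call();                         /* happy path */
        broadcast_setup_fails = true;
        idle_call();                         /* falls back to arch idle */
        return 0;
}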
@@ -90,23 +220,11 @@ static void cpu_idle_loop(void)
                         * know that the IPI is going to arrive right
                         * away
                         */
-                        if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+                        if (cpu_idle_force_poll || tick_check_broadcast_expired())
                                 cpu_idle_poll();
-                        } else {
-                                if (!current_clr_polling_and_test()) {
-                                        stop_critical_timings();
-                                        rcu_idle_enter();
-                                        if (cpuidle_idle_call())
-                                                arch_cpu_idle();
-                                        if (WARN_ON_ONCE(irqs_disabled()))
-                                                local_irq_enable();
-                                        rcu_idle_exit();
-                                        start_critical_timings();
-                                } else {
-                                        local_irq_enable();
-                                }
-                                __current_set_polling();
-                        }
+                        else
+                                cpuidle_idle_call();
 
                         arch_cpu_idle_exit();
                 }