timekeeping: Update clocksource with stop_machine
update_wall_time calls change_clocksource HZ times per second to check
if a new clock source is available. In close to 100% of all calls there
is no new clock. Replace the tick based check by an update done with
stop_machine.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@elte.hu>
Acked-by: John Stultz <johnstul@us.ibm.com>
Cc: Daniel Walker <dwalker@fifo99.com>
LKML-Reference: <20090814134810.711836357@de.ibm.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit 75c5158f70
parent 2ba2a3054f
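For orientation before the diff: the patch moves the actual clocksource switch out of the periodic tick and into a stop_machine() callback, so the switch happens once, when a better clock has been selected, instead of being polled for HZ times per second. The sketch below condenses the two functions the patch adds on the timekeeping side (the enable/disable error handling is omitted); it is an illustrative outline of the hunks that follow, not the literal patched code.

#include <linux/clocksource.h>
#include <linux/stop_machine.h>

/*
 * Runs on one CPU while stop_machine() holds every other online CPU in
 * the stopper, so no CPU can be in the middle of a timekeeping update
 * while the clock is swapped.
 */
static int change_clocksource(void *data)
{
	struct clocksource *new = data;

	timekeeping_forward_now();		/* account elapsed time on the old clock */
	timekeeper_setup_internals(new);	/* install the new clock */
	return 0;
}

/* Called from clocksource.c with clocksource_mutex held. */
void timekeeping_notify(struct clocksource *clock)
{
	if (timekeeper.clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}

Because selection now happens only from sleepable process context (registration, rating changes, the sysfs override and the boot parameter), clocksource_lock can become clocksource_mutex, and update_wall_time() no longer has to check for a pending clocksource on every tick.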
include/linux/clocksource.h

@@ -291,4 +291,6 @@ static inline void update_vsyscall_tz(void)
 }
 #endif
 
+extern void timekeeping_notify(struct clocksource *clock);
+
 #endif /* _LINUX_CLOCKSOURCE_H */
kernel/time/clocksource.c

@@ -109,35 +109,17 @@ EXPORT_SYMBOL(timecounter_cyc2time);
 /*[Clocksource internal variables]---------
  * curr_clocksource:
  *	currently selected clocksource.
- * next_clocksource:
- *	pending next selected clocksource.
  * clocksource_list:
  *	linked list with the registered clocksources
- * clocksource_lock:
- *	protects manipulations to curr_clocksource and next_clocksource
- *	and the clocksource_list
+ * clocksource_mutex:
+ *	protects manipulations to curr_clocksource and the clocksource_list
  * override_name:
  *	Name of the user-specified clocksource.
  */
 static struct clocksource *curr_clocksource;
-static struct clocksource *next_clocksource;
 static LIST_HEAD(clocksource_list);
-static DEFINE_SPINLOCK(clocksource_lock);
+static DEFINE_MUTEX(clocksource_mutex);
 static char override_name[32];
-static int finished_booting;
-
-/* clocksource_done_booting - Called near the end of core bootup
- *
- * Hack to avoid lots of clocksource churn at boot time.
- * We use fs_initcall because we want this to start before
- * device_initcall but after subsys_initcall.
- */
-static int __init clocksource_done_booting(void)
-{
-	finished_booting = 1;
-	return 0;
-}
-fs_initcall(clocksource_done_booting);
 
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
 static LIST_HEAD(watchdog_list);

@@ -356,18 +338,16 @@ static inline void clocksource_resume_watchdog(void) { }
 void clocksource_resume(void)
 {
 	struct clocksource *cs;
-	unsigned long flags;
 
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 
-	list_for_each_entry(cs, &clocksource_list, list) {
+	list_for_each_entry(cs, &clocksource_list, list)
 		if (cs->resume)
 			cs->resume();
-	}
 
 	clocksource_resume_watchdog();
 
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 
 /**

@@ -383,28 +363,13 @@ void clocksource_touch_watchdog(void)
 }
 
 #ifdef CONFIG_GENERIC_TIME
-/**
- * clocksource_get_next - Returns the selected clocksource
- *
- */
-struct clocksource *clocksource_get_next(void)
-{
-	unsigned long flags;
 
-	spin_lock_irqsave(&clocksource_lock, flags);
-	if (next_clocksource && finished_booting) {
-		curr_clocksource = next_clocksource;
-		next_clocksource = NULL;
-	}
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+static int finished_booting;
 
-	return curr_clocksource;
-}
-
 /**
  * clocksource_select - Select the best clocksource available
  *
- * Private function. Must hold clocksource_lock when called.
+ * Private function. Must hold clocksource_mutex when called.
  *
  * Select the clocksource with the best rating, or the clocksource,
  * which is selected by userspace override.

@@ -413,7 +378,7 @@ static void clocksource_select(void)
 {
 	struct clocksource *best, *cs;
 
-	if (list_empty(&clocksource_list))
+	if (!finished_booting || list_empty(&clocksource_list))
 		return;
 	/* First clocksource on the list has the best rating. */
 	best = list_first_entry(&clocksource_list, struct clocksource, list);

@@ -438,13 +403,31 @@ static void clocksource_select(void)
 			best = cs;
 		break;
 	}
-	if (curr_clocksource != best)
-		next_clocksource = best;
+	if (curr_clocksource != best) {
+		printk(KERN_INFO "Switching to clocksource %s\n", best->name);
+		curr_clocksource = best;
+		timekeeping_notify(curr_clocksource);
+	}
 }
 
+/*
+ * clocksource_done_booting - Called near the end of core bootup
+ *
+ * Hack to avoid lots of clocksource churn at boot time.
+ * We use fs_initcall because we want this to start before
+ * device_initcall but after subsys_initcall.
+ */
+static int __init clocksource_done_booting(void)
+{
+	finished_booting = 1;
+	clocksource_select();
+	return 0;
+}
+fs_initcall(clocksource_done_booting);
+
 #else /* CONFIG_GENERIC_TIME */
 
-static void clocksource_select(void) { }
+static inline void clocksource_select(void) { }
 
 #endif
 

@@ -471,13 +454,11 @@ static void clocksource_enqueue(struct clocksource *cs)
  */
 int clocksource_register(struct clocksource *cs)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	clocksource_enqueue(cs);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
 	clocksource_enqueue_watchdog(cs);
+	mutex_unlock(&clocksource_mutex);
 	return 0;
 }
 EXPORT_SYMBOL(clocksource_register);

@@ -487,14 +468,12 @@ EXPORT_SYMBOL(clocksource_register);
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	list_del(&cs->list);
 	cs->rating = rating;
 	clocksource_enqueue(cs);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
 

@@ -503,13 +482,11 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 void clocksource_unregister(struct clocksource *cs)
 {
-	unsigned long flags;
-
+	mutex_lock(&clocksource_mutex);
 	clocksource_dequeue_watchdog(cs);
-	spin_lock_irqsave(&clocksource_lock, flags);
 	list_del(&cs->list);
 	clocksource_select();
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_unregister);
 

@@ -527,9 +504,9 @@ sysfs_show_current_clocksources(struct sys_device *dev,
 {
 	ssize_t count = 0;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 	count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	return count;
 }

@@ -557,14 +534,14 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
 	if (buf[count-1] == '\n')
 		count--;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 
 	if (count > 0)
 		memcpy(override_name, buf, count);
 	override_name[count] = 0;
 	clocksource_select();
 
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	return ret;
 }

@@ -584,7 +561,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
 	struct clocksource *src;
 	ssize_t count = 0;
 
-	spin_lock_irq(&clocksource_lock);
+	mutex_lock(&clocksource_mutex);
 	list_for_each_entry(src, &clocksource_list, list) {
 		/*
 		 * Don't show non-HRES clocksource if the tick code is

@@ -596,7 +573,7 @@ sysfs_show_available_clocksources(struct sys_device *dev,
 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
 			  "%s ", src->name);
 	}
-	spin_unlock_irq(&clocksource_lock);
+	mutex_unlock(&clocksource_mutex);
 
 	count += snprintf(buf + count,
 			  max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");

@@ -651,11 +628,10 @@ device_initcall(init_clocksource_sysfs);
  */
 static int __init boot_override_clocksource(char* str)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&clocksource_lock, flags);
+	mutex_lock(&clocksource_mutex);
 	if (str)
 		strlcpy(override_name, str, sizeof(override_name));
-	spin_unlock_irqrestore(&clocksource_lock, flags);
+	mutex_unlock(&clocksource_mutex);
 	return 1;
 }
 
kernel/time/timekeeping.c

@@ -18,6 +18,7 @@
 #include <linux/jiffies.h>
 #include <linux/time.h>
 #include <linux/tick.h>
+#include <linux/stop_machine.h>
 
 /* Structure holding internal timekeeping values. */
 struct timekeeper {

@@ -179,6 +180,7 @@ void timekeeping_leap_insert(int leapsecond)
 }
 
 #ifdef CONFIG_GENERIC_TIME
+
 /**
  * timekeeping_forward_now - update clock to the current time
  *

@@ -351,31 +353,40 @@ EXPORT_SYMBOL(do_settimeofday);
  *
  * Accumulates current time interval and initializes new clocksource
  */
-static void change_clocksource(void)
+static int change_clocksource(void *data)
 {
 	struct clocksource *new, *old;
 
-	new = clocksource_get_next();
-
-	if (!new || timekeeper.clock == new)
-		return;
+	new = (struct clocksource *) data;
 
 	timekeeping_forward_now();
+	if (!new->enable || new->enable(new) == 0) {
+		old = timekeeper.clock;
+		timekeeper_setup_internals(new);
+		if (old->disable)
+			old->disable(old);
+	}
+	return 0;
+}
 
-	if (new->enable && !new->enable(new))
+/**
+ * timekeeping_notify - Install a new clock source
+ * @clock:	pointer to the clock source
+ *
+ * This function is called from clocksource.c after a new, better clock
+ * source has been registered. The caller holds the clocksource_mutex.
+ */
+void timekeeping_notify(struct clocksource *clock)
+{
+	if (timekeeper.clock == clock)
 		return;
-
-	old = timekeeper.clock;
-	timekeeper_setup_internals(new);
-
-	if (old->disable)
-		old->disable(old);
-
+	stop_machine(change_clocksource, clock, NULL);
 	tick_clock_notify();
 }
+
 #else /* GENERIC_TIME */
 
 static inline void timekeeping_forward_now(void) { }
-static inline void change_clocksource(void) { }
 
 /**
  * ktime_get - get the monotonic time in ktime_t format

@@ -416,6 +427,7 @@ void ktime_get_ts(struct timespec *ts)
 				ts->tv_nsec + tomono.tv_nsec);
 }
 EXPORT_SYMBOL_GPL(ktime_get_ts);
+
 #endif /* !GENERIC_TIME */
 
 /**

@@ -773,7 +785,6 @@ void update_wall_time(void)
 	update_xtime_cache(nsecs);
 
 	/* check to see if there is a new clocksource to use */
-	change_clocksource();
 	update_vsyscall(&xtime, timekeeper.clock);
 }
 
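As a usage note, the registration path is now the only place a clocksource switch can start. Condensed from the clocksource.c hunks above (watchdog details aside), the flow for a driver calling clocksource_register() looks like this:

int clocksource_register(struct clocksource *cs)
{
	mutex_lock(&clocksource_mutex);
	clocksource_enqueue(cs);	/* keep the list sorted by rating */
	clocksource_select();		/* best != current? -> timekeeping_notify() -> stop_machine() */
	clocksource_enqueue_watchdog(cs);
	mutex_unlock(&clocksource_mutex);
	return 0;
}

Until clocksource_done_booting() runs as an fs_initcall and sets finished_booting, clocksource_select() returns early, which keeps early boot from churning through every clocksource that registers before the system can actually make use of it.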