From 65cd4f6c99c1170bd0114dbd71b978012ea44d28 Mon Sep 17 00:00:00 2001
From: Stephen Boyd
Date: Thu, 18 Jul 2013 16:21:18 -0700
Subject: [PATCH] arch_timer: Move to generic sched_clock framework

Register with the generic sched_clock framework now that it supports
64 bits. This fixes two problems with the current sched_clock support
for machines using the architected timers. First off, we don't
subtract the start value from subsequent sched_clock calls, so we can
potentially start off with sched_clock returning gigantic numbers.
Second, there is no support for suspend/resume handling, so problems
such as discussed in 6a4dae5 (ARM: 7565/1: sched: stop sched_clock()
during suspend, 2012-10-23) can happen without this patch. Finally, it
allows us to move the sched_clock setup into drivers/clocksource, out
of the arch ports.

Cc: Christopher Covington
Cc: Catalin Marinas
Acked-by: Will Deacon
Signed-off-by: Stephen Boyd
Signed-off-by: John Stultz
---
 arch/arm/kernel/arch_timer.c         | 14 --------------
 arch/arm64/Kconfig                   |  1 +
 arch/arm64/kernel/time.c             | 10 ----------
 drivers/clocksource/arm_arch_timer.c | 10 ++++++++++
 4 files changed, 11 insertions(+), 24 deletions(-)

diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 221f07b11ccb..1791f12c180b 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -11,7 +11,6 @@
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/errno.h>
-#include <linux/sched_clock.h>
 
 #include <asm/delay.h>
 
@@ -22,13 +21,6 @@ static unsigned long arch_timer_read_counter_long(void)
 	return arch_timer_read_counter();
 }
 
-static u32 sched_clock_mult __read_mostly;
-
-static unsigned long long notrace arch_timer_sched_clock(void)
-{
-	return arch_timer_read_counter() * sched_clock_mult;
-}
-
 static struct delay_timer arch_delay_timer;
 
 static void __init arch_timer_delay_timer_register(void)
@@ -48,11 +40,5 @@ int __init arch_timer_arch_init(void)
 
 	arch_timer_delay_timer_register();
 
-	/* Cache the sched_clock multiplier to save a divide in the hot path. */
-	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-	sched_clock_func = arch_timer_sched_clock;
-	pr_info("sched_clock: ARM arch timer >56 bits at %ukHz, resolution %uns\n",
-		arch_timer_rate / 1000, sched_clock_mult);
-
 	return 0;
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c04454876bcb..35fd0eb57270 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -14,6 +14,7 @@ config ARM64
 	select GENERIC_IOMAP
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
+	select GENERIC_SCHED_CLOCK
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_TIME_VSYSCALL
 	select HARDIRQS_SW_RESEND
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 03dc3718eb13..29c39d5d77e3 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-static u64 sched_clock_mult __read_mostly;
-
-unsigned long long notrace sched_clock(void)
-{
-	return arch_timer_read_counter() * sched_clock_mult;
-}
-
 void __init time_init(void)
 {
 	u32 arch_timer_rate;
@@ -78,9 +71,6 @@ void __init time_init(void)
 	if (!arch_timer_rate)
 		panic("Unable to initialise architected timer.\n");
 
-	/* Cache the sched_clock multiplier to save a divide in the hot path. */
-	sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
 	/* Calibrate the delay loop directly */
 	lpj_fine = arch_timer_rate / HZ;
 }
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fbd9ccd5e114..5d527895c74d 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
 #include <linux/io.h>
+#include <linux/sched_clock.h>
 
 #include <asm/arch_timer.h>
 #include <asm/virt.h>
@@ -471,6 +472,15 @@ static int __init arch_timer_register(void)
 		goto out;
 	}
 
+	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+	cyclecounter.mult = clocksource_counter.mult;
+	cyclecounter.shift = clocksource_counter.shift;
+	timecounter_init(&timecounter, &cyclecounter,
+			 arch_counter_get_cntvct());
+
+	/* 56 bits minimum, so we assume worst case rollover */
+	sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+
 	if (arch_timer_use_virtual) {
 		ppi = arch_timer_ppi[VIRT_PPI];
 		err = request_percpu_irq(ppi, arch_timer_handler_virt,
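
For readers unfamiliar with the generic framework, the driver-side contract is
small: hand sched_clock_register() a fast read hook plus the counter width and
rate, and the core takes care of the epoch offset, wrap extension and
suspend/resume that the removed per-arch code did not. A minimal sketch for a
hypothetical memory-mapped counter follows; the my_counter_* names and the
32-bit width are illustrative only (the arch timer above passes
arch_timer_read_counter, 56 bits and arch_timer_rate instead).

    #include <linux/init.h>
    #include <linux/io.h>
    #include <linux/sched_clock.h>

    /* Hypothetical MMIO base of a free-running, up-counting timer. */
    static void __iomem *my_counter_base;

    /* Read hook handed to the framework: fast, monotonic, notrace. */
    static u64 notrace my_counter_read(void)
    {
    	return readl_relaxed(my_counter_base);
    }

    static void __init my_counter_sched_clock_init(unsigned long rate)
    {
    	/*
    	 * 32-bit counter running at 'rate' Hz; the generic sched_clock
    	 * code derives the ns conversion and handles rollover, the start
    	 * offset and suspend/resume on the driver's behalf.
    	 */
    	sched_clock_register(my_counter_read, 32, rate);
    }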