ia64: convert to legacy_timer_tick
ia64 is the only architecture that calls xtime_update() in a loop,
once for each jiffie that has passed since the last event.
Before commit 3171a0305d
("[PATCH] simplify update_times (avoid
jiffies/jiffies_64 aliasing problem)") in 2006, it could not actually do
this any differently, but now it seems simpler to just pass the number
of jiffies that passed in the meantime.
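As a rough illustration of the new accounting (a standalone user-space sketch with made-up
counter values and a local DIV_ROUND_UP macro, not the kernel code itself), the handler now
computes the number of elapsed timer periods in one step instead of looping:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long itm_delta = 1000000;	/* hypothetical cycles per timer tick */
	unsigned long new_itm   = 5000000;	/* counter value the next tick was due at */
	unsigned long cur_itm   = 7300000;	/* current counter value */

	/* count how many whole periods have passed instead of looping once per jiffy */
	unsigned long ticks = DIV_ROUND_UP(cur_itm - new_itm, itm_delta);
	new_itm += ticks * itm_delta;

	printf("ticks=%lu next=%lu\n", ticks, new_itm);	/* prints ticks=3 next=8000000 */
	return 0;
}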
While this loses the ability to process interrupts in the middle of
the timer tick by calling local_irq_enable(), doing so is fairly
peculiar anyway and it seems better to just do what everyone
else does here.
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
parent b3550164a1
commit 2b49ddcef2
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -46,6 +46,7 @@ config IA64
 	select ARCH_THREAD_STACK_ALLOCATOR
 	select ARCH_CLOCKSOURCE_DATA
 	select GENERIC_TIME_VSYSCALL
+	select LEGACY_TIMER_TICK
 	select SWIOTLB
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select HAVE_MOD_ARCH_SPECIFIC
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -161,40 +161,30 @@ void vtime_account_idle(struct task_struct *tsk)
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {
-	unsigned long new_itm;
+	unsigned long cur_itm, new_itm, ticks;
 
 	if (cpu_is_offline(smp_processor_id())) {
 		return IRQ_HANDLED;
 	}
 
 	new_itm = local_cpu_data->itm_next;
+	cur_itm = ia64_get_itc();
 
-	if (!time_after(ia64_get_itc(), new_itm))
+	if (!time_after(cur_itm, new_itm)) {
 		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
-		       ia64_get_itc(), new_itm);
-
-	profile_tick(CPU_PROFILING);
-
-	while (1) {
-		update_process_times(user_mode(get_irq_regs()));
-
-		new_itm += local_cpu_data->itm_delta;
-
-		if (smp_processor_id() == time_keeper_id)
-			xtime_update(1);
-
-		local_cpu_data->itm_next = new_itm;
-
-		if (time_after(new_itm, ia64_get_itc()))
-			break;
-
-		/*
-		 * Allow IPIs to interrupt the timer loop.
-		 */
-		local_irq_enable();
-		local_irq_disable();
+		       cur_itm, new_itm);
+		ticks = 1;
+	} else {
+		ticks = DIV_ROUND_UP(cur_itm - new_itm,
+				     local_cpu_data->itm_delta);
+		new_itm += ticks * local_cpu_data->itm_delta;
 	}
 
+	if (smp_processor_id() != time_keeper_id)
+		ticks = 0;
+
+	legacy_timer_tick(ticks);
+
 	do {
 		/*
 		 * If we're too close to the next clock tick for
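For context, the generic helper the handler now calls behaves roughly as sketched below; this
is a simplified view (locking around the jiffies update omitted), not the exact code in
kernel/time/timer.c. It also shows why ticks is forced to 0 on CPUs other than time_keeper_id:
only the timekeeping CPU advances jiffies, but every CPU still gets process accounting and
profiling on each tick.

/*
 * Simplified sketch of legacy_timer_tick(); the real implementation
 * lives in kernel/time/timer.c and may differ in detail.
 */
void legacy_timer_tick(unsigned long ticks)
{
	if (ticks)
		do_timer(ticks);	/* advance jiffies_64 / wall time by 'ticks' */

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}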