posix-timers: Unify overrun/requeue_pending handling
hrtimer based posix-timers and posix-cpu-timers handle the update of the
rearming and overflow related status fields differently. Move that update
to the common rearming code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: John Stultz <john.stultz@linaro.org>
Link: http://lkml.kernel.org/r/20170530211656.484936964@linutronix.de
parent bab0aae9dc
commit af888d677a
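For illustration only, a minimal user-space sketch of the bookkeeping this commit consolidates. This is not kernel code: the struct fields mirror the k_itimer names, while itimer_model, rearm_timer() and schedule_next() are hypothetical stand-ins for the clock-specific rearm paths and the common code in do_schedule_next_timer().

/*
 * Simplified model: the clock-specific rearm only advances the timer and
 * accumulates overruns; the overrun/requeue_pending status update happens
 * once in the common path, as do_schedule_next_timer() does after this patch.
 */
#include <stdio.h>

struct itimer_model {
	int it_overrun;		/* overruns accumulated since the last delivery */
	int it_overrun_last;	/* overruns reported with the last signal */
	int it_requeue_pending;	/* bumped on every rearm */
};

/* Stand-in for the clock-specific code (hrtimer or cpu-timer variant). */
static void rearm_timer(struct itimer_model *t, int missed_periods)
{
	t->it_overrun += missed_periods;
}

/* Stand-in for the common rearm path. */
static void schedule_next(struct itimer_model *t, int missed_periods)
{
	rearm_timer(t, missed_periods);

	/* Status fields updated in one place for both timer types. */
	t->it_overrun_last = t->it_overrun;
	t->it_overrun = -1;
	++t->it_requeue_pending;
}

int main(void)
{
	struct itimer_model t = { .it_overrun = -1 };

	schedule_next(&t, 3);
	printf("overrun_last=%d requeue_pending=%d\n",
	       t.it_overrun_last, t.it_requeue_pending);
	return 0;
}

Keeping a single update point means the hrtimer and cpu-timer variants cannot drift apart in how they maintain these fields, which is what the change below achieves.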
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -527,6 +527,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
 		 * ticking in case the signal is deliverable next time.
 		 */
 		posix_cpu_timer_schedule(timer);
+		++timer->it_requeue_pending;
 	}
 }
 
@@ -997,12 +998,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		if (unlikely(p->exit_state))
-			goto out;
+			return;
 
 		/* Protect timer list r/w in arm_timer() */
 		sighand = lock_task_sighand(p, &flags);
 		if (!sighand)
-			goto out;
+			return;
 	} else {
 		/*
 		 * Protect arm_timer() and timer sampling in case of call to
@@ -1015,11 +1016,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			goto out;
+			return;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			unlock_task_sighand(p, &flags);
-			/* Optimizations: if the process is dying, no need to rearm */
-			goto out;
+			/* If the process is dying, no need to rearm */
+			goto unlock;
 		}
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
@@ -1031,12 +1031,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	 */
 	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
+unlock:
 	unlock_task_sighand(p, &flags);
-
-out:
-	timer->it_overrun_last = timer->it_overrun;
-	timer->it_overrun = -1;
-	++timer->it_requeue_pending;
 }
 
 /**
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -291,10 +291,6 @@ static void schedule_next_timer(struct k_itimer *timr)
 	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
 						timer->base->get_time(),
 						timr->it.real.interval);
-
-	timr->it_overrun_last = timr->it_overrun;
-	timr->it_overrun = -1;
-	++timr->it_requeue_pending;
 	hrtimer_restart(timer);
 }
 
@@ -315,18 +311,23 @@ void do_schedule_next_timer(struct siginfo *info)
 	unsigned long flags;
 
 	timr = lock_timer(info->si_tid, &flags);
+	if (!timr)
+		return;
 
-	if (timr && timr->it_requeue_pending == info->si_sys_private) {
+	if (timr->it_requeue_pending == info->si_sys_private) {
 		if (timr->it_clock < 0)
 			posix_cpu_timer_schedule(timr);
 		else
 			schedule_next_timer(timr);
 
+		timr->it_overrun_last = timr->it_overrun;
+		timr->it_overrun = -1;
+		++timr->it_requeue_pending;
+
 		info->si_overrun += timr->it_overrun_last;
 	}
 
-	if (timr)
-		unlock_timer(timr, flags);
+	unlock_timer(timr, flags);
 }
 
 int posix_timer_event(struct k_itimer *timr, int si_private)