2018-11-01 02:21:09 +08:00
|
|
|
// SPDX-License-Identifier: GPL-2.0+
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* 2002-10-15 Posix Clocks & timers
|
|
|
|
* by George Anzinger george@mvista.com
|
|
|
|
* Copyright (C) 2002 2003 by MontaVista Software.
|
|
|
|
*
|
|
|
|
* 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
|
|
|
|
* Copyright (C) 2004 Boris Hu
|
|
|
|
*
|
2018-11-01 02:21:16 +08:00
|
|
|
* These are all the functions necessary to implement POSIX clocks & timers
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/time.h>
|
2006-03-23 19:00:24 +08:00
|
|
|
#include <linux/mutex.h>
|
2017-02-05 21:35:41 +08:00
|
|
|
#include <linux/sched/task.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/compiler.h>
|
2013-03-11 17:12:21 +08:00
|
|
|
#include <linux/hash.h>
|
2011-02-01 21:52:35 +08:00
|
|
|
#include <linux/posix-clock.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/posix-timers.h>
|
|
|
|
#include <linux/syscalls.h>
|
|
|
|
#include <linux/wait.h>
|
|
|
|
#include <linux/workqueue.h>
|
2011-05-24 02:51:41 +08:00
|
|
|
#include <linux/export.h>
|
2013-03-11 17:12:21 +08:00
|
|
|
#include <linux/hashtable.h>
|
2017-06-07 16:42:31 +08:00
|
|
|
#include <linux/compat.h>
|
2018-02-16 00:21:55 +08:00
|
|
|
#include <linux/nospec.h>
|
2019-11-12 09:27:00 +08:00
|
|
|
#include <linux/time_namespace.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2014-07-17 05:04:02 +08:00
|
|
|
#include "timekeeping.h"
|
2017-05-31 05:15:41 +08:00
|
|
|
#include "posix-timers.h"
|
2014-07-17 05:04:02 +08:00
|
|
|
|
2023-04-26 02:49:01 +08:00
|
|
|
static struct kmem_cache *posix_timers_cache;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/*
|
2023-04-26 02:49:01 +08:00
|
|
|
* Timers are managed in a hash table for lockless lookup. The hash key is
|
|
|
|
* constructed from current::signal and the timer ID and the timer is
|
|
|
|
* matched against current::signal and the timer ID when walking the hash
|
|
|
|
* bucket list.
|
|
|
|
*
|
|
|
|
* This allows checkpoint/restore to reconstruct the exact timer IDs for
|
|
|
|
* a process.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2013-03-11 17:12:21 +08:00
|
|
|
static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
|
|
|
|
static DEFINE_SPINLOCK(hash_lock);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-05-31 05:15:39 +08:00
|
|
|
static const struct k_clock * const posix_clocks[];
|
|
|
|
static const struct k_clock *clockid_to_kclock(const clockid_t id);
|
2017-06-13 01:39:49 +08:00
|
|
|
static const struct k_clock clock_realtime, clock_monotonic;
|
2017-05-31 05:15:39 +08:00
|
|
|
|
2023-04-26 02:49:20 +08:00
|
|
|
/* SIGEV_THREAD_ID cannot share a bit with the other SIGEV values. */
|
2005-04-17 06:20:36 +08:00
|
|
|
#if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
|
2023-04-26 02:49:20 +08:00
|
|
|
~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
|
2005-04-17 06:20:36 +08:00
|
|
|
#error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
|
|
|
|
#endif
|
|
|
|
|
2010-10-21 06:57:34 +08:00
|
|
|
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);

/*
 * Look up a timer by ID and lock its it_lock. Evaluates to the locked
 * k_itimer or NULL when no valid timer with that ID exists. The
 * __cond_lock() annotation tells sparse that it_lock is conditionally
 * acquired.
 */
#define lock_timer(tid, flags)						   \
({	struct k_itimer *__timr;					   \
	__cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
	__timr;								   \
})
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2013-03-11 17:12:21 +08:00
|
|
|
/*
 * Hash a (signal_struct, timer id) pair into a bucket index of
 * posix_timers_hashtable. Mixing in the signal_struct pointer makes
 * timer IDs effectively per-process.
 */
static int hash(struct signal_struct *sig, unsigned int nr)
{
	return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
}
|
|
|
|
|
|
|
|
/*
 * Walk one hash bucket and return the timer matching @sig and @id, or
 * NULL if there is none. Safe under RCU or with hash_lock held;
 * lockdep_is_held() covers the locked case for hlist_for_each_entry_rcu().
 */
static struct k_itimer *__posix_timers_find(struct hlist_head *head,
					    struct signal_struct *sig,
					    timer_t id)
{
	struct k_itimer *timer;

	hlist_for_each_entry_rcu(timer, head, t_hash, lockdep_is_held(&hash_lock)) {
		/* timer->it_signal can be set concurrently */
		if ((READ_ONCE(timer->it_signal) == sig) && (timer->it_id == id))
			return timer;
	}
	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Look up a timer by ID in the context of the current process. Returns
 * NULL when the ID does not belong to a timer of this process.
 */
static struct k_itimer *posix_timer_by_id(timer_t id)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];

	return __posix_timers_find(head, sig, id);
}
|
|
|
|
|
|
|
|
/*
 * Allocate a free timer ID for @timer and insert it into the hash table.
 *
 * Scans the per-process ID space starting at signal::next_posix_timer_id
 * until an unused ID is found. Returns the new ID (>= 0) on success or
 * -EAGAIN when the whole positive int space is in use.
 */
static int posix_timer_add(struct k_itimer *timer)
{
	struct signal_struct *sig = current->signal;
	struct hlist_head *head;
	unsigned int cnt, id;

	/*
	 * FIXME: Replace this by a per signal struct xarray once there is
	 * a plan to handle the resulting CRIU regression gracefully.
	 */
	for (cnt = 0; cnt <= INT_MAX; cnt++) {
		spin_lock(&hash_lock);
		id = sig->next_posix_timer_id;

		/* Write the next ID back. Clamp it to the positive space */
		sig->next_posix_timer_id = (id + 1) & INT_MAX;

		head = &posix_timers_hashtable[hash(sig, id)];
		if (!__posix_timers_find(head, sig, id)) {
			hlist_add_head_rcu(&timer->t_hash, head);
			spin_unlock(&hash_lock);
			return id;
		}
		spin_unlock(&hash_lock);
	}
	/* POSIX return code when no timer ID could be allocated */
	return -EAGAIN;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Counterpart to lock_timer(): drop it_lock and restore interrupt state. */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}
|
|
|
|
|
2019-11-12 09:26:55 +08:00
|
|
|
/* clock_gettime(CLOCK_REALTIME): read the wall clock as a timespec64. */
static int posix_get_realtime_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_real_ts64(tp);
	return 0;
}
|
|
|
|
|
2019-11-12 09:26:58 +08:00
|
|
|
/* Read CLOCK_REALTIME as a ktime_t, used for absolute timer expiry. */
static ktime_t posix_get_realtime_ktime(clockid_t which_clock)
{
	return ktime_get_real();
}
|
|
|
|
|
2011-02-01 21:51:48 +08:00
|
|
|
/* clock_settime(CLOCK_REALTIME): set the wall clock. */
static int posix_clock_realtime_set(const clockid_t which_clock,
				    const struct timespec64 *tp)
{
	return do_sys_settimeofday64(tp, NULL);
}
|
|
|
|
|
2011-02-01 21:52:26 +08:00
|
|
|
/* clock_adjtime(CLOCK_REALTIME): adjust the wall clock via adjtimex. */
static int posix_clock_realtime_adj(const clockid_t which_clock,
				    struct __kernel_timex *t)
{
	return do_adjtimex(t);
}
|
|
|
|
|
2019-11-12 09:26:55 +08:00
|
|
|
/*
 * clock_gettime(CLOCK_MONOTONIC): read the monotonic clock, adjusted
 * for the caller's time namespace offset.
 */
static int posix_get_monotonic_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2019-11-12 09:26:58 +08:00
|
|
|
/* Read CLOCK_MONOTONIC as a ktime_t, used for absolute timer expiry. */
static ktime_t posix_get_monotonic_ktime(clockid_t which_clock)
{
	return ktime_get();
}
|
|
|
|
|
2017-03-27 03:04:14 +08:00
|
|
|
/*
 * clock_gettime(CLOCK_MONOTONIC_RAW): monotonic clock without NTP
 * frequency adjustment, shifted by the time namespace offset.
 */
static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_raw_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|
|
|
|
|
2017-03-27 03:04:14 +08:00
|
|
|
/* clock_gettime(CLOCK_REALTIME_COARSE): tick-granular wall clock read. */
static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_coarse_real_ts64(tp);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * clock_gettime(CLOCK_MONOTONIC_COARSE): tick-granular monotonic read,
 * shifted by the time namespace offset.
 */
static int posix_get_monotonic_coarse(clockid_t which_clock,
				      struct timespec64 *tp)
{
	ktime_get_coarse_ts64(tp);
	timens_add_monotonic(tp);
	return 0;
}
|
|
|
|
|
2017-03-27 03:04:15 +08:00
|
|
|
/* clock_getres() for the coarse clocks: resolution is the tick length. */
static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
{
	*tp = ktime_to_timespec64(KTIME_LOW_RES);
	return 0;
}
|
2011-02-16 02:52:57 +08:00
|
|
|
|
2019-11-12 09:26:55 +08:00
|
|
|
/*
 * clock_gettime(CLOCK_BOOTTIME): monotonic clock including suspend time,
 * shifted by the time namespace boottime offset.
 */
static int posix_get_boottime_timespec(const clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_boottime_ts64(tp);
	timens_add_boottime(tp);
	return 0;
}
|
|
|
|
|
2019-11-12 09:26:58 +08:00
|
|
|
/* Read CLOCK_BOOTTIME as a ktime_t, used for absolute timer expiry. */
static ktime_t posix_get_boottime_ktime(const clockid_t which_clock)
{
	return ktime_get_boottime();
}
|
|
|
|
|
2019-11-12 09:26:55 +08:00
|
|
|
/* clock_gettime(CLOCK_TAI): international atomic time (no leap seconds). */
static int posix_get_tai_timespec(clockid_t which_clock, struct timespec64 *tp)
{
	ktime_get_clocktai_ts64(tp);
	return 0;
}
|
2011-02-16 02:52:57 +08:00
|
|
|
|
2019-11-12 09:26:58 +08:00
|
|
|
/* Read CLOCK_TAI as a ktime_t, used for absolute timer expiry. */
static ktime_t posix_get_tai_ktime(clockid_t which_clock)
{
	return ktime_get_clocktai();
}
|
|
|
|
|
2017-03-27 03:04:15 +08:00
|
|
|
/* clock_getres() for hrtimer based clocks: the hrtimer resolution. */
static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
{
	tp->tv_sec = 0;
	tp->tv_nsec = hrtimer_resolution;
	return 0;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Boot-time setup: create the slab cache for struct k_itimer.
 * SLAB_PANIC makes a failure here fatal, so no error path is needed.
 */
static __init int init_posix_timers(void)
{
	posix_timers_cache = kmem_cache_create("posix_timers_cache",
					       sizeof(struct k_itimer), 0,
					       SLAB_PANIC | SLAB_ACCOUNT, NULL);
	return 0;
}
__initcall(init_posix_timers);
|
|
|
|
|
2018-06-26 21:21:32 +08:00
|
|
|
/*
 * The siginfo si_overrun field and the return value of timer_getoverrun(2)
 * are of type int. Clamp the overrun value to INT_MAX.
 *
 * The addition is done in s64 so the clamp check itself cannot overflow.
 */
static inline int timer_overrun_to_int(struct k_itimer *timr, int baseval)
{
	s64 sum = timr->it_overrun_last + (s64)baseval;

	return sum > (s64)INT_MAX ? INT_MAX : (int)sum;
}
|
|
|
|
|
2017-05-31 05:15:47 +08:00
|
|
|
/*
 * Rearm an expired hrtimer based interval timer: forward the expiry past
 * the current clock value, accumulate the missed periods into it_overrun
 * and restart the hrtimer.
 */
static void common_hrtimer_rearm(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
					    timr->it_interval);
	hrtimer_restart(timer);
}
|
|
|
|
|
|
|
|
/*
 * This function is called from the signal delivery code if
 * info->si_sys_private is not zero, which indicates that the timer has to
 * be rearmed. Restart the timer and update info::si_overrun.
 */
void posixtimer_rearm(struct kernel_siginfo *info)
{
	struct k_itimer *timr;
	unsigned long flags;

	timr = lock_timer(info->si_tid, &flags);
	if (!timr)
		return;

	/*
	 * Only rearm when the timer is periodic and the pending rearm
	 * request (si_sys_private) still matches; a stale value means
	 * the timer was modified in the meantime.
	 */
	if (timr->it_interval && timr->it_requeue_pending == info->si_sys_private) {
		timr->kclock->timer_rearm(timr);

		timr->it_active = 1;
		timr->it_overrun_last = timr->it_overrun;
		timr->it_overrun = -1LL;
		++timr->it_requeue_pending;

		info->si_overrun = timer_overrun_to_int(timr, info->si_overrun);
	}

	unlock_timer(timr, flags);
}
|
|
|
|
|
/*
 * Queue the timer's preallocated signal to the target pid.
 *
 * Returns a nonzero value when the signal was dropped (ret > 0 from
 * send_sigqueue), which tells the expiry code that the timer will not
 * be rearmed from the signal delivery path.
 */
int posix_timer_event(struct k_itimer *timr, int si_private)
{
	enum pid_type type;
	int ret;
	/*
	 * FIXME: if ->sigq is queued we can race with
	 * dequeue_signal()->posixtimer_rearm().
	 *
	 * If dequeue_signal() sees the "right" value of
	 * si_sys_private it calls posixtimer_rearm().
	 * We re-queue ->sigq and drop ->it_lock().
	 * posixtimer_rearm() locks the timer
	 * and re-schedules it while ->sigq is pending.
	 * Not really bad, but not that we want.
	 */
	timr->sigq->info.si_sys_private = si_private;

	/* SIGEV_THREAD_ID directs the signal at a specific thread */
	type = !(timr->it_sigev_notify & SIGEV_THREAD_ID) ? PIDTYPE_TGID : PIDTYPE_PID;
	ret = send_sigqueue(timr->sigq, timr->it_pid, type);
	/* If we failed to send the signal the timer stops. */
	return ret > 0;
}
|
|
|
|
|
|
|
|
/*
 * This function is called when a POSIX.1b interval timer expires from
 * the HRTIMER interrupt (soft interrupt on RT kernels).
 *
 * Handles CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME and CLOCK_TAI
 * based timers.
 */
static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
{
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	struct k_itimer *timr;
	unsigned long flags;
	int si_private = 0;

	timr = container_of(timer, struct k_itimer, it.real.timer);
	spin_lock_irqsave(&timr->it_lock, flags);

	timr->it_active = 0;
	/* Periodic timers request a rearm via si_sys_private */
	if (timr->it_interval != 0)
		si_private = ++timr->it_requeue_pending;

	if (posix_timer_event(timr, si_private)) {
		/*
		 * The signal was not queued due to SIG_IGN. As a
		 * consequence the timer is not going to be rearmed from
		 * the signal delivery path. But as a real signal handler
		 * can be installed later the timer must be rearmed here.
		 */
		if (timr->it_interval != 0) {
			ktime_t now = hrtimer_cb_get_time(timer);

			/*
			 * FIXME: What we really want, is to stop this
			 * timer completely and restart it in case the
			 * SIG_IGN is removed. This is a non trivial
			 * change to the signal handling code.
			 *
			 * For now let timers with an interval less than a
			 * jiffie expire every jiffie and recheck for a
			 * valid signal handler.
			 *
			 * This avoids interrupt starvation in case of a
			 * very small interval, which would expire the
			 * timer immediately again.
			 *
			 * Moving now ahead of time by one jiffie tricks
			 * hrtimer_forward() to expire the timer later,
			 * while it still maintains the overrun accuracy
			 * for the price of a slight inconsistency in the
			 * timer_gettime() case. This is at least better
			 * than a timer storm.
			 *
			 * Only required when high resolution timers are
			 * enabled as the periodic tick based timers are
			 * automatically aligned to the next tick.
			 */
			if (IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) {
				ktime_t kj = TICK_NSEC;

				if (timr->it_interval < kj)
					now = ktime_add(now, kj);
			}

			timr->it_overrun += hrtimer_forward(timer, now, timr->it_interval);
			ret = HRTIMER_RESTART;
			++timr->it_requeue_pending;
			timr->it_active = 1;
		}
	}

	unlock_timer(timr, flags);
	return ret;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-12-02 06:18:13 +08:00
|
|
|
/*
 * Validate the sigevent argument of timer_create() and return the pid to
 * signal (the thread group by default, or a specific thread for
 * SIGEV_THREAD_ID), or NULL if the request is invalid.
 *
 * Called under rcu_read_lock(); the caller takes a reference on the
 * returned pid before dropping RCU.
 */
static struct pid *good_sigevent(sigevent_t * event)
{
	struct pid *pid = task_tgid(current);
	struct task_struct *rtn;

	switch (event->sigev_notify) {
	case SIGEV_SIGNAL | SIGEV_THREAD_ID:
		pid = find_vpid(event->sigev_notify_thread_id);
		rtn = pid_task(pid, PIDTYPE_PID);
		/* The target thread must be in the caller's thread group */
		if (!rtn || !same_thread_group(rtn, current))
			return NULL;
		fallthrough;
	case SIGEV_SIGNAL:
	case SIGEV_THREAD:
		if (event->sigev_signo <= 0 || event->sigev_signo > SIGRTMAX)
			return NULL;
		fallthrough;
	case SIGEV_NONE:
		return pid;
	default:
		return NULL;
	}
}
|
|
|
|
|
|
|
|
/*
 * Allocate a zeroed k_itimer together with its preallocated sigqueue.
 * Returns NULL when either allocation fails.
 */
static struct k_itimer * alloc_posix_timer(void)
{
	struct k_itimer *tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);

	if (!tmr)
		return tmr;
	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
		kmem_cache_free(posix_timers_cache, tmr);
		return NULL;
	}
	clear_siginfo(&tmr->sigq->info);
	return tmr;
}
|
|
|
|
|
2011-05-24 17:12:58 +08:00
|
|
|
/* RCU callback: release the k_itimer after the grace period. */
static void k_itimer_rcu_free(struct rcu_head *head)
{
	struct k_itimer *tmr = container_of(head, struct k_itimer, rcu);

	kmem_cache_free(posix_timers_cache, tmr);
}
|
|
|
|
|
2023-04-26 02:49:09 +08:00
|
|
|
/*
 * Drop the timer's pid and sigqueue references and free the k_itimer
 * via RCU, so concurrent lockless hash lookups stay safe.
 */
static void posix_timer_free(struct k_itimer *tmr)
{
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->rcu, k_itimer_rcu_free);
}
|
|
|
|
|
2023-04-26 02:49:09 +08:00
|
|
|
/* Remove the timer from the hash table and free it (RCU deferred). */
static void posix_timer_unhash_and_free(struct k_itimer *tmr)
{
	spin_lock(&hash_lock);
	hlist_del_rcu(&tmr->t_hash);
	spin_unlock(&hash_lock);
	posix_timer_free(tmr);
}
|
|
|
|
|
2011-02-01 21:51:58 +08:00
|
|
|
/* Default k_clock::timer_create(): initialize the embedded hrtimer. */
static int common_timer_create(struct k_itimer *new_timer)
{
	hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
	return 0;
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Create a POSIX.1b interval timer. */
static int do_timer_create(clockid_t which_clock, struct sigevent *event,
			   timer_t __user *created_timer_id)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct k_itimer *new_timer;
	int error, new_timer_id;

	if (!kc)
		return -EINVAL;
	if (!kc->timer_create)
		return -EOPNOTSUPP;

	new_timer = alloc_posix_timer();
	if (unlikely(!new_timer))
		return -EAGAIN;

	spin_lock_init(&new_timer->it_lock);

	/*
	 * Add the timer to the hash table. The timer is not yet valid
	 * because new_timer::it_signal is still NULL. The timer id is also
	 * not yet visible to user space.
	 */
	new_timer_id = posix_timer_add(new_timer);
	if (new_timer_id < 0) {
		posix_timer_free(new_timer);
		return new_timer_id;
	}

	new_timer->it_id = (timer_t) new_timer_id;
	new_timer->it_clock = which_clock;
	new_timer->kclock = kc;
	new_timer->it_overrun = -1LL;

	if (event) {
		rcu_read_lock();
		new_timer->it_pid = get_pid(good_sigevent(event));
		rcu_read_unlock();
		if (!new_timer->it_pid) {
			error = -EINVAL;
			goto out;
		}
		new_timer->it_sigev_notify = event->sigev_notify;
		new_timer->sigq->info.si_signo = event->sigev_signo;
		new_timer->sigq->info.si_value = event->sigev_value;
	} else {
		/* No sigevent supplied: default to SIGALRM to the group */
		new_timer->it_sigev_notify = SIGEV_SIGNAL;
		new_timer->sigq->info.si_signo = SIGALRM;
		memset(&new_timer->sigq->info.si_value, 0, sizeof(sigval_t));
		new_timer->sigq->info.si_value.sival_int = new_timer->it_id;
		new_timer->it_pid = get_pid(task_tgid(current));
	}

	new_timer->sigq->info.si_tid = new_timer->it_id;
	new_timer->sigq->info.si_code = SI_TIMER;

	if (copy_to_user(created_timer_id, &new_timer_id, sizeof (new_timer_id))) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * After successful copy out, the timer ID is visible to user space
	 * now but not yet valid because new_timer::signal is still NULL.
	 *
	 * Complete the initialization with the clock specific create
	 * callback.
	 */
	error = kc->timer_create(new_timer);
	if (error)
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	/* This makes the timer valid in the hash table */
	WRITE_ONCE(new_timer->it_signal, current->signal);
	list_add(&new_timer->list, &current->signal->posix_timers);
	spin_unlock_irq(&current->sighand->siglock);
	/*
	 * After unlocking sighand::siglock @new_timer is subject to
	 * concurrent removal and cannot be touched anymore
	 */
	return 0;
out:
	posix_timer_unhash_and_free(new_timer);
	return error;
}
|
|
|
|
|
2017-06-07 16:42:39 +08:00
|
|
|
/* timer_create(2) entry point: copy in the optional sigevent and create. */
SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
		struct sigevent __user *, timer_event_spec,
		timer_t __user *, created_timer_id)
{
	if (timer_event_spec) {
		sigevent_t event;

		if (copy_from_user(&event, timer_event_spec, sizeof (event)))
			return -EFAULT;
		return do_timer_create(which_clock, &event, created_timer_id);
	}
	return do_timer_create(which_clock, NULL, created_timer_id);
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
COMPAT_SYSCALL_DEFINE3(timer_create, clockid_t, which_clock,
|
|
|
|
struct compat_sigevent __user *, timer_event_spec,
|
|
|
|
timer_t __user *, created_timer_id)
|
|
|
|
{
|
|
|
|
if (timer_event_spec) {
|
|
|
|
sigevent_t event;
|
|
|
|
|
|
|
|
if (get_compat_sigevent(&event, timer_event_spec))
|
|
|
|
return -EFAULT;
|
|
|
|
return do_timer_create(which_clock, &event, created_timer_id);
|
|
|
|
}
|
|
|
|
return do_timer_create(which_clock, NULL, created_timer_id);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2010-10-21 06:57:34 +08:00
|
|
|
static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timer;

	/*
	 * timer_t could be any type >= int and we want to make sure that
	 * any @timer_id outside the positive int range fails the lookup.
	 */
	if ((unsigned long long)timer_id > INT_MAX)
		return NULL;

	/*
	 * The hash lookup and the timers are RCU protected.
	 *
	 * Timers are added to the hash in invalid state where
	 * timer::it_signal == NULL. timer::it_signal is only set after the
	 * rest of the initialization succeeded.
	 *
	 * Timer destruction happens in steps:
	 *  1) Set timer::it_signal to NULL with timer::it_lock held
	 *  2) Release timer::it_lock
	 *  3) Remove from the hash under hash_lock
	 *  4) Call RCU for removal after the grace period
	 *
	 * Holding rcu_read_lock() across the lookup ensures that
	 * the timer cannot be freed.
	 *
	 * The lookup validates locklessly that timer::it_signal ==
	 * current::it_signal and timer::it_id == @timer_id. timer::it_id
	 * can't change, but timer::it_signal becomes NULL during
	 * destruction.
	 */
	rcu_read_lock();
	timer = posix_timer_by_id(timer_id);
	if (timer) {
		spin_lock_irqsave(&timer->it_lock, *flags);
		/*
		 * Revalidate under timer::it_lock that timer::it_signal is
		 * still valid. Pairs with step #1 above.
		 */
		if (timer->it_signal == current->signal) {
			rcu_read_unlock();
			return timer;
		}
		spin_unlock_irqrestore(&timer->it_lock, *flags);
	}
	rcu_read_unlock();

	return NULL;
}
|
|
|
|
|
2017-05-31 05:15:50 +08:00
|
|
|
/* Remaining time until expiry of the underlying hrtimer, adjusted vs. @now. */
static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
{
	return __hrtimer_expires_remaining_adjusted(&timr->it.real.timer, now);
}
|
|
|
|
|
2018-06-26 21:21:31 +08:00
|
|
|
/*
 * Move the hrtimer expiry forward in it_interval steps so it is past @now.
 * Returns the number of elapsed intervals (overruns).
 */
static s64 common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
{
	return hrtimer_forward(&timr->it.real.timer, now, timr->it_interval);
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2023-04-26 02:49:12 +08:00
|
|
|
* Get the time remaining on a POSIX.1b interval timer.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2023-04-26 02:49:12 +08:00
|
|
|
* Two issues to handle here:
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2023-04-26 02:49:12 +08:00
|
|
|
* 1) The timer has a requeue pending. The return value must appear as
|
|
|
|
* if the timer has been requeued right now.
|
|
|
|
*
|
|
|
|
* 2) The timer is a SIGEV_NONE timer. These timers are never enqueued
|
|
|
|
* into the hrtimer queue and therefore never expired. Emulate expiry
|
|
|
|
* here taking #1 into account.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2017-05-31 05:15:59 +08:00
|
|
|
void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2017-05-31 05:15:50 +08:00
|
|
|
const struct k_clock *kc = timr->kclock;
|
2006-03-26 17:38:07 +08:00
|
|
|
ktime_t now, remaining, iv;
|
2017-05-31 05:15:50 +08:00
|
|
|
bool sig_none;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-12-15 17:32:03 +08:00
|
|
|
sig_none = timr->it_sigev_notify == SIGEV_NONE;
|
2017-05-31 05:15:43 +08:00
|
|
|
iv = timr->it_interval;
|
2006-03-26 17:38:07 +08:00
|
|
|
|
2006-01-10 12:52:38 +08:00
|
|
|
/* interval timer ? */
|
2017-05-31 05:15:50 +08:00
|
|
|
if (iv) {
|
2017-03-27 03:04:17 +08:00
|
|
|
cur_setting->it_interval = ktime_to_timespec64(iv);
|
2017-05-31 05:15:50 +08:00
|
|
|
} else if (!timr->it_active) {
|
|
|
|
/*
|
2023-04-26 02:49:12 +08:00
|
|
|
* SIGEV_NONE oneshot timers are never queued and therefore
|
|
|
|
* timr->it_active is always false. The check below
|
|
|
|
* vs. remaining time will handle this case.
|
|
|
|
*
|
|
|
|
* For all other timers there is nothing to update here, so
|
|
|
|
* return.
|
2017-05-31 05:15:50 +08:00
|
|
|
*/
|
|
|
|
if (!sig_none)
|
|
|
|
return;
|
|
|
|
}
|
2006-03-26 17:38:07 +08:00
|
|
|
|
2019-11-12 09:26:59 +08:00
|
|
|
now = kc->clock_get_ktime(timr->it_clock);
|
2006-03-26 17:38:07 +08:00
|
|
|
|
2006-01-10 12:52:38 +08:00
|
|
|
/*
|
2023-04-26 02:49:12 +08:00
|
|
|
* If this is an interval timer and either has requeue pending or
|
|
|
|
* is a SIGEV_NONE timer move the expiry time forward by intervals,
|
|
|
|
* so expiry is > now.
|
2006-01-10 12:52:38 +08:00
|
|
|
*/
|
2017-05-31 05:15:50 +08:00
|
|
|
if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
|
2018-06-26 21:21:32 +08:00
|
|
|
timr->it_overrun += kc->timer_forward(timr, now);
|
2006-03-26 17:38:07 +08:00
|
|
|
|
2017-05-31 05:15:50 +08:00
|
|
|
remaining = kc->timer_remaining(timr, now);
|
2023-04-26 02:49:12 +08:00
|
|
|
/*
|
|
|
|
* As @now is retrieved before a possible timer_forward() and
|
|
|
|
* cannot be reevaluated by the compiler @remaining is based on the
|
|
|
|
* same @now value. Therefore @remaining is consistent vs. @now.
|
|
|
|
*
|
|
|
|
* Consequently all interval timers, i.e. @iv > 0, cannot have a
|
|
|
|
* remaining time <= 0 because timer_forward() guarantees to move
|
|
|
|
* them forward so that the next timer expiry is > @now.
|
|
|
|
*/
|
2016-12-25 18:38:40 +08:00
|
|
|
if (remaining <= 0) {
|
2006-03-26 17:38:07 +08:00
|
|
|
/*
|
2023-04-26 02:49:12 +08:00
|
|
|
* A single shot SIGEV_NONE timer must return 0, when it is
|
|
|
|
* expired! Timers which have a real signal delivery mode
|
|
|
|
* must return a remaining time greater than 0 because the
|
|
|
|
* signal has not yet been delivered.
|
2006-03-26 17:38:07 +08:00
|
|
|
*/
|
2017-05-31 05:15:50 +08:00
|
|
|
if (!sig_none)
|
2006-03-26 17:38:07 +08:00
|
|
|
cur_setting->it_value.tv_nsec = 1;
|
2017-05-31 05:15:50 +08:00
|
|
|
} else {
|
2017-03-27 03:04:17 +08:00
|
|
|
cur_setting->it_value = ktime_to_timespec64(remaining);
|
2017-05-31 05:15:50 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
2017-06-07 16:42:36 +08:00
|
|
|
/* Look up and lock the timer, clear @setting and let the clock fill it in. */
static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
{
	struct k_itimer *timr;
	const struct k_clock *kc;
	unsigned long flags;
	int ret = 0;

	timr = lock_timer(timer_id, &flags);
	if (!timr)
		return -EINVAL;

	memset(setting, 0, sizeof(*setting));

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_get))
		ret = -EINVAL;
	else
		kc->timer_get(timr, setting);

	unlock_timer(timr, flags);
	return ret;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-07 16:42:36 +08:00
|
|
|
/* Get the time remaining on a POSIX.1b interval timer. */
|
|
|
|
SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
|
2018-06-17 13:11:44 +08:00
|
|
|
struct __kernel_itimerspec __user *, setting)
|
2017-06-07 16:42:36 +08:00
|
|
|
{
|
2017-06-25 02:45:08 +08:00
|
|
|
struct itimerspec64 cur_setting;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-25 02:45:08 +08:00
|
|
|
int ret = do_timer_gettime(timer_id, &cur_setting);
|
2017-06-07 16:42:36 +08:00
|
|
|
if (!ret) {
|
2017-06-25 02:45:08 +08:00
|
|
|
if (put_itimerspec64(&cur_setting, setting))
|
2017-06-07 16:42:36 +08:00
|
|
|
ret = -EFAULT;
|
|
|
|
}
|
2011-02-01 21:52:04 +08:00
|
|
|
return ret;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2006-01-10 12:52:38 +08:00
|
|
|
|
2018-06-17 13:11:44 +08:00
|
|
|
#ifdef CONFIG_COMPAT_32BIT_TIME
|
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
SYSCALL_DEFINE2(timer_gettime32, timer_t, timer_id,
|
|
|
|
struct old_itimerspec32 __user *, setting)
|
2017-06-07 16:42:36 +08:00
|
|
|
{
|
2017-06-25 02:45:08 +08:00
|
|
|
struct itimerspec64 cur_setting;
|
2017-06-07 16:42:36 +08:00
|
|
|
|
2017-06-25 02:45:08 +08:00
|
|
|
int ret = do_timer_gettime(timer_id, &cur_setting);
|
2017-06-07 16:42:36 +08:00
|
|
|
if (!ret) {
|
y2038: globally rename compat_time to old_time32
Christoph Hellwig suggested a slightly different path for handling
backwards compatibility with the 32-bit time_t based system calls:
Rather than simply reusing the compat_sys_* entry points on 32-bit
architectures unchanged, we get rid of those entry points and the
compat_time types by renaming them to something that makes more sense
on 32-bit architectures (which don't have a compat mode otherwise),
and then share the entry points under the new name with the 64-bit
architectures that use them for implementing the compatibility.
The following types and interfaces are renamed here, and moved
from linux/compat_time.h to linux/time32.h:
old new
--- ---
compat_time_t old_time32_t
struct compat_timeval struct old_timeval32
struct compat_timespec struct old_timespec32
struct compat_itimerspec struct old_itimerspec32
ns_to_compat_timeval() ns_to_old_timeval32()
get_compat_itimerspec64() get_old_itimerspec32()
put_compat_itimerspec64() put_old_itimerspec32()
compat_get_timespec64() get_old_timespec32()
compat_put_timespec64() put_old_timespec32()
As we already have aliases in place, this patch addresses only the
instances that are relevant to the system call interface in particular,
not those that occur in device drivers and other modules. Those
will get handled separately, while providing the 64-bit version
of the respective interfaces.
I'm not renaming the timex, rusage and itimerval structures, as we are
still debating what the new interface will look like, and whether we
will need a replacement at all.
This also doesn't change the names of the syscall entry points, which can
be done more easily when we actually switch over the 32-bit architectures
to use them, at that point we need to change COMPAT_SYSCALL_DEFINEx to
SYSCALL_DEFINEx with a new name, e.g. with a _time32 suffix.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Link: https://lore.kernel.org/lkml/20180705222110.GA5698@infradead.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
2018-07-13 18:52:28 +08:00
|
|
|
if (put_old_itimerspec32(&cur_setting, setting))
|
2017-06-07 16:42:36 +08:00
|
|
|
ret = -EFAULT;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
2018-06-17 13:11:44 +08:00
|
|
|
|
2017-06-07 16:42:36 +08:00
|
|
|
#endif
|
|
|
|
|
2023-04-26 02:49:14 +08:00
|
|
|
/**
|
|
|
|
* sys_timer_getoverrun - Get the number of overruns of a POSIX.1b interval timer
|
|
|
|
* @timer_id: The timer ID which identifies the timer
|
|
|
|
*
|
|
|
|
* The "overrun count" of a timer is one plus the number of expiration
|
|
|
|
* intervals which have elapsed between the first expiry, which queues the
|
|
|
|
* signal and the actual signal delivery. On signal delivery the "overrun
|
|
|
|
* count" is calculated and cached, so it can be returned directly here.
|
|
|
|
*
|
|
|
|
* As this is relative to the last queued signal the returned overrun count
|
|
|
|
* is meaningless outside of the signal delivery path and even there it
|
|
|
|
* does not accurately reflect the current state when user space evaluates
|
|
|
|
* it.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* -EINVAL @timer_id is invalid
|
|
|
|
* 1..INT_MAX The number of overruns related to the last delivered signal
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2009-01-14 21:14:07 +08:00
|
|
|
SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct k_itimer *timr;
|
2007-10-15 02:35:50 +08:00
|
|
|
unsigned long flags;
|
2023-04-26 02:49:27 +08:00
|
|
|
int overrun;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
timr = lock_timer(timer_id, &flags);
|
|
|
|
if (!timr)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-06-26 21:21:32 +08:00
|
|
|
overrun = timer_overrun_to_int(timr, 0);
|
2005-04-17 06:20:36 +08:00
|
|
|
unlock_timer(timr, flags);
|
|
|
|
|
|
|
|
return overrun;
|
|
|
|
}
|
|
|
|
|
2017-05-31 05:15:53 +08:00
|
|
|
/* (Re)initialize and arm the underlying hrtimer of a POSIX timer. */
static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
			       bool absolute, bool sigev_none)
{
	struct hrtimer *timer = &timr->it.real.timer;
	enum hrtimer_mode mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;

	/*
	 * Posix magic: Relative CLOCK_REALTIME timers are not affected by
	 * clock modifications, so they become CLOCK_MONOTONIC based under the
	 * hood. See hrtimer_init(). Update timr->kclock, so the generic
	 * functions which use timr->kclock->clock_get_*() work.
	 *
	 * Note: it_clock stays unmodified, because the next timer_set() might
	 * use ABSTIME, so it needs to switch back.
	 */
	if (timr->it_clock == CLOCK_REALTIME)
		timr->kclock = absolute ? &clock_realtime : &clock_monotonic;

	hrtimer_init(timer, timr->it_clock, mode);
	timr->it.real.timer.function = posix_timer_fn;

	/* Relative expiry is converted to an absolute expiry time here */
	if (!absolute)
		expires = ktime_add_safe(expires, timer->base->get_time());
	hrtimer_set_expires(timer, expires);

	/* SIGEV_NONE timers are never enqueued into the hrtimer queue */
	if (!sigev_none)
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
|
|
|
|
|
|
|
|
static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
|
|
|
|
{
|
|
|
|
return hrtimer_try_to_cancel(&timr->it.real.timer);
|
|
|
|
}
|
|
|
|
|
2019-08-02 13:35:59 +08:00
|
|
|
static void common_timer_wait_running(struct k_itimer *timer)
|
|
|
|
{
|
|
|
|
hrtimer_cancel_wait_running(&timer->it.real.timer);
|
|
|
|
}
|
|
|
|
|
2019-08-20 21:12:23 +08:00
|
|
|
/*
|
2023-04-26 02:49:00 +08:00
|
|
|
* On PREEMPT_RT this prevents priority inversion and a potential livelock
|
|
|
|
* against the ksoftirqd thread in case that ksoftirqd gets preempted while
|
|
|
|
* executing a hrtimer callback.
|
|
|
|
*
|
|
|
|
* See the comments in hrtimer_cancel_wait_running(). For PREEMPT_RT=n this
|
|
|
|
* just results in a cpu_relax().
|
|
|
|
*
|
|
|
|
* For POSIX CPU timers with CONFIG_POSIX_CPU_TIMERS_TASK_WORK=n this is
|
|
|
|
* just a cpu_relax(). With CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y this
|
|
|
|
* prevents spinning on an eventually scheduled out task and a livelock
|
|
|
|
* when the task which tries to delete or disarm the timer has preempted
|
|
|
|
* the task which runs the expiry in task work context.
|
2019-08-20 21:12:23 +08:00
|
|
|
*/
|
2019-07-31 06:33:53 +08:00
|
|
|
static struct k_itimer *timer_wait_running(struct k_itimer *timer,
|
|
|
|
unsigned long *flags)
|
|
|
|
{
|
2019-08-02 13:35:59 +08:00
|
|
|
const struct k_clock *kc = READ_ONCE(timer->kclock);
|
2019-07-31 06:33:53 +08:00
|
|
|
timer_t timer_id = READ_ONCE(timer->it_id);
|
|
|
|
|
2019-08-02 13:35:59 +08:00
|
|
|
/* Prevent kfree(timer) after dropping the lock */
|
|
|
|
rcu_read_lock();
|
2019-07-31 06:33:53 +08:00
|
|
|
unlock_timer(timer, *flags);
|
2019-08-02 13:35:59 +08:00
|
|
|
|
2023-04-17 21:37:55 +08:00
|
|
|
/*
|
|
|
|
* kc->timer_wait_running() might drop RCU lock. So @timer
|
|
|
|
* cannot be touched anymore after the function returns!
|
|
|
|
*/
|
2019-08-02 13:35:59 +08:00
|
|
|
if (!WARN_ON_ONCE(!kc->timer_wait_running))
|
|
|
|
kc->timer_wait_running(timer);
|
|
|
|
|
|
|
|
rcu_read_unlock();
|
2019-07-31 06:33:53 +08:00
|
|
|
/* Relock the timer. It might be not longer hashed. */
|
|
|
|
return lock_timer(timer_id, flags);
|
|
|
|
}
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Set a POSIX.1b interval timer. */
|
2017-05-31 05:15:59 +08:00
|
|
|
int common_timer_set(struct k_itimer *timr, int flags,
|
|
|
|
struct itimerspec64 *new_setting,
|
|
|
|
struct itimerspec64 *old_setting)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2017-05-31 05:15:53 +08:00
|
|
|
const struct k_clock *kc = timr->kclock;
|
|
|
|
bool sigev_none;
|
|
|
|
ktime_t expires;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (old_setting)
|
|
|
|
common_timer_get(timr, old_setting);
|
|
|
|
|
2017-05-31 05:15:53 +08:00
|
|
|
/* Prevent rearming by clearing the interval */
|
2017-05-31 05:15:43 +08:00
|
|
|
timr->it_interval = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2017-05-31 05:15:53 +08:00
|
|
|
* Careful here. On SMP systems the timer expiry function could be
|
|
|
|
* active and spinning on timr->it_lock.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2017-05-31 05:15:53 +08:00
|
|
|
if (kc->timer_try_to_cancel(timr) < 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
return TIMER_RETRY;
|
|
|
|
|
2017-05-31 05:15:48 +08:00
|
|
|
timr->it_active = 0;
|
|
|
|
timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
|
2005-04-17 06:20:36 +08:00
|
|
|
~REQUEUE_PENDING;
|
|
|
|
timr->it_overrun_last = 0;
|
|
|
|
|
2017-05-31 05:15:53 +08:00
|
|
|
/* Switch off the timer when it_value is zero */
|
2006-01-10 12:52:38 +08:00
|
|
|
if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
|
|
|
|
return 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-05-31 05:15:43 +08:00
|
|
|
timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
|
2017-05-31 05:15:53 +08:00
|
|
|
expires = timespec64_to_ktime(new_setting->it_value);
|
2019-11-12 09:27:03 +08:00
|
|
|
if (flags & TIMER_ABSTIME)
|
|
|
|
expires = timens_ktime_to_host(timr->it_clock, expires);
|
2017-12-15 17:32:03 +08:00
|
|
|
sigev_none = timr->it_sigev_notify == SIGEV_NONE;
|
2006-01-10 12:52:38 +08:00
|
|
|
|
2017-05-31 05:15:53 +08:00
|
|
|
kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
|
|
|
|
timr->it_active = !sigev_none;
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2019-07-31 06:33:52 +08:00
|
|
|
/*
 * Validate the new setting, look up the timer and invoke the clock
 * specific timer_set() callback, retrying if the expiry callback is
 * concurrently running.
 */
static int do_timer_settime(timer_t timer_id, int tmr_flags,
			    struct itimerspec64 *new_spec64,
			    struct itimerspec64 *old_spec64)
{
	const struct k_clock *kc;
	struct k_itimer *timr;
	unsigned long flags;
	int error = 0;

	if (!timespec64_valid(&new_spec64->it_interval) ||
	    !timespec64_valid(&new_spec64->it_value))
		return -EINVAL;

	if (old_spec64)
		memset(old_spec64, 0, sizeof(*old_spec64));

	timr = lock_timer(timer_id, &flags);
retry:
	if (!timr)
		return -EINVAL;

	kc = timr->kclock;
	if (WARN_ON_ONCE(!kc || !kc->timer_set))
		error = -EINVAL;
	else
		error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);

	if (error == TIMER_RETRY) {
		/* The old setting was already retrieved on the first pass */
		old_spec64 = NULL;
		/* Unlocks and relocks the timer if it still exists */
		timr = timer_wait_running(timr, &flags);
		goto retry;
	}
	unlock_timer(timr, flags);

	return error;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-07 16:42:35 +08:00
|
|
|
/* Set a POSIX.1b interval timer */
|
|
|
|
SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
|
2018-06-17 13:11:44 +08:00
|
|
|
const struct __kernel_itimerspec __user *, new_setting,
|
|
|
|
struct __kernel_itimerspec __user *, old_setting)
|
2017-06-07 16:42:35 +08:00
|
|
|
{
|
2023-04-26 02:49:27 +08:00
|
|
|
struct itimerspec64 new_spec, old_spec, *rtn;
|
2017-06-07 16:42:35 +08:00
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
if (!new_setting)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2017-06-25 02:45:08 +08:00
|
|
|
if (get_itimerspec64(&new_spec, new_setting))
|
2017-06-07 16:42:35 +08:00
|
|
|
return -EFAULT;
|
|
|
|
|
2023-04-26 02:49:27 +08:00
|
|
|
rtn = old_setting ? &old_spec : NULL;
|
2017-06-25 02:45:08 +08:00
|
|
|
error = do_timer_settime(timer_id, flags, &new_spec, rtn);
|
2017-06-07 16:42:35 +08:00
|
|
|
if (!error && old_setting) {
|
2017-06-25 02:45:08 +08:00
|
|
|
if (put_itimerspec64(&old_spec, old_setting))
|
2017-06-07 16:42:35 +08:00
|
|
|
error = -EFAULT;
|
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
2018-06-17 13:11:44 +08:00
|
|
|
#ifdef CONFIG_COMPAT_32BIT_TIME
|
2019-01-07 07:33:08 +08:00
|
|
|
/* 32-bit time_t variant of timer_settime(). */
SYSCALL_DEFINE4(timer_settime32, timer_t, timer_id, int, flags,
		struct old_itimerspec32 __user *, new,
		struct old_itimerspec32 __user *, old)
{
	struct itimerspec64 new_spec, old_spec;
	struct itimerspec64 *rtn = old ? &old_spec : NULL;
	int error;

	if (!new)
		return -EINVAL;

	if (get_old_itimerspec32(&new_spec, new))
		return -EFAULT;

	error = do_timer_settime(timer_id, flags, &new_spec, rtn);
	if (!error && old && put_old_itimerspec32(&old_spec, old))
		error = -EFAULT;

	return error;
}
|
2017-06-07 16:42:35 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-05-31 05:15:59 +08:00
|
|
|
int common_timer_del(struct k_itimer *timer)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2017-05-31 05:15:53 +08:00
|
|
|
const struct k_clock *kc = timer->kclock;
|
2005-06-23 15:09:00 +08:00
|
|
|
|
2017-05-31 05:15:53 +08:00
|
|
|
timer->it_interval = 0;
|
|
|
|
if (kc->timer_try_to_cancel(timer) < 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
return TIMER_RETRY;
|
2017-05-31 05:15:48 +08:00
|
|
|
timer->it_active = 0;
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int timer_delete_hook(struct k_itimer *timer)
|
|
|
|
{
|
2017-05-31 05:15:44 +08:00
|
|
|
const struct k_clock *kc = timer->kclock;
|
2011-02-01 21:52:07 +08:00
|
|
|
|
|
|
|
if (WARN_ON_ONCE(!kc || !kc->timer_del))
|
|
|
|
return -EINVAL;
|
|
|
|
return kc->timer_del(timer);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Delete a POSIX.1b interval timer. */
|
2009-01-14 21:14:07 +08:00
|
|
|
SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct k_itimer *timer;
|
2007-10-15 02:35:50 +08:00
|
|
|
unsigned long flags;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
timer = lock_timer(timer_id, &flags);
|
2019-07-31 06:33:53 +08:00
|
|
|
|
|
|
|
retry_delete:
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!timer)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-07-31 06:33:53 +08:00
|
|
|
if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
|
|
|
|
/* Unlocks and relocks the timer if it still exists */
|
|
|
|
timer = timer_wait_running(timer, &flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
goto retry_delete;
|
|
|
|
}
|
2006-01-10 12:52:38 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
spin_lock(¤t->sighand->siglock);
|
|
|
|
list_del(&timer->list);
|
|
|
|
spin_unlock(¤t->sighand->siglock);
|
|
|
|
/*
|
2023-04-26 02:49:05 +08:00
|
|
|
* A concurrent lookup could check timer::it_signal lockless. It
|
|
|
|
* will reevaluate with timer::it_lock held and observe the NULL.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2023-04-26 02:49:05 +08:00
|
|
|
WRITE_ONCE(timer->it_signal, NULL);
|
2008-07-25 16:47:26 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
unlock_timer(timer, flags);
|
2023-04-26 02:49:09 +08:00
|
|
|
posix_timer_unhash_and_free(timer);
|
2005-04-17 06:20:36 +08:00
|
|
|
return 0;
|
|
|
|
}
|
2006-01-10 12:52:38 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2023-06-02 04:16:34 +08:00
|
|
|
* Delete a timer if it is armed, remove it from the hash and schedule it
|
|
|
|
* for RCU freeing.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2006-01-15 05:20:43 +08:00
|
|
|
static void itimer_delete(struct k_itimer *timer)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2023-06-02 04:16:34 +08:00
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* irqsave is required to make timer_wait_running() work.
|
|
|
|
*/
|
|
|
|
spin_lock_irqsave(&timer->it_lock, flags);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2023-06-02 04:16:34 +08:00
|
|
|
retry_delete:
|
|
|
|
/*
|
|
|
|
* Even if the timer is not longer accessible from other tasks
|
|
|
|
* it still might be armed and queued in the underlying timer
|
|
|
|
* mechanism. Worse, that timer mechanism might run the expiry
|
|
|
|
* function concurrently.
|
|
|
|
*/
|
2006-01-10 12:52:38 +08:00
|
|
|
if (timer_delete_hook(timer) == TIMER_RETRY) {
|
2023-06-02 04:16:34 +08:00
|
|
|
/*
|
|
|
|
* Timer is expired concurrently, prevent livelocks
|
|
|
|
* and pointless spinning on RT.
|
|
|
|
*
|
|
|
|
* timer_wait_running() drops timer::it_lock, which opens
|
|
|
|
* the possibility for another task to delete the timer.
|
|
|
|
*
|
|
|
|
* That's not possible here because this is invoked from
|
|
|
|
* do_exit() only for the last thread of the thread group.
|
|
|
|
* So no other task can access and delete that timer.
|
|
|
|
*/
|
|
|
|
if (WARN_ON_ONCE(timer_wait_running(timer, &flags) != timer))
|
|
|
|
return;
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
goto retry_delete;
|
|
|
|
}
|
|
|
|
list_del(&timer->list);
|
2008-07-25 16:47:26 +08:00
|
|
|
|
2023-04-26 02:49:06 +08:00
|
|
|
/*
|
|
|
|
* Setting timer::it_signal to NULL is technically not required
|
|
|
|
* here as nothing can access the timer anymore legitimately via
|
|
|
|
* the hash table. Set it to NULL nevertheless so that all deletion
|
|
|
|
* paths are consistent.
|
|
|
|
*/
|
|
|
|
WRITE_ONCE(timer->it_signal, NULL);
|
|
|
|
|
2023-06-02 04:16:34 +08:00
|
|
|
spin_unlock_irqrestore(&timer->it_lock, flags);
|
2023-04-26 02:49:09 +08:00
|
|
|
posix_timer_unhash_and_free(timer);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2023-06-02 04:16:34 +08:00
|
|
|
* Invoked from do_exit() when the last thread of a thread group exits.
|
|
|
|
* At that point no other task can access the timers of the dying
|
|
|
|
* task anymore.
|
2005-04-17 06:20:36 +08:00
|
|
|
*/
|
2022-07-12 00:16:25 +08:00
|
|
|
void exit_itimers(struct task_struct *tsk)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2022-07-12 00:16:25 +08:00
|
|
|
struct list_head timers;
|
2005-04-17 06:20:36 +08:00
|
|
|
struct k_itimer *tmr;
|
|
|
|
|
2022-07-12 00:16:25 +08:00
|
|
|
if (list_empty(&tsk->signal->posix_timers))
|
|
|
|
return;
|
|
|
|
|
2023-06-02 04:16:34 +08:00
|
|
|
/* Protect against concurrent read via /proc/$PID/timers */
|
2022-07-12 00:16:25 +08:00
|
|
|
spin_lock_irq(&tsk->sighand->siglock);
|
|
|
|
list_replace_init(&tsk->signal->posix_timers, &timers);
|
|
|
|
spin_unlock_irq(&tsk->sighand->siglock);
|
|
|
|
|
2023-06-02 04:16:34 +08:00
|
|
|
/* The timers are not longer accessible via tsk::signal */
|
2022-07-12 00:16:25 +08:00
|
|
|
while (!list_empty(&timers)) {
|
|
|
|
tmr = list_first_entry(&timers, struct k_itimer, list);
|
2005-04-17 06:20:36 +08:00
|
|
|
itimer_delete(tmr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-01-14 21:14:07 +08:00
|
|
|
SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
		const struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 new_tp;

	/* Reject unknown clocks and clocks without a setter */
	if (!kc)
		return -EINVAL;
	if (!kc->clock_set)
		return -EINVAL;

	if (get_timespec64(&new_tp, tp))
		return -EFAULT;

	/* Permission checking is the job of the clock specific setter callback */
	return kc->clock_set(which_clock, &new_tp);
}
|
|
|
|
|
2009-01-14 21:14:07 +08:00
|
|
|
SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 kernel_tp;
	int ret = -EINVAL;

	if (kc) {
		ret = kc->clock_get_timespec(which_clock, &kernel_tp);
		/* Copying to user space can still fail after a good read */
		if (!ret && put_timespec64(&kernel_tp, tp))
			ret = -EFAULT;
	}

	return ret;
}
|
|
|
|
|
2018-07-03 13:44:21 +08:00
|
|
|
/*
 * Adjust a clock via its clock_adj() callback. Shared by the native and
 * the 32-bit compat clock_adjtime() syscalls.
 */
int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);

	/* Invalid clock id */
	if (!kc)
		return -EINVAL;
	/* The clock exists, but does not support adjustment */
	if (!kc->clock_adj)
		return -EOPNOTSUPP;

	return kc->clock_adj(which_clock, ktx);
}
|
|
|
|
|
|
|
|
SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
		struct __kernel_timex __user *, utx)
{
	struct __kernel_timex ktx;
	int ret;

	if (copy_from_user(&ktx, utx, sizeof(ktx)))
		return -EFAULT;

	ret = do_clock_adjtime(which_clock, &ktx);

	/* Non-negative return values still hand the timex data back */
	if (ret >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
		return -EFAULT;

	return ret;
}
|
|
|
|
|
2023-04-26 02:49:11 +08:00
|
|
|
/**
|
|
|
|
* sys_clock_getres - Get the resolution of a clock
|
|
|
|
* @which_clock: The clock to get the resolution for
|
|
|
|
 * @tp:		Pointer to a user space timespec64 for storage
|
|
|
|
*
|
|
|
|
* POSIX defines:
|
|
|
|
*
|
|
|
|
* "The clock_getres() function shall return the resolution of any
|
|
|
|
* clock. Clock resolutions are implementation-defined and cannot be set by
|
|
|
|
* a process. If the argument res is not NULL, the resolution of the
|
|
|
|
* specified clock shall be stored in the location pointed to by res. If
|
|
|
|
* res is NULL, the clock resolution is not returned. If the time argument
|
|
|
|
* of clock_settime() is not a multiple of res, then the value is truncated
|
|
|
|
* to a multiple of res."
|
|
|
|
*
|
|
|
|
* Due to the various hardware constraints the real resolution can vary
|
|
|
|
* wildly and even change during runtime when the underlying devices are
|
|
|
|
* replaced. The kernel also can use hardware devices with different
|
|
|
|
* resolutions for reading the time and for arming timers.
|
|
|
|
*
|
|
|
|
* The kernel therefore deviates from the POSIX spec in various aspects:
|
|
|
|
*
|
|
|
|
* 1) The resolution returned to user space
|
|
|
|
*
|
|
|
|
* For CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_BOOTTIME, CLOCK_TAI,
|
|
|
|
 *    CLOCK_REALTIME_ALARM, CLOCK_BOOTTIME_ALARM and CLOCK_MONOTONIC_RAW
|
|
|
|
* the kernel differentiates only two cases:
|
|
|
|
*
|
|
|
|
* I) Low resolution mode:
|
|
|
|
*
|
|
|
|
* When high resolution timers are disabled at compile or runtime
|
|
|
|
* the resolution returned is nanoseconds per tick, which represents
|
|
|
|
* the precision at which timers expire.
|
|
|
|
*
|
|
|
|
* II) High resolution mode:
|
|
|
|
*
|
|
|
|
* When high resolution timers are enabled the resolution returned
|
|
|
|
* is always one nanosecond independent of the actual resolution of
|
|
|
|
* the underlying hardware devices.
|
|
|
|
*
|
|
|
|
* For CLOCK_*_ALARM the actual resolution depends on system
|
|
|
|
* state. When system is running the resolution is the same as the
|
|
|
|
* resolution of the other clocks. During suspend the actual
|
|
|
|
* resolution is the resolution of the underlying RTC device which
|
|
|
|
* might be way less precise than the clockevent device used during
|
|
|
|
* running state.
|
|
|
|
*
|
|
|
|
* For CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE the resolution
|
|
|
|
* returned is always nanoseconds per tick.
|
|
|
|
*
|
|
|
|
* For CLOCK_PROCESS_CPUTIME and CLOCK_THREAD_CPUTIME the resolution
|
|
|
|
* returned is always one nanosecond under the assumption that the
|
|
|
|
* underlying scheduler clock has a better resolution than nanoseconds
|
|
|
|
* per tick.
|
|
|
|
*
|
|
|
|
* For dynamic POSIX clocks (PTP devices) the resolution returned is
|
|
|
|
* always one nanosecond.
|
|
|
|
*
|
|
|
|
 * 2) Effect on sys_clock_settime()
|
|
|
|
*
|
|
|
|
* The kernel does not truncate the time which is handed in to
|
|
|
|
* sys_clock_settime(). The kernel internal timekeeping is always using
|
|
|
|
* nanoseconds precision independent of the clocksource device which is
|
|
|
|
* used to read the time from. The resolution of that device only
|
|
|
|
 * affects the precision of the time returned by sys_clock_gettime().
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 Success. @tp contains the resolution
|
|
|
|
* -EINVAL @which_clock is not a valid clock ID
|
|
|
|
* -EFAULT Copying the resolution to @tp faulted
|
|
|
|
* -ENODEV Dynamic POSIX clock is not backed by a device
|
|
|
|
* -EOPNOTSUPP Dynamic POSIX clock does not support getres()
|
|
|
|
*/
|
2017-06-07 16:42:38 +08:00
|
|
|
SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
		struct __kernel_timespec __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 rtn_tp;
	int ret;

	if (!kc)
		return -EINVAL;

	ret = kc->clock_getres(which_clock, &rtn_tp);

	/* A NULL @tp is valid: the caller is not interested in the value */
	if (!ret && tp && put_timespec64(&rtn_tp, tp))
		ret = -EFAULT;

	return ret;
}
|
|
|
|
|
2018-03-14 12:03:29 +08:00
|
|
|
#ifdef CONFIG_COMPAT_32BIT_TIME
|
2017-06-07 16:42:34 +08:00
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
/*
 * 32-bit time_t variant of sys_clock_settime().
 */
SYSCALL_DEFINE2(clock_settime32, clockid_t, which_clock,
		struct old_timespec32 __user *, tp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 ts;

	/* The clock must exist and support setting the time */
	if (!kc || !kc->clock_set)
		return -EINVAL;

	if (get_old_timespec32(&ts, tp))
		return -EFAULT;

	/* Permission checks are done in the clock specific setter */
	return kc->clock_set(which_clock, &ts);
}
|
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
SYSCALL_DEFINE2(clock_gettime32, clockid_t, which_clock,
|
|
|
|
struct old_timespec32 __user *, tp)
|
2017-06-07 16:42:38 +08:00
|
|
|
{
|
|
|
|
const struct k_clock *kc = clockid_to_kclock(which_clock);
|
2017-06-25 02:45:05 +08:00
|
|
|
struct timespec64 ts;
|
|
|
|
int err;
|
2017-06-07 16:42:38 +08:00
|
|
|
|
|
|
|
if (!kc)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-11-12 09:26:54 +08:00
|
|
|
err = kc->clock_get_timespec(which_clock, &ts);
|
2017-06-07 16:42:38 +08:00
|
|
|
|
y2038: globally rename compat_time to old_time32
Christoph Hellwig suggested a slightly different path for handling
backwards compatibility with the 32-bit time_t based system calls:
Rather than simply reusing the compat_sys_* entry points on 32-bit
architectures unchanged, we get rid of those entry points and the
compat_time types by renaming them to something that makes more sense
on 32-bit architectures (which don't have a compat mode otherwise),
and then share the entry points under the new name with the 64-bit
architectures that use them for implementing the compatibility.
The following types and interfaces are renamed here, and moved
from linux/compat_time.h to linux/time32.h:
old new
--- ---
compat_time_t old_time32_t
struct compat_timeval struct old_timeval32
struct compat_timespec struct old_timespec32
struct compat_itimerspec struct old_itimerspec32
ns_to_compat_timeval() ns_to_old_timeval32()
get_compat_itimerspec64() get_old_itimerspec32()
put_compat_itimerspec64() put_old_itimerspec32()
compat_get_timespec64() get_old_timespec32()
compat_put_timespec64() put_old_timespec32()
As we already have aliases in place, this patch addresses only the
instances that are relevant to the system call interface in particular,
not those that occur in device drivers and other modules. Those
will get handled separately, while providing the 64-bit version
of the respective interfaces.
I'm not renaming the timex, rusage and itimerval structures, as we are
still debating what the new interface will look like, and whether we
will need a replacement at all.
This also doesn't change the names of the syscall entry points, which can
be done more easily when we actually switch over the 32-bit architectures
to use them, at that point we need to change COMPAT_SYSCALL_DEFINEx to
SYSCALL_DEFINEx with a new name, e.g. with a _time32 suffix.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Link: https://lore.kernel.org/lkml/20180705222110.GA5698@infradead.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
2018-07-13 18:52:28 +08:00
|
|
|
if (!err && put_old_timespec32(&ts, tp))
|
2017-06-25 02:45:05 +08:00
|
|
|
err = -EFAULT;
|
2017-06-07 16:42:38 +08:00
|
|
|
|
2017-06-25 02:45:05 +08:00
|
|
|
return err;
|
2017-06-07 16:42:38 +08:00
|
|
|
}
|
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
SYSCALL_DEFINE2(clock_adjtime32, clockid_t, which_clock,
|
|
|
|
struct old_timex32 __user *, utp)
|
2017-06-07 16:42:34 +08:00
|
|
|
{
|
2018-07-03 13:44:21 +08:00
|
|
|
struct __kernel_timex ktx;
|
2017-06-07 16:42:34 +08:00
|
|
|
int err;
|
|
|
|
|
2019-01-02 20:28:47 +08:00
|
|
|
err = get_old_timex32(&ktx, utp);
|
2017-06-07 16:42:34 +08:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
2019-01-04 04:12:39 +08:00
|
|
|
err = do_clock_adjtime(which_clock, &ktx);
|
2017-06-07 16:42:34 +08:00
|
|
|
|
2021-04-14 11:04:49 +08:00
|
|
|
if (err >= 0 && put_old_timex32(utp, &ktx))
|
|
|
|
return -EFAULT;
|
2017-06-07 16:42:34 +08:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
SYSCALL_DEFINE2(clock_getres_time32, clockid_t, which_clock,
|
|
|
|
struct old_timespec32 __user *, tp)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2017-05-26 17:03:11 +08:00
|
|
|
const struct k_clock *kc = clockid_to_kclock(which_clock);
|
2017-06-25 02:45:05 +08:00
|
|
|
struct timespec64 ts;
|
|
|
|
int err;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2011-02-01 21:51:53 +08:00
|
|
|
if (!kc)
|
2005-04-17 06:20:36 +08:00
|
|
|
return -EINVAL;
|
|
|
|
|
2017-06-25 02:45:05 +08:00
|
|
|
err = kc->clock_getres(which_clock, &ts);
|
y2038: globally rename compat_time to old_time32
Christoph Hellwig suggested a slightly different path for handling
backwards compatibility with the 32-bit time_t based system calls:
Rather than simply reusing the compat_sys_* entry points on 32-bit
architectures unchanged, we get rid of those entry points and the
compat_time types by renaming them to something that makes more sense
on 32-bit architectures (which don't have a compat mode otherwise),
and then share the entry points under the new name with the 64-bit
architectures that use them for implementing the compatibility.
The following types and interfaces are renamed here, and moved
from linux/compat_time.h to linux/time32.h:
old new
--- ---
compat_time_t old_time32_t
struct compat_timeval struct old_timeval32
struct compat_timespec struct old_timespec32
struct compat_itimerspec struct old_itimerspec32
ns_to_compat_timeval() ns_to_old_timeval32()
get_compat_itimerspec64() get_old_itimerspec32()
put_compat_itimerspec64() put_old_itimerspec32()
compat_get_timespec64() get_old_timespec32()
compat_put_timespec64() put_old_timespec32()
As we already have aliases in place, this patch addresses only the
instances that are relevant to the system call interface in particular,
not those that occur in device drivers and other modules. Those
will get handled separately, while providing the 64-bit version
of the respective interfaces.
I'm not renaming the timex, rusage and itimerval structures, as we are
still debating what the new interface will look like, and whether we
will need a replacement at all.
This also doesn't change the names of the syscall entry points, which can
be done more easily when we actually switch over the 32-bit architectures
to use them, at that point we need to change COMPAT_SYSCALL_DEFINEx to
SYSCALL_DEFINEx with a new name, e.g. with a _time32 suffix.
Suggested-by: Christoph Hellwig <hch@infradead.org>
Link: https://lore.kernel.org/lkml/20180705222110.GA5698@infradead.org/
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
2018-07-13 18:52:28 +08:00
|
|
|
if (!err && tp && put_old_timespec32(&ts, tp))
|
2017-06-25 02:45:05 +08:00
|
|
|
return -EFAULT;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2017-06-25 02:45:05 +08:00
|
|
|
return err;
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
2017-06-25 02:45:05 +08:00
|
|
|
|
2017-06-07 16:42:38 +08:00
|
|
|
#endif
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2006-01-10 12:52:37 +08:00
|
|
|
/*
|
2023-04-26 02:49:17 +08:00
|
|
|
* sys_clock_nanosleep() for CLOCK_REALTIME and CLOCK_TAI
|
2006-01-10 12:52:37 +08:00
|
|
|
*/
|
|
|
|
static int common_nsleep(const clockid_t which_clock, int flags,
|
2017-06-14 05:34:33 +08:00
|
|
|
const struct timespec64 *rqtp)
|
2006-01-10 12:52:37 +08:00
|
|
|
{
|
2019-11-12 09:27:05 +08:00
|
|
|
ktime_t texp = timespec64_to_ktime(*rqtp);
|
|
|
|
|
|
|
|
return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
|
2008-02-01 22:29:05 +08:00
|
|
|
HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
|
|
|
|
which_clock);
|
2006-01-10 12:52:37 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2023-04-26 02:49:17 +08:00
|
|
|
/*
|
|
|
|
* sys_clock_nanosleep() for CLOCK_MONOTONIC and CLOCK_BOOTTIME
|
|
|
|
*
|
|
|
|
* Absolute nanosleeps for these clocks are time-namespace adjusted.
|
|
|
|
*/
|
2019-11-12 09:27:06 +08:00
|
|
|
static int common_nsleep_timens(const clockid_t which_clock, int flags,
|
2023-04-26 02:49:17 +08:00
|
|
|
const struct timespec64 *rqtp)
|
2019-11-12 09:27:06 +08:00
|
|
|
{
|
|
|
|
ktime_t texp = timespec64_to_ktime(*rqtp);
|
|
|
|
|
|
|
|
if (flags & TIMER_ABSTIME)
|
|
|
|
texp = timens_ktime_to_host(which_clock, texp);
|
|
|
|
|
|
|
|
return hrtimer_nanosleep(texp, flags & TIMER_ABSTIME ?
|
|
|
|
HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
|
|
|
|
which_clock);
|
|
|
|
}
|
|
|
|
|
2009-01-14 21:14:07 +08:00
|
|
|
SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
		const struct __kernel_timespec __user *, rqtp,
		struct __kernel_timespec __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_timespec64(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;

	/* Absolute sleeps do not report remaining time */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	/*
	 * Invalidate restart_block state left over from a previous syscall
	 * so a bogus restart_syscall() cannot misinterpret stale union data.
	 */
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_NATIVE : TT_NONE;
	current->restart_block.nanosleep.rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
|
2006-09-29 17:00:28 +08:00
|
|
|
|
2018-03-14 12:03:29 +08:00
|
|
|
#ifdef CONFIG_COMPAT_32BIT_TIME
|
|
|
|
|
2019-01-07 07:33:08 +08:00
|
|
|
/*
 * 32-bit time_t variant of sys_clock_nanosleep().
 */
SYSCALL_DEFINE4(clock_nanosleep_time32, clockid_t, which_clock, int, flags,
		struct old_timespec32 __user *, rqtp,
		struct old_timespec32 __user *, rmtp)
{
	const struct k_clock *kc = clockid_to_kclock(which_clock);
	struct timespec64 t;

	if (!kc)
		return -EINVAL;
	if (!kc->nsleep)
		return -EOPNOTSUPP;

	if (get_old_timespec32(&t, rqtp))
		return -EFAULT;

	if (!timespec64_valid(&t))
		return -EINVAL;

	/* Absolute sleeps do not report remaining time */
	if (flags & TIMER_ABSTIME)
		rmtp = NULL;
	/*
	 * Invalidate restart_block state left over from a previous syscall
	 * so a bogus restart_syscall() cannot misinterpret stale union data.
	 */
	current->restart_block.fn = do_no_restart_syscall;
	current->restart_block.nanosleep.type = rmtp ? TT_COMPAT : TT_NONE;
	current->restart_block.nanosleep.compat_rmtp = rmtp;

	return kc->nsleep(which_clock, flags, &t);
}
|
2018-03-14 12:03:29 +08:00
|
|
|
|
2017-06-07 16:42:31 +08:00
|
|
|
#endif
|
2017-05-31 05:15:39 +08:00
|
|
|
|
|
|
|
/* CLOCK_REALTIME: full timer support; the only clock here that is settable and adjustable */
static const struct k_clock clock_realtime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_realtime_timespec,
	.clock_get_ktime	= posix_get_realtime_ktime,
	.clock_set		= posix_clock_realtime_set,
	.clock_adj		= posix_clock_realtime_adj,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|
|
|
|
|
|
|
|
/* CLOCK_MONOTONIC: not settable; absolute nanosleep is time-namespace adjusted */
static const struct k_clock clock_monotonic = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_timespec,
	.clock_get_ktime	= posix_get_monotonic_ktime,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|
|
|
|
|
|
|
|
/* CLOCK_MONOTONIC_RAW: read-only, no timers and no nanosleep */
static const struct k_clock clock_monotonic_raw = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_timespec	= posix_get_monotonic_raw,
};
|
|
|
|
|
|
|
|
/* CLOCK_REALTIME_COARSE: fast tick-resolution read, no timers and no nanosleep */
static const struct k_clock clock_realtime_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_realtime_coarse,
};
|
|
|
|
|
|
|
|
/* CLOCK_MONOTONIC_COARSE: fast tick-resolution read, no timers and no nanosleep */
static const struct k_clock clock_monotonic_coarse = {
	.clock_getres		= posix_get_coarse_res,
	.clock_get_timespec	= posix_get_monotonic_coarse,
};
|
|
|
|
|
|
|
|
/* CLOCK_TAI: timer and nanosleep support, but no clock_set/clock_adj */
static const struct k_clock clock_tai = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_tai_ktime,
	.clock_get_timespec	= posix_get_tai_timespec,
	.nsleep			= common_nsleep,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|
|
|
|
|
2018-04-25 21:33:38 +08:00
|
|
|
/* CLOCK_BOOTTIME: not settable; absolute nanosleep is time-namespace adjusted */
static const struct k_clock clock_boottime = {
	.clock_getres		= posix_get_hrtimer_res,
	.clock_get_ktime	= posix_get_boottime_ktime,
	.clock_get_timespec	= posix_get_boottime_timespec,
	.nsleep			= common_nsleep_timens,
	.timer_create		= common_timer_create,
	.timer_set		= common_timer_set,
	.timer_get		= common_timer_get,
	.timer_del		= common_timer_del,
	.timer_rearm		= common_hrtimer_rearm,
	.timer_forward		= common_hrtimer_forward,
	.timer_remaining	= common_hrtimer_remaining,
	.timer_try_to_cancel	= common_hrtimer_try_to_cancel,
	.timer_wait_running	= common_timer_wait_running,
	.timer_arm		= common_hrtimer_arm,
};
|
|
|
|
|
|
|
|
/*
 * Table of the statically defined clocks, indexed by clock ID.
 * Negative (dynamic / CPU) clock IDs are resolved separately in
 * clockid_to_kclock(). Unpopulated slots are implicitly NULL and
 * are rejected by the callers' !kc checks.
 */
static const struct k_clock * const posix_clocks[] = {
	[CLOCK_REALTIME]		= &clock_realtime,
	[CLOCK_MONOTONIC]		= &clock_monotonic,
	[CLOCK_PROCESS_CPUTIME_ID]	= &clock_process,
	[CLOCK_THREAD_CPUTIME_ID]	= &clock_thread,
	[CLOCK_MONOTONIC_RAW]		= &clock_monotonic_raw,
	[CLOCK_REALTIME_COARSE]		= &clock_realtime_coarse,
	[CLOCK_MONOTONIC_COARSE]	= &clock_monotonic_coarse,
	[CLOCK_BOOTTIME]		= &clock_boottime,
	[CLOCK_REALTIME_ALARM]		= &alarm_clock,
	[CLOCK_BOOTTIME_ALARM]		= &alarm_clock,
	[CLOCK_TAI]			= &clock_tai,
};
|
|
|
|
|
|
|
|
/*
 * Map a clock ID to its k_clock implementation, or NULL for invalid IDs.
 */
static const struct k_clock *clockid_to_kclock(const clockid_t id)
{
	clockid_t idx = id;

	/*
	 * Negative IDs encode either dynamic clocks (CLOCKFD) or the
	 * per-process/per-thread CPU clocks.
	 */
	if (id < 0) {
		return (id & CLOCKFD_MASK) == CLOCKFD ?
			&clock_posix_dynamic : &clock_posix_cpu;
	}

	if (id >= ARRAY_SIZE(posix_clocks))
		return NULL;

	/*
	 * array_index_nospec() clamps the index under speculation so the
	 * bounds check above cannot be bypassed speculatively.
	 */
	return posix_clocks[array_index_nospec(idx, ARRAY_SIZE(posix_clocks))];
}
|