commit da15cfdae0

After talking with some application writers who want very fast, but not fine-grained, timestamps, I decided to try to implement new clock_ids for clock_gettime(): CLOCK_REALTIME_COARSE and CLOCK_MONOTONIC_COARSE, which return the time at the last tick. This is very fast as we don't have to access any hardware (which can be very painful if you're using something like the acpi_pm clocksource), and we can even use the vdso clock_gettime() method to avoid the syscall. The only trade-off is that you only get low-res, tick-grained time resolution.

This isn't a new idea; I know Ingo has a patch in the -rt tree that made the vsyscall gettimeofday() return coarse-grained time when the vsyscall64 sysctl was set to 2. However, that affects all applications on a system. With this method, applications can choose the proper speed/granularity trade-off for themselves.

Signed-off-by: John Stultz <johnstul@us.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: nikolag@ca.ibm.com
Cc: Darren Hart <dvhltc@us.ibm.com>
Cc: arjan@infradead.org
Cc: jonathan@jonmasters.org
LKML-Reference: <1250734414.6897.5.camel@localhost.localdomain>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
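As an illustration (not part of the patch): a minimal userspace sketch of how an application would opt into the new coarse clocks, assuming a kernel with this patch and a libc that exposes the CLOCK_*_COARSE constants; on older glibc, clock_gettime() also needs -lrt at link time.

#include <stdio.h>
#include <time.h>

int main(void)
{
        struct timespec ts, res;

        /* Wall-clock time as of the last tick: no hardware clock access */
        clock_gettime(CLOCK_REALTIME_COARSE, &ts);

        /* The reported resolution is correspondingly coarse (one tick) */
        clock_getres(CLOCK_REALTIME_COARSE, &res);

        printf("now = %ld.%09ld, resolution = %ld ns\n",
               (long)ts.tv_sec, ts.tv_nsec, res.tv_nsec);
        return 0;
}

With the vDSO path below, the coarse call avoids both the hardware read and the syscall entirely.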
160 lines · 4.2 KiB · C
/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime and gettimeofday.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
#include "vextern.h"

#define gtod vdso_vsyscall_gtod_data

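/*
 * Fallback: issue a real clock_gettime() syscall when the fast paths
 * below cannot be used. On x86-64 the syscall number goes in rax ("a")
 * and the arguments in rdi ("D") and rsi ("S").
 */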
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;
        asm("syscall" : "=a" (ret) :
            "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
        return ret;
}

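/*
 * Read the clocksource from user context through its vread() hook and
 * scale the cycles elapsed since cycle_last to nanoseconds with the
 * mult/shift factors published by the timekeeping core.
 */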
notrace static inline long vgetns(void)
{
        long v;
        cycles_t (*vread)(void);
        vread = gtod->clock.vread;
        v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
        return (v * gtod->clock.mult) >> gtod->clock.shift;
}

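/*
 * CLOCK_REALTIME fast path: sample the seqlock-protected wall time and
 * retry if the kernel updated the timekeeping data concurrently.
 */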
notrace static noinline int do_realtime(struct timespec *ts)
{
        unsigned long seq, ns;
        do {
                seq = read_seqbegin(&gtod->lock);
                ts->tv_sec = gtod->wall_time_sec;
                ts->tv_nsec = gtod->wall_time_nsec;
                ns = vgetns();
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        timespec_add_ns(ts, ns);
        return 0;
}

/* Copy of the version in kernel/time.c which we cannot directly access */
notrace static void
vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
{
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
                ++sec;
        }
        while (nsec < 0) {
                nsec += NSEC_PER_SEC;
                --sec;
        }
        ts->tv_sec = sec;
        ts->tv_nsec = nsec;
}

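/*
 * CLOCK_MONOTONIC is the wall time plus the wall_to_monotonic offset;
 * the sum is renormalized so that 0 <= tv_nsec < NSEC_PER_SEC.
 */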
notrace static noinline int do_monotonic(struct timespec *ts)
{
        unsigned long seq, ns, secs;
        do {
                seq = read_seqbegin(&gtod->lock);
                secs = gtod->wall_time_sec;
                ns = gtod->wall_time_nsec + vgetns();
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        vset_normalized_timespec(ts, secs, ns);
        return 0;
}

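/*
 * The new coarse variants return the time as of the last timer tick.
 * No clocksource read is required, so they stay cheap even when the
 * current clocksource has no usable vread() method.
 */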
notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
        unsigned long seq;
        do {
                seq = read_seqbegin(&gtod->lock);
                ts->tv_sec = gtod->wall_time_coarse.tv_sec;
                ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        return 0;
}

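/* Same as the coarse realtime path, shifted to the monotonic base. */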
notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
        unsigned long seq, ns, secs;
        do {
                seq = read_seqbegin(&gtod->lock);
                secs = gtod->wall_time_coarse.tv_sec;
                ns = gtod->wall_time_coarse.tv_nsec;
                secs += gtod->wall_to_monotonic.tv_sec;
                ns += gtod->wall_to_monotonic.tv_nsec;
        } while (unlikely(read_seqretry(&gtod->lock, seq)));
        vset_normalized_timespec(ts, secs, ns);
        return 0;
}

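/*
 * vDSO entry point: dispatch to the in-userspace implementations where
 * possible. The hires clocks need a vread-capable clocksource; the
 * coarse clocks never touch hardware. Anything else (or a disabled
 * vsyscall) falls back to the real syscall.
 */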
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        if (likely(gtod->sysctl_enabled))
                switch (clock) {
                case CLOCK_REALTIME:
                        if (likely(gtod->clock.vread))
                                return do_realtime(ts);
                        break;
                case CLOCK_MONOTONIC:
                        if (likely(gtod->clock.vread))
                                return do_monotonic(ts);
                        break;
                case CLOCK_REALTIME_COARSE:
                        return do_realtime_coarse(ts);
                case CLOCK_MONOTONIC_COARSE:
                        return do_monotonic_coarse(ts);
                }
        return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));

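/*
 * gettimeofday() reuses do_realtime(): struct timeval and struct
 * timespec have the same layout here (see the BUILD_BUG_ON), so the
 * result is written in place and tv_usec converted from nanoseconds.
 */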
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        long ret;
        if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
                if (likely(tv != NULL)) {
                        BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
                                     offsetof(struct timespec, tv_nsec) ||
                                     sizeof(*tv) != sizeof(struct timespec));
                        do_realtime((struct timespec *)tv);
                        tv->tv_usec /= 1000;
                }
                if (unlikely(tz != NULL)) {
                        /* Avoid memcpy. Some old compilers fail to inline it */
                        tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
                        tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
                }
                return 0;
        }
        asm("syscall" : "=a" (ret) :
            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
        return ret;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));