linux-next/arch/x86/include/asm/vgtod.h
Andy Lutomirski e76b027e64 x86,vdso: Use LSL unconditionally for vgetcpu
LSL is faster than RDTSCP and works everywhere; there's no need to
switch between them depending on CPU.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Andi Kleen <andi@firstfloor.org>
Link: http://lkml.kernel.org/r/72f73d5ec4514e02bba345b9759177ef03742efb.1414706021.git.luto@amacapital.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
2014-11-03 13:41:53 +01:00
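
The effect of this change is easiest to see from the caller's side. As a minimal, hedged sketch (not the in-tree vDSO source), a vgetcpu-style helper could decode the value returned by __getcpu() defined below, assuming the per-CPU GDT segment limit packs the NUMA node above the 12-bit CPU number, as the VGETCPU_CPU_MASK definition in this header suggests:

/* Hypothetical caller: split __getcpu()'s packed value into cpu/node. */
static inline void example_decode_getcpu(unsigned *cpu, unsigned *node)
{
	unsigned int p = __getcpu();		/* a single LSL, no RDTSCP needed */

	if (cpu)
		*cpu = p & VGETCPU_CPU_MASK;	/* low 12 bits: CPU number */
	if (node)
		*node = p >> 12;		/* upper bits: NUMA node (assumed encoding) */
}

Because LSL only reads a segment limit that the kernel sets up per CPU, it works on every CPU, while RDTSCP does not; that is why the per-CPU switch between the two can be dropped.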


#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif

/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time,
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned	seq;

	int		vclock_mode;
	cycle_t		cycle_last;
	cycle_t		mask;
	u32		mult;
	u32		shift;

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;
	gtod_long_t	wall_time_coarse_sec;
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};

extern struct vsyscall_gtod_data vsyscall_gtod_data;

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.
	 */
	asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */
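
For context, here is a hedged sketch (not the in-tree vDSO reader) of how the seqcount helpers above are meant to be used: a reader snapshots the fields it needs between gtod_read_begin() and gtod_read_retry() and retries if a concurrent update intervened.

/* Hypothetical reader: lockless snapshot of one field of vsyscall_gtod_data. */
static inline gtod_long_t example_read_wall_time_sec(const struct vsyscall_gtod_data *gtod)
{
	unsigned seq;
	gtod_long_t sec;

	do {
		seq = gtod_read_begin(gtod);	/* spins while seq is odd, i.e. a write is in flight */
		sec = gtod->wall_time_sec;	/* snapshot the data of interest */
	} while (unlikely(gtod_read_retry(gtod, seq)));	/* retry if seq changed under us */

	return sec;
}

The writer side pairs gtod_write_begin() and gtod_write_end() around its updates, so seq is odd for the whole duration of a write and differs afterwards, which is exactly what the read loop checks for.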