4c5a116ada

MIPS already uses and S390 will need the vdso data pointer in
__arch_get_hw_counter().

This works nicely as long as the architecture does not support time
namespaces in the VDSO. With time namespaces enabled the regular
accessor to the vdso data pointer, __arch_get_vdso_data(), will return
the namespace-specific VDSO data page for tasks which are part of a
non-root time namespace. This would cause the architectures which need
the vdso data pointer in __arch_get_hw_counter() to access the wrong
vdso data page.

Add a vdso_data pointer argument to __arch_get_hw_counter() and hand it
in from the call sites in the core code. For architectures which do not
need the data pointer in their counter accessor function, the compiler
will just optimize it out.

Fix up all existing architecture implementations and make MIPS utilize
the pointer instead of invoking the accessor function.

No functional change and no change in the resulting object code (except
MIPS).

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/draft-87wo2ekuzn.fsf@nanos.tec.linutronix.de
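In code terms, this is a one-argument extension of the counter hook plus
a matching update at every generic call site. A minimal compilable
sketch of the shape of the change (plain userspace C; the struct
contents, read_cycles() and the constants are invented stand-ins for
illustration, not kernel code):

	#include <stdint.h>

	struct vdso_data { int32_t clock_mode; };	/* toy stand-in */

	/*
	 * Before: u64 __arch_get_hw_counter(s32 clock_mode). An
	 * implementation that needed the data page had to call
	 * __arch_get_vdso_data(), which returns the namespace-specific
	 * page for tasks in a non-root time namespace.
	 * After: the core hands its own vdso_data pointer down, so the
	 * wrong page can never be picked up behind the core's back.
	 */
	static inline uint64_t __arch_get_hw_counter(int32_t clock_mode,
						     const struct vdso_data *vd)
	{
		(void)vd;			/* arm64 ignores it; optimized out */
		return clock_mode ? 42 : 0;	/* pretend counter read */
	}

	/* Generic-code call site, in the style of lib/vdso/gettimeofday.c */
	static uint64_t read_cycles(const struct vdso_data *vd)
	{
		return __arch_get_hw_counter(vd->clock_mode, vd);
	}

	int main(void)
	{
		struct vdso_data vd = { .clock_mode = 1 };
		return read_cycles(&vd) == 42 ? 0 : 1;
	}
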
112 lines · 2.3 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/unistd.h>

#define VDSO_HAS_CLOCK_GETRES		1

/*
 * Syscall fallbacks, used when the clock cannot be read from userspace:
 * per the arm64 syscall convention, the syscall number goes in x8,
 * arguments in x0/x1, and the result comes back in x0.
 */
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
			  struct timezone *_tz)
{
	register struct timezone *tz asm("x1") = _tz;
	register struct __kernel_old_timeval *tv asm("x0") = _tv;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_gettimeofday;

	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (tv), "r" (tz), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_gettime;

	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	register struct __kernel_timespec *ts asm("x1") = _ts;
	register clockid_t clkid asm("x0") = _clkid;
	register long ret asm ("x0");
	register long nr asm("x8") = __NR_clock_getres;

	asm volatile(
	"	svc #0\n"
	: "=r" (ret)
	: "r" (clkid), "r" (ts), "r" (nr)
	: "memory");

	return ret;
}
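
The three fallbacks above share one register-binding pattern. As a
standalone illustration, here is the same raw-syscall sequence as a
userspace program; it builds natively on arm64 (cc demo.c), uses the
real __NR_clock_gettime from <asm/unistd.h>, and raw_clock_gettime() is
an invented name for this sketch:

	#include <asm/unistd.h>
	#include <linux/time_types.h>
	#include <stdio.h>
	#include <time.h>

	static long raw_clock_gettime(long clkid, struct __kernel_timespec *_ts)
	{
		register struct __kernel_timespec *ts asm("x1") = _ts;
		register long clk asm("x0") = clkid;
		register long ret asm("x0");
		register long nr asm("x8") = __NR_clock_gettime;

		/* svc #0 traps into the kernel; the result replaces x0 */
		asm volatile(
		"	svc #0\n"
		: "=r" (ret)
		: "r" (clk), "r" (ts), "r" (nr)
		: "memory");

		return ret;
	}

	int main(void)
	{
		struct __kernel_timespec ts;

		if (raw_clock_gettime(CLOCK_REALTIME, &ts) != 0)
			return 1;
		printf("%lld.%09lld\n",
		       (long long)ts.tv_sec, (long long)ts.tv_nsec);
		return 0;
	}
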
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
						 const struct vdso_data *vd)
{
	u64 res;

	/*
	 * Core checks for mode already, so this raced against a concurrent
	 * update. Return something. Core will do another round and then
	 * see the mode change and fall back to the syscall.
	 */
	if (clock_mode == VDSO_CLOCKMODE_NONE)
		return 0;

	/*
	 * This isb() is required to prevent the counter value from being
	 * speculated.
	 */
	isb();
	asm volatile("mrs %0, cntvct_el0" : "=r" (res) :: "memory");
	/*
	 * This isb() is required to prevent the seq lock from being
	 * speculated.
	 */
	isb();

	return res;
}

static __always_inline
const struct vdso_data *__arch_get_vdso_data(void)
{
	return _vdso_data;
}

#ifdef CONFIG_TIME_NS
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return _timens_data;
}
#endif

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
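
To connect __arch_get_timens_vdso_data() back to the commit message: for
a task inside a time namespace the regular accessor resolves to the
namespace page, the generic code detects that via a marker clock_mode
(VDSO_CLOCKMODE_TIMENS), switches to the real data page, and only then
reads the counter, passing that same pointer down. A toy model of that
flow, assuming this is how the core behaves; all types, values and the
page-swap logic here are invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	struct vdso_data { int32_t clock_mode; uint64_t cycle_base; };

	#define TOY_CLOCKMODE_TIMENS	0x7fffffff	/* marker stand-in */

	static struct vdso_data root_page   = { .clock_mode = 1, .cycle_base = 1000 };
	static struct vdso_data timens_page = { .clock_mode = TOY_CLOCKMODE_TIMENS };

	static int task_in_timens = 1;	/* pretend the task joined a timens */

	/* Regular accessor: resolves to the namespace page for timens tasks */
	static const struct vdso_data *toy_get_vdso_data(void)
	{
		return task_in_timens ? &timens_page : &root_page;
	}

	/* Counter hook in the MIPS/S390 style: must see the real data page */
	static uint64_t toy_get_hw_counter(int32_t clock_mode,
					   const struct vdso_data *vd)
	{
		return clock_mode == 1 ? vd->cycle_base + 7 : 0;
	}

	int main(void)
	{
		const struct vdso_data *vd = toy_get_vdso_data();

		/* Core spots the marker and switches to the real data page
		 * (the kernel uses __arch_get_timens_vdso_data() for this) */
		if (vd->clock_mode == TOY_CLOCKMODE_TIMENS)
			vd = &root_page;

		/* ...and passes that same pointer down, per this commit */
		printf("cycles=%llu\n",
		       (unsigned long long)toy_get_hw_counter(vd->clock_mode, vd));
		return 0;
	}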