sched/clock: Make local_clock() noinstr

With sched_clock() noinstr, provide a noinstr implementation of
local_clock().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126151323.760767043@infradead.org
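
[Editor's note, not part of the commit message] As a rough, non-authoritative illustration of what this enables: once local_clock() is noinstr, code that itself runs in the noinstr (non-instrumentable) section can take a timestamp directly, and objtool's noinstr validation should no longer flag the call as leaving .noinstr.text. The caller below, my_entry_hook(), is hypothetical and only sketches the idea:

	/* Hypothetical noinstr caller, for illustration only */
	noinstr void my_entry_hook(void)
	{
		u64 ts = local_clock();	/* OK: local_clock() is noinstr now */

		instrumentation_begin();
		trace_printk("entered at %llu ns\n", ts);	/* instrumentable work goes here */
		instrumentation_end();
	}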
Peter Zijlstra 2023-01-26 16:08:37 +01:00 committed by Ingo Molnar
parent 8739c68115
commit 776f22913b
2 changed files with 24 additions and 11 deletions

--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h

@@ -45,7 +45,7 @@ static inline u64 cpu_clock(int cpu)
 	return sched_clock();
 }
 
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock(void)
 {
 	return sched_clock();
 }
@@ -79,10 +79,8 @@ static inline u64 cpu_clock(int cpu)
 	return sched_clock_cpu(cpu);
 }
 
-static inline u64 local_clock(void)
-{
-	return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock(void);
+
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING

--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c

@@ -93,7 +93,7 @@ struct sched_clock_data {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
-notrace static inline struct sched_clock_data *this_scd(void)
+static __always_inline struct sched_clock_data *this_scd(void)
 {
 	return this_cpu_ptr(&sched_clock_data);
 }
@@ -244,12 +244,12 @@ late_initcall(sched_clock_init_late);
  * min, max except they take wrapping into account
  */
 
-notrace static inline u64 wrap_min(u64 x, u64 y)
+static __always_inline u64 wrap_min(u64 x, u64 y)
 {
 	return (s64)(x - y) < 0 ? x : y;
 }
 
-notrace static inline u64 wrap_max(u64 x, u64 y)
+static __always_inline u64 wrap_max(u64 x, u64 y)
 {
 	return (s64)(x - y) > 0 ? x : y;
 }
@@ -260,7 +260,7 @@ notrace static inline u64 wrap_max(u64 x, u64 y)
  *  - filter out backward motion
  *  - use the GTOD tick value to create a window to filter crazy TSC values
  */
-notrace static u64 sched_clock_local(struct sched_clock_data *scd)
+static __always_inline u64 sched_clock_local(struct sched_clock_data *scd)
 {
 	u64 now, clock, old_clock, min_clock, max_clock, gtod;
 	s64 delta;
@@ -287,13 +287,28 @@ again:
 	clock = wrap_max(clock, min_clock);
 	clock = wrap_min(clock, max_clock);
 
-	if (!try_cmpxchg64(&scd->clock, &old_clock, clock))
+	if (!arch_try_cmpxchg64(&scd->clock, &old_clock, clock))
 		goto again;
 
 	return clock;
 }
 
-notrace static u64 sched_clock_remote(struct sched_clock_data *scd)
+noinstr u64 local_clock(void)
+{
+	u64 clock;
+
+	if (static_branch_likely(&__sched_clock_stable))
+		return sched_clock() + __sched_clock_offset;
+
+	preempt_disable_notrace();
+	clock = sched_clock_local(this_scd());
+	preempt_enable_notrace();
+
+	return clock;
+}
+EXPORT_SYMBOL_GPL(local_clock);
+
+static notrace u64 sched_clock_remote(struct sched_clock_data *scd)
 {
 	struct sched_clock_data *my_scd = this_scd();
 	u64 this_clock, remote_clock;
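
[Editor's note, not from this commit] Since the now out-of-line local_clock() is exported via EXPORT_SYMBOL_GPL(), module code can keep using it for cheap local-CPU timestamps in nanoseconds. A minimal, hypothetical sketch; measure_something() and do_work() are made up for illustration:

	#include <linux/sched/clock.h>
	#include <linux/printk.h>

	static void measure_something(void)
	{
		u64 t0 = local_clock();	/* ns on this CPU; not ordered across CPUs */

		do_work();		/* hypothetical workload */

		pr_info("work took %llu ns\n", local_clock() - t0);
	}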