timekeeping: Let timekeeping_cycles_to_ns() handle both under and overflow

For the case !CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE, forego overflow
protection in the range (mask >> 1) < delta <= mask and always interpret
such a delta as an inconsistency between CPU clock values. That allows
slightly neater code, and it is on a slow path so has no effect on
performance.
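
To illustrate that boundary, consider a mask-relative delta whose most
significant mask bit is set. A minimal user-space sketch (the function
name and the test harness below are made up for the example, they are
not kernel code):

  #include <stdint.h>
  #include <stdio.h>

  /* Mask-relative delta, as the timekeeping core computes it. */
  static uint64_t masked_delta(uint64_t cycles, uint64_t last, uint64_t mask)
  {
  	return (cycles - last) & mask;
  }

  int main(void)
  {
  	uint64_t mask = 0xFFFFFFFFULL;	/* a 32-bit clocksource */
  	uint64_t d;

  	/* Forward motion: small delta, MSB of the mask clear. */
  	d = masked_delta(1100, 1000, mask);
  	printf("forward:  delta=%#llx negative=%d\n",
  	       (unsigned long long)d, !!(d & ~(mask >> 1)));

  	/*
  	 * Another CPU reads 100 cycles behind cycle_last: the delta
  	 * wraps to just below the mask, the MSB of the mask is set,
  	 * and the value is treated as negative motion.
  	 */
  	d = masked_delta(1000, 1100, mask);
  	printf("backward: delta=%#llx negative=%d\n",
  	       (unsigned long long)d, !!(d & ~(mask >> 1)));
  	return 0;
  }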

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-19-adrian.hunter@intel.com

@@ -266,17 +266,14 @@ static inline u64 timekeeping_debug_get_ns(const struct tk_read_base *tkr)
 	 * Try to catch underflows by checking if we are seeing small
	 * mask-relative negative values.
	 */
-	if (unlikely((~delta & mask) < (mask >> 3))) {
+	if (unlikely((~delta & mask) < (mask >> 3)))
 		tk->underflow_seen = 1;
-		now = last;
-	}
 
-	/* Cap delta value to the max_cycles values to avoid mult overflows */
-	if (unlikely(delta > max)) {
+	/* Check for multiplication overflows */
+	if (unlikely(delta > max))
 		tk->overflow_seen = 1;
-		now = last + max;
-	}
 
+	/* timekeeping_cycles_to_ns() handles both under and overflow */
 	return timekeeping_cycles_to_ns(tkr, now);
 }
 #else
@@ -375,19 +372,17 @@ static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 	u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
 	/*
-	 * This detects the case where the delta overflows the multiplication
-	 * with tkr->mult.
+	 * This detects both negative motion and the case where the delta
+	 * overflows the multiplication with tkr->mult.
	 */
 	if (unlikely(delta > tkr->clock->max_cycles)) {
-		if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-			/*
-			 * Handle clocksource inconsistency between CPUs to prevent
-			 * time from going backwards by checking for the MSB of the
-			 * mask being set in the delta.
-			 */
-			if (unlikely(delta & ~(mask >> 1)))
-				return tkr->xtime_nsec >> tkr->shift;
-		}
+		/*
+		 * Handle clocksource inconsistency between CPUs to prevent
+		 * time from going backwards by checking for the MSB of the
+		 * mask being set in the delta.
+		 */
+		if (delta & ~(mask >> 1))
+			return tkr->xtime_nsec >> tkr->shift;
 
 		return delta_to_ns_safe(tkr, delta);
 	}
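
Taken together, the conversion path after this change can be modelled in
user space roughly as below. This is a sketch under assumptions: the
struct and field names are simplified stand-ins for struct tk_read_base,
delta_to_ns_safe() (defined outside this hunk) is assumed to widen the
multiplication to 128 bits, and unsigned __int128 is a GCC/Clang
extension:

  #include <stdint.h>

  /* Simplified stand-in for struct tk_read_base. */
  struct tkr_model {
  	uint64_t mask, cycle_last, xtime_nsec, max_cycles;
  	uint32_t mult, shift;
  };

  static uint64_t model_cycles_to_ns(const struct tkr_model *t, uint64_t cycles)
  {
  	uint64_t delta = (cycles - t->cycle_last) & t->mask;

  	if (delta > t->max_cycles) {
  		/*
  		 * MSB of the mask set: clocksource inconsistency between
  		 * CPUs; clamp to the last update time rather than let
  		 * time go backwards.
  		 */
  		if (delta & ~(t->mask >> 1))
  			return t->xtime_nsec >> t->shift;
  		/*
  		 * Large but plausible delta: widen to 128 bits so that
  		 * delta * mult cannot overflow (models what
  		 * delta_to_ns_safe() is assumed to do).
  		 */
  		return (uint64_t)(((unsigned __int128)delta * t->mult +
  				   t->xtime_nsec) >> t->shift);
  	}
  	/* Common case: the 64-bit multiplication cannot overflow. */
  	return (delta * t->mult + t->xtime_nsec) >> t->shift;
  }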