mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 20:54:10 +08:00
4c73056e32
In __delay() we use cpus_have_const_cap() to check for ARM64_HAS_WFXT, but this is not necessary and alternative_has_cap() would be preferable. For historical reasons, cpus_have_const_cap() is more complicated than it needs to be. Before cpucaps are finalized, it will perform a bitmap test of the system_cpucaps bitmap, and once cpucaps are finalized it will use an alternative branch. This used to be necessary to handle some race conditions in the window between cpucap detection and the subsequent patching of alternatives and static branches, where different branches could be out-of-sync with one another (or w.r.t. alternative sequences). Now that we use alternative branches instead of static branches, these are all patched atomically w.r.t. one another, and there are only a handful of cases that need special care in the window between cpucap detection and alternative patching. Due to the above, it would be nice to remove cpus_have_const_cap(), and migrate callers over to alternative_has_cap_*(), cpus_have_final_cap(), or cpus_have_cap() depending on their requirements. This will remove redundant instructions and improve code generation, and will make it easier to determine how each callsite will behave before, during, and after alternative patching. The cpus_have_const_cap() check in __delay() is an optimization to use WFIT and WFET in preference to busy-polling the counter and/or using regular WFE and relying upon the architected timer event stream. It is not necessary to apply this optimization in the window between detecting the ARM64_HAS_WFXT cpucap and patching alternatives. This patch replaces the use of cpus_have_const_cap() with alternative_has_cap_unlikely(), which will avoid generating code to test the system_cpucaps bitmap and should be better for all subsequent calls at runtime. 
Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Suzuki K Poulose <suzuki.poulose@arm.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
70 lines
1.5 KiB
C
70 lines
1.5 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Delay loops based on the OpenRISC implementation.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/timex.h>

#include <clocksource/arm_arch_timer.h>

/*
 * Convert a duration in microseconds into counter cycles.
 * 0x10C7 == 2**32 / 1000000 (rounded up); see __udelay().
 */
#define USECS_TO_CYCLES(time_usecs)		\
	xloops_to_cycles((time_usecs) * 0x10C7UL)

static inline unsigned long xloops_to_cycles(unsigned long xloops)
|
|
{
|
|
return (xloops * loops_per_jiffy * HZ) >> 32;
|
|
}
|
|
|
|
void __delay(unsigned long cycles)
|
|
{
|
|
cycles_t start = get_cycles();
|
|
|
|
if (alternative_has_cap_unlikely(ARM64_HAS_WFXT)) {
|
|
u64 end = start + cycles;
|
|
|
|
/*
|
|
* Start with WFIT. If an interrupt makes us resume
|
|
* early, use a WFET loop to complete the delay.
|
|
*/
|
|
wfit(end);
|
|
while ((get_cycles() - start) < cycles)
|
|
wfet(end);
|
|
} else if (arch_timer_evtstrm_available()) {
|
|
const cycles_t timer_evt_period =
|
|
USECS_TO_CYCLES(ARCH_TIMER_EVT_STREAM_PERIOD_US);
|
|
|
|
while ((get_cycles() - start + timer_evt_period) < cycles)
|
|
wfe();
|
|
}
|
|
|
|
while ((get_cycles() - start) < cycles)
|
|
cpu_relax();
|
|
}
|
|
EXPORT_SYMBOL(__delay);
|
|
|
|
/*
 * Delay for a pre-scaled duration: @xloops is the delay expressed as a
 * 2^32 fixed-point fraction of a second (see __udelay()/__ndelay()).
 */
inline void __const_udelay(unsigned long xloops)
{
	unsigned long cycles = xloops_to_cycles(xloops);

	__delay(cycles);
}
EXPORT_SYMBOL(__const_udelay);

/* Delay for at least @usecs microseconds. */
void __udelay(unsigned long usecs)
{
	/* 0x10C7 == 2**32 / 1000000 (rounded up) */
	__const_udelay(usecs * 0x10C7UL);
}
EXPORT_SYMBOL(__udelay);

/* Delay for at least @nsecs nanoseconds. */
void __ndelay(unsigned long nsecs)
{
	/* 0x5 == 2**32 / 1000000000 (rounded up) */
	__const_udelay(nsecs * 0x5UL);
}
EXPORT_SYMBOL(__ndelay);