Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-12-26 22:24:09 +08:00
f7dd3b1734
Pull timer updates from Thomas Gleixner:
 "This is the last functional update from the tip tree for 4.10. It got delayed due to a newly reported and analyzed variant of a BIOS bug and the resulting wreckage:

  - Separation of the TSC being marked reliable from the fact that the platform provides the TSC frequency via CPUID/MSRs, and making use of it for GOLDMONT.

  - TSC adjust MSR validation and sanitizing: The TSC adjust MSR contains the offset to the hardware counter. The sum of the adjust MSR and the counter is the TSC value which is read via RDTSC.

    On at least two machines from different vendors the BIOS sets the TSC adjust MSR to negative values. This happens on cold and warm boot. While on cold boot the offset is a few milliseconds, on warm boot it basically compensates the power-on time of the system. The BIOSes are not even using the adjust MSR to set all CPUs in the package to the same offset. The offsets are different, which renders the TSC unusable.

    What's worse is that the TSC deadline timer has a HW feature^Wbug. It malfunctions when the TSC adjust value is negative or greater than or equal to 0x80000000, resulting in silent boot failures, hard lockups or non-firing timers. This looks like some hardware-internal 32/64-bit issue with a sign extension problem. Intel has been silent so far on the issue.

    The update contains sanity checks and keeps the adjust register within working limits and in sync on the package. As it looks like this disease is spreading via BIOS crapware, we need to address this urgently as the boot failures are hard to debug for users."

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/tsc: Limit the adjust value further
  x86/tsc: Annotate printouts as firmware bug
  x86/tsc: Force TSC_ADJUST register to value >= zero
  x86/tsc: Validate TSC_ADJUST after resume
  x86/tsc: Validate cpumask pointer before accessing it
  x86/tsc: Fix broken CONFIG_X86_TSC=n build
  x86/tsc: Try to adjust TSC if sync test fails
  x86/tsc: Prepare warp test for TSC adjustment
  x86/tsc: Move sync cleanup to a safe place
  x86/tsc: Sync test only for the first cpu in a package
  x86/tsc: Verify TSC_ADJUST from idle
  x86/tsc: Store and check TSC ADJUST MSR
  x86/tsc: Detect random warps
  x86/tsc: Use X86_FEATURE_TSC_ADJUST in detect_art()
  x86/tsc: Finalize the split of the TSC_RELIABLE flag
  x86/tsc: Set TSC_KNOWN_FREQ and TSC_RELIABLE flags on Intel Atom SoCs
  x86/tsc: Mark Intel ATOM_GOLDMONT TSC reliable
  x86/tsc: Mark TSC frequency determined by CPUID as known
  x86/tsc: Add X86_FEATURE_TSC_KNOWN_FREQ flag
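As context for the sanitizing described above, here is a minimal, hypothetical sketch of the idea: read MSR_IA32_TSC_ADJUST and, if the firmware left a negative offset in it, force it back into a working range. This is not the in-tree implementation (that lives in arch/x86/kernel/tsc_sync.c and additionally limits the value further and keeps all CPUs in a package in sync); the helper name sanitize_tsc_adjust_sketch() is illustrative only.

#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>

/* Illustrative sketch only -- not the arch/x86/kernel/tsc_sync.c code. */
static void sanitize_tsc_adjust_sketch(void)
{
        s64 adj;

        if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
                return;

        /* TSC as read by RDTSC = hardware counter + IA32_TSC_ADJUST */
        rdmsrl(MSR_IA32_TSC_ADJUST, adj);

        /*
         * Negative (or >= 0x80000000) adjust values trip the TSC deadline
         * timer misbehaviour described above, so force the MSR back to 0.
         */
        if (adj < 0) {
                pr_warn(FW_BUG "TSC ADJUST: CPU%d: %lld, forcing to 0\n",
                        smp_processor_id(), adj);
                wrmsrl(MSR_IA32_TSC_ADJUST, 0);
        }
}

The file below (arch/x86/kernel/process.c) shows the idle-path side of the same series: arch_cpu_idle_enter() calls tsc_verify_tsc_adjust(false), so the check is repeated from idle.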
539 lines
12 KiB
C
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
        .x86_tss = {
                .sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
                .io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
        },
#ifdef CONFIG_X86_32
        /*
         * Note that the .io_bitmap member must be extra-big. This is because
         * the CPU will access an additional byte beyond the end of the IO
         * permission bitmap. The extra byte must be all 1 bits, and must
         * be within the limit.
         */
        .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
        .SYSENTER_stack_canary = STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
        dst->thread.vm86 = NULL;
#endif

        return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
        struct thread_struct *t = &tsk->thread;
        unsigned long *bp = t->io_bitmap_ptr;
        struct fpu *fpu = &t->fpu;

        if (bp) {
                struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
                kfree(bp);
        }

        free_vm86(t);

        fpu__drop(fpu);
}

void flush_thread(void)
{
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

        fpu__clear(&tsk->thread.fpu);
}

static void hard_disable_TSC(void)
{
        cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
        preempt_disable();
        if (!test_and_set_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_disable_TSC();
        preempt_enable();
}

static void hard_enable_TSC(void)
{
        cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
        preempt_disable();
        if (test_and_clear_thread_flag(TIF_NOTSC))
                /*
                 * Must flip the CPU state synchronously with
                 * TIF_NOTSC in the current running context.
                 */
                hard_enable_TSC();
        preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
        unsigned int val;

        if (test_thread_flag(TIF_NOTSC))
                val = PR_TSC_SIGSEGV;
        else
                val = PR_TSC_ENABLE;

        return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
        if (val == PR_TSC_SIGSEGV)
                disable_TSC();
        else if (val == PR_TSC_ENABLE)
                enable_TSC();
        else
                return -EINVAL;

        return 0;
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
{
        struct thread_struct *prev, *next;

        prev = &prev_p->thread;
        next = &next_p->thread;

        if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
            test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
                unsigned long debugctl = get_debugctlmsr();

                debugctl &= ~DEBUGCTLMSR_BTF;
                if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
                        debugctl |= DEBUGCTLMSR_BTF;

                update_debugctlmsr(debugctl);
        }

        if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
            test_tsk_thread_flag(next_p, TIF_NOTSC)) {
                /* prev and next are different */
                if (test_tsk_thread_flag(next_p, TIF_NOTSC))
                        hard_disable_TSC();
                else
                        hard_enable_TSC();
        }

        if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
                /*
                 * Copy the relevant range of the IO bitmap.
                 * Normally this is 128 bytes or less:
                 */
                memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                       max(prev->io_bitmap_max, next->io_bitmap_max));
        } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
                /*
                 * Clear any possible leftover bits:
                 */
                memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
        }
        propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
        tsc_verify_tsc_adjust(false);
        local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
        play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
        x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
        trace_cpu_idle_rcuidle(1, smp_processor_id());
        safe_halt();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
        bool ret = !!x86_idle;

        x86_idle = default_idle;

        return ret;
}
#endif
void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /*
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
        disable_local_APIC();
        mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

        for (;;)
                halt();
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
        /*
         * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
         * gets set after static_cpu_has() places have been converted via
         * alternatives.
         */
        if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                default_idle();
                return;
        }

        tick_broadcast_enter();

        default_idle();

        /*
         * The switch back from broadcast mode needs to be called with
         * interrupts disabled.
         */
        local_irq_disable();
        tick_broadcast_exit();
        local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return 0;

        if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
                return 0;

        return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
                        mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
                        mb(); /* quirk */
                }

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched())
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
        }
        __current_clr_polling();
}

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
                pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
        if (x86_idle || boot_option_idle_override == IDLE_POLL)
                return;

        if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
                pr_info("using AMD E400 aware idle routine\n");
                x86_idle = amd_e400_idle;
        } else if (prefer_mwait_c1_over_halt(c)) {
                pr_info("using mwait in idle threads\n");
                x86_idle = mwait_idle;
        } else
                x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
                pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
                local_irq_disable();
                tick_broadcast_force();
                local_irq_enable();
        }
}

void __init arch_post_acpi_subsys_init(void)
{
        u32 lo, hi;

        if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
                return;

        /*
         * AMD E400 detection needs to happen after ACPI has been enabled. If
         * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
         * MSR_K8_INT_PENDING_MSG.
         */
        rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
        if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
                return;

        boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

        if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                mark_tsc_unstable("TSC halt in AMD C1E");
        pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
        if (!str)
                return -EINVAL;

        if (!strcmp(str, "poll")) {
                pr_info("using polling idle threads\n");
                boot_option_idle_override = IDLE_POLL;
                cpu_idle_poll_ctrl(true);
        } else if (!strcmp(str, "halt")) {
                /*
                 * When the boot option of idle=halt is added, halt is
                 * forced to be used for CPU idle. In such case CPU C2/C3
                 * won't be used again.
                 * To continue to load the CPU idle driver, don't touch
                 * the boot_option_idle_override.
                 */
                x86_idle = default_idle;
                boot_option_idle_override = IDLE_HALT;
        } else if (!strcmp(str, "nomwait")) {
                /*
                 * If the boot option of "idle=nomwait" is added,
                 * it means that mwait will be disabled for CPU C2/C3
                 * states. In such case it won't touch the variable
                 * of boot_option_idle_override.
                 */
                boot_option_idle_override = IDLE_NOMWAIT;
        } else
                return -1;

        return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        return randomize_page(mm->brk, 0x02000000);
}

/*
 * Return saved PC of a blocked thread.
 * What is this good for? it will be always the scheduler or ret_from_fork.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct inactive_task_frame *frame =
                (struct inactive_task_frame *) READ_ONCE(tsk->thread.sp);
        return READ_ONCE_NOCHECK(frame->ret_addr);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long start, bottom, top, sp, fp, ip, ret = 0;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        if (!try_get_task_stack(p))
                return 0;

        start = (unsigned long)task_stack_page(p);
        if (!start)
                goto out;

        /*
         * Layout of the stack page:
         *
         * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
         * PADDING
         * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
         * stack
         * ----------- bottom = start
         *
         * The tasks stack pointer points at the location where the
         * framepointer is stored. The data on the stack is:
         * ... IP FP ... IP FP
         *
         * We need to read FP and IP, so we need to adjust the upper
         * bound by another unsigned long.
         */
        top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
        top -= 2 * sizeof(unsigned long);
        bottom = start;

        sp = READ_ONCE(p->thread.sp);
        if (sp < bottom || sp > top)
                goto out;

        fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
        do {
                if (fp < bottom || fp > top)
                        goto out;
                ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
                if (!in_sched_functions(ip)) {
                        ret = ip;
                        goto out;
                }
                fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
        } while (count++ < 16 && p->state != TASK_RUNNING);

out:
        put_task_stack(p);
        return ret;
}