mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-20 10:44:23 +08:00
4e241557fc
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull first batch of KVM updates from Paolo Bonzini:
 "The bulk of the changes here is for x86. And for once it's not for
  silicon that no one owns: these are really new features for everyone.

  Details:

   - ARM: several features are in progress but missed the 4.2 deadline.
     So here is just a smattering of bug fixes, plus enabling the VFIO
     integration.

   - s390: Some fixes/refactorings/optimizations, plus support for 2GB
     pages.

   - x86:
      * host and guest support for marking kvmclock as a stable
        scheduler clock.
      * support for write combining.
      * support for system management mode, needed for secure boot in
        guests.
      * a bunch of cleanups required for the above
      * support for virtualized performance counters on AMD
      * legacy PCI device assignment is deprecated and defaults to "n"
        in Kconfig; VFIO replaces it

     On top of this there are also bug fixes and eager FPU context
     loading for FPU-heavy guests.

   - Common code: Support for multiple address spaces; for now it is
     used only for x86 SMM but the s390 folks also have plans"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (124 commits)
  KVM: s390: clear floating interrupt bitmap and parameters
  KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
  KVM: x86/vPMU: Implement AMD vPMU code for KVM
  KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
  KVM: x86/vPMU: introduce kvm_pmu_msr_idx_to_pmc
  KVM: x86/vPMU: reorder PMU functions
  KVM: x86/vPMU: whitespace and stylistic adjustments in PMU code
  KVM: x86/vPMU: use the new macros to go between PMC, PMU and VCPU
  KVM: x86/vPMU: introduce pmu.h header
  KVM: x86/vPMU: rename a few PMU functions
  KVM: MTRR: do not map huge page for non-consistent range
  KVM: MTRR: simplify kvm_mtrr_get_guest_memory_type
  KVM: MTRR: introduce mtrr_for_each_mem_type
  KVM: MTRR: introduce fixed_mtrr_addr_* functions
  KVM: MTRR: sort variable MTRRs
  KVM: MTRR: introduce var_mtrr_range
  KVM: MTRR: introduce fixed_mtrr_segment table
  KVM: MTRR: improve kvm_mtrr_get_guest_memory_type
  KVM: MTRR: do not split 64 bits MSR content
  KVM: MTRR: clean up mtrr default type
  ...
893 lines
20 KiB
C
/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
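/*
 * Each paravirt feature below can be turned off from the kernel command
 * line: "no-kvmapf" disables async page faults, "no-steal-acc" disables
 * steal-time accounting and "no-kvmclock-vsyscall" disables the kvmclock
 * vsyscall fast path.
 */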
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        wait_queue_head_t wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}
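/*
 * Called from the async "page not present" fault path: park the current
 * task on the per-token wait queue until the host reports the page ready.
 * If scheduling is not safe here (idle task or nested preemption), the
 * vCPU polls with halt instead of sleeping.
 */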
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DEFINE_WAIT(wait);

        rcu_irq_enter();

        spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_waitqueue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_wait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (waitqueue_active(&n->wq))
                wake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                spin_unlock(&b->lock);
        }
}
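/*
 * "Page ready" wakeup. A token of ~0 is a broadcast: wake every task that
 * went to sleep on this CPU. If the wakeup arrives before the matching
 * "page not present" fault, a dummy node is queued so the fault side can
 * detect the race (see kvm_async_pf_task_wait() above).
 */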
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_waitqueue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
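/*
 * apf_reason is the per-CPU slot the host writes the fault reason into
 * before injecting the async #PF; reading it also clears it so the next
 * fault starts from a clean slate. A reason of 0 means a regular #PF.
 */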
u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);
static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        /*
         * KVM isn't paravirt in the sense of paravirt_enabled. A KVM
         * guest kernel works like a bare metal kernel with additional
         * features, and paravirt_enabled is about features that are
         * missing.
         */
        pv_info.paravirt_enabled = 0;

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}
static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        memset(st, 0, sizeof(*st));

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /**
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}
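/*
 * Per-CPU feature setup. MSR_KVM_ASYNC_PF_EN takes the physical address
 * of apf_reason with enable flags in the low bits; KVM_ASYNC_PF_SEND_ALWAYS
 * asks the host to deliver async page faults even while we run in kernel
 * mode, which is only safe when the kernel is preemptible (hence the
 * CONFIG_PREEMPT guard below).
 */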
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}
static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}
static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};
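/*
 * The host bumps the version field to an odd value before it updates the
 * steal_time record and back to an even one afterwards, so reading an odd
 * or changed version means we raced with an update and must retry.
 */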
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif
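/* Vector 14 is #PF: replace the page fault entry with the async-PF-aware stub. */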
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}
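/*
 * Boot-time registration of every KVM paravirt feature the host
 * advertises through the KVM_FEATURE_* CPUID bits.
 */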
void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (cpu_has_hypervisor)
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}
bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}


#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>
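/*
 * pv_lock_ops.wait hook for the queued spinlock slowpath: halt this vCPU
 * until kvm_kick_cpu() (a KVM_HC_KICK_CPU hypercall on the unlocking CPU)
 * wakes it, rechecking the lock byte first so a wakeup cannot be missed.
 */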
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * halt until it's our turn and kicked. Note that we do safe halt
         * for irq enabled case to avoid hang when lock info is overwritten
         * in irq spinlock slowpath and no spurious interrupt occur to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}
#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
        RELEASED_SLOW,
        RELEASED_SLOW_KICKED,
        NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS 30

static struct kvm_spinlock_stats
{
        u32 contention_stats[NR_CONTENTION_STATS];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;
static inline void check_zero(void)
{
        u8 ret;
        u8 old;

        old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
                if (ret == old)
                        memset(&spinlock_stats, 0, sizeof(spinlock_stats));
        }
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
        check_zero();
        spinlock_stats.contention_stats[var] += val;
}


static inline u64 spin_time_start(void)
{
        return sched_clock();
}
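/*
 * Histogram of blocked time: bucket i counts waits of roughly 2^i ns
 * (sched_clock() units), with one overflow bucket past HISTO_BUCKETS.
 */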
static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index;

        index = ilog2(delta);
        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta;

        delta = sched_clock() - start;
        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}
static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
        d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
        if (!d_kvm_debug)
                printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

        return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
        struct dentry *d_kvm;

        d_kvm = kvm_init_debugfs();
        if (d_kvm == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[TAKEN_SLOW]);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[RELEASED_SLOW]);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);

        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                                 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
        return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif /* CONFIG_KVM_DEBUG_FS */
struct kvm_lock_waiting {
        struct arch_spinlock *lock;
        __ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
        struct kvm_lock_waiting *w;
        int cpu;
        u64 start;
        unsigned long flags;
        __ticket_t head;

        if (in_nmi())
                return;

        w = this_cpu_ptr(&klock_waiting);
        cpu = smp_processor_id();
        start = spin_time_start();

        /*
         * Make sure an interrupt handler can't upset things in a
         * partially setup state.
         */
        local_irq_save(flags);

        /*
         * The ordering protocol on this is that the "lock" pointer
         * may only be set non-NULL if the "want" ticket is correct.
         * If we're updating "want", we must first clear "lock".
         */
        w->lock = NULL;
        smp_wmb();
        w->want = want;
        smp_wmb();
        w->lock = lock;

        add_stats(TAKEN_SLOW, 1);

        /*
         * This uses set_bit, which is atomic but we should not rely on its
         * reordering guarantees. So a barrier is needed after this call.
         */
        cpumask_set_cpu(cpu, &waiting_cpus);

        barrier();

        /*
         * Mark entry to slowpath before doing the pickup test to make
         * sure we don't deadlock with an unlocker.
         */
        __ticket_enter_slowpath(lock);

        /* make sure enter_slowpath, which is atomic, does not cross the read */
        smp_mb__after_atomic();

        /*
         * Check again to make sure the lock didn't become free while
         * we weren't looking.
         */
        head = READ_ONCE(lock->tickets.head);
        if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }

        /*
         * halt until it's our turn and kicked. Note that we do safe halt
         * for irq enabled case to avoid hang when lock info is overwritten
         * in irq spinlock slowpath and no spurious interrupt occur to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        cpumask_clear_cpu(cpu, &waiting_cpus);
        w->lock = NULL;
        local_irq_restore(flags);
        spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
        int cpu;

        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
                if (READ_ONCE(w->lock) == lock &&
                    READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
                }
        }
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */
/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}

static __init int kvm_spinlock_init_jump(void)
{
        if (!kvm_para_available())
                return 0;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        printk(KERN_INFO "KVM setup paravirtual spinlock\n");

        return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif /* CONFIG_PARAVIRT_SPINLOCKS */