
KVM paravirt: Handle async PF in non preemptable context

If an async page fault is received by the idle task, or while preempt_count is
not zero, the guest cannot reschedule, so instead do "sti; hlt" and wait for the
page to become ready. The vcpu can still process interrupts while it waits for
the page to become ready.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Gleb Natapov 2010-10-14 11:22:54 +02:00 committed by Avi Kivity
parent 7c90705bf2
commit 6c047cd982
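
Before the diff, a minimal sketch of the waiting strategy this patch introduces. It is illustrative only: the function name async_pf_wait_sketch, the page_ready flag, and the must_halt variable are stand-ins for the real token/wait-queue bookkeeping, and the fragment assumes a kernel context; the actual implementation is in the diff below.

/*
 * Illustrative sketch, not the kernel code: why a vcpu that cannot
 * reschedule waits with "sti; hlt" instead of calling schedule().
 */
static void async_pf_wait_sketch(volatile bool *page_ready)
{
	/* Same test as the patch: idle task, or preemption disabled. */
	bool must_halt = idle_cpu(smp_processor_id()) || preempt_count() > 1;

	while (!*page_ready) {
		if (!must_halt) {
			/* Normal case: give the CPU to another task until
			 * the wake-up path calls wake_up(). */
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * Cannot reschedule.  native_safe_halt() issues
			 * "sti; hlt": interrupts are enabled and the CPU
			 * halts in one step, so the host's "page ready"
			 * interrupt (or the reschedule IPI sent by
			 * apf_task_wake_one()) cannot be lost in between;
			 * it wakes the vcpu out of hlt.
			 */
			native_safe_halt();
			local_irq_disable();
		}
	}
}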

arch/x86/kernel/kvm.c

@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
+#include <asm/tlbflush.h>
 
 #define MMU_QUEUE_SIZE 1024
@@ -78,6 +79,8 @@ struct kvm_task_sleep_node {
 	wait_queue_head_t wq;
 	u32 token;
 	int cpu;
+	bool halted;
+	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -106,6 +109,11 @@ void kvm_async_pf_task_wait(u32 token)
 	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
+	int cpu, idle;
+
+	cpu = get_cpu();
+	idle = idle_cpu(cpu);
+	put_cpu();
 
 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
@@ -119,19 +127,33 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
+	n.mm = current->active_mm;
+	n.halted = idle || preempt_count() > 1;
+	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
 
 	for (;;) {
-		prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (!n.halted)
+			prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
-		local_irq_enable();
-		schedule();
-		local_irq_disable();
+
+		if (!n.halted) {
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		} else {
+			/*
+			 * We cannot reschedule. So halt.
+			 */
+			native_safe_halt();
+			local_irq_disable();
+		}
 	}
-	finish_wait(&n.wq, &wait);
+	if (!n.halted)
+		finish_wait(&n.wq, &wait);
 
 	return;
 }
@@ -140,7 +162,12 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (waitqueue_active(&n->wq))
+	if (!n->mm)
+		return;
+	mmdrop(n->mm);
+	if (n->halted)
+		smp_send_reschedule(n->cpu);
+	else if (waitqueue_active(&n->wq))
 		wake_up(&n->wq);
 }
@@ -193,6 +220,7 @@ again:
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
+		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else