KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not set
When an MMU notifier call results in unmapping a range that spans multiple
PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary,
since this avoids running into RCU stalls during VM teardown. Unfortunately,
if the VM is destroyed as a result of OOM, then blocking is not permitted
and the call to the scheduler triggers the following BUG():
| BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394
| in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper
| INFO: lockdep is turned off.
| CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1
| Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
| Call trace:
| dump_backtrace+0x0/0x284
| show_stack+0x1c/0x28
| dump_stack+0xf0/0x1a4
| ___might_sleep+0x2bc/0x2cc
| unmap_stage2_range+0x160/0x1ac
| kvm_unmap_hva_range+0x1a0/0x1c8
| kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8
| __mmu_notifier_invalidate_range_start+0x218/0x31c
| mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0
| __oom_reap_task_mm+0x128/0x268
| oom_reap_task+0xac/0x298
| oom_reaper+0x178/0x17c
| kthread+0x1e4/0x1fc
| ret_from_fork+0x10/0x30
Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we
only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier
flags.
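
For background (not part of the patch itself), a minimal sketch of how an MMU notifier
callback distinguishes the two contexts, using the upstream
mmu_notifier_range_blockable() helper; the callback name here is hypothetical:

    #include <linux/mmu_notifier.h>

    /*
     * Illustrative sketch only: an invalidate_range_start() callback can
     * tell a blockable invalidation (normal teardown) apart from a
     * non-blockable one (e.g. the OOM reaper). The helper tests
     * MMU_NOTIFIER_RANGE_BLOCKABLE in range->flags.
     */
    static int example_invalidate_range_start(struct mmu_notifier *mn,
                                              const struct mmu_notifier_range *range)
    {
            if (!mmu_notifier_range_blockable(range))
                    return -EAGAIN; /* must not sleep; caller retries or bails out */

            /* Blockable context: sleeping locks and cond_resched_lock() are fine. */
            return 0;
    }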
Cc: <stable@vger.kernel.org>
Fixes: 8b3405e345 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd")
Cc: Marc Zyngier <maz@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
Message-Id: <20200811102725.7121-3-will@kernel.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit b5331379bc
parent fdfe7cbd58
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+                                 bool may_block)
 {
        struct kvm *kvm = mmu->kvm;
        pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
                 * If the range is too large, release the kvm->mmu_lock
                 * to prevent starvation and lockup detector warnings.
                 */
-               if (next != end)
+               if (may_block && next != end)
                        cond_resched_lock(&kvm->mmu_lock);
        } while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+       __unmap_stage2_range(mmu, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
                              phys_addr_t addr, phys_addr_t end)
 {
@@ -2208,7 +2214,10 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-       unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+       unsigned flags = *(unsigned *)data;
+       bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+       __unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
        return 0;
 }
 
@@ -2219,7 +2228,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
                return 0;
 
        trace_kvm_unmap_hva_range(start, end);
-       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+       handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
        return 0;
 }
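
For reference, the 'flags' value consumed above is plumbed through by the parent
commit fdfe7cbd58; a rough sketch of the shape that commit gives the arm64 entry
point and its generic caller (shown for context, not part of this diff):

    /*
     * kvm_unmap_hva_range() receives the MMU notifier range flags, which
     * generic KVM forwards from range->flags in its
     * invalidate_range_start() callback, roughly:
     *
     *     kvm_unmap_hva_range(kvm, range->start, range->end, range->flags);
     */
    int kvm_unmap_hva_range(struct kvm *kvm,
                            unsigned long start, unsigned long end,
                            unsigned flags);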