mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-01 08:04:22 +08:00
4b5e694e25
Currently unwind_next_frame_record() has an optional callback to convert the address space of the FP. This is necessary for the NVHE unwinder, which tracks the stacks in the hyp VA space, but accesses the frame records in the kernel VA space. This is a bit unfortunate since it clutters unwind_next_frame_record(), which will get in the way of future rework. Instead, this patch changes the NVHE unwinder to track the stacks in the kernel's VA space and translate to FP prior to calling unwind_next_frame_record(). This removes the need for the translate_fp() callback, as all unwinders consistently track stacks in the native address space of the unwinder. At the same time, this patch consolidates the generation of the stack addresses behind the stackinfo_get_*() helpers. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Kalesh Singh <kaleshsingh@google.com> Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com> Reviewed-by: Mark Brown <broonie@kernel.org> Cc: Fuad Tabba <tabba@google.com> Cc: Marc Zyngier <maz@kernel.org> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20220901130646.1316937-10-mark.rutland@arm.com Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
246 lines
6.6 KiB
C
246 lines
6.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* KVM nVHE hypervisor stack tracing support.
|
|
*
|
|
* The unwinder implementation depends on the nVHE mode:
|
|
*
|
|
* 1) Non-protected nVHE mode - the host can directly access the
|
|
* HYP stack pages and unwind the HYP stack in EL1. This saves having
|
|
 *    to allocate shared buffers for the host to read the unwound
|
|
* stacktrace.
|
|
*
|
|
* 2) pKVM (protected nVHE) mode - the host cannot directly access
|
|
 *    the HYP memory. The stack is unwound in EL2 and dumped to a shared
|
|
* buffer where the host can read and print the stacktrace.
|
|
*
|
|
* Copyright (C) 2022 Google LLC
|
|
*/
|
|
|
|
#include <linux/kvm.h>
|
|
#include <linux/kvm_host.h>
|
|
|
|
#include <asm/stacktrace/nvhe.h>
|
|
|
|
static struct stack_info stackinfo_get_overflow(void)
|
|
{
|
|
struct kvm_nvhe_stacktrace_info *stacktrace_info
|
|
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
|
|
unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
|
|
unsigned long high = low + OVERFLOW_STACK_SIZE;
|
|
|
|
return (struct stack_info) {
|
|
.low = low,
|
|
.high = high,
|
|
};
|
|
}
|
|
|
|
static struct stack_info stackinfo_get_overflow_kern_va(void)
|
|
{
|
|
unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
|
|
unsigned long high = low + OVERFLOW_STACK_SIZE;
|
|
|
|
return (struct stack_info) {
|
|
.low = low,
|
|
.high = high,
|
|
};
|
|
}
|
|
|
|
static struct stack_info stackinfo_get_hyp(void)
|
|
{
|
|
struct kvm_nvhe_stacktrace_info *stacktrace_info
|
|
= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
|
|
unsigned long low = (unsigned long)stacktrace_info->stack_base;
|
|
unsigned long high = low + PAGE_SIZE;
|
|
|
|
return (struct stack_info) {
|
|
.low = low,
|
|
.high = high,
|
|
};
|
|
}
|
|
|
|
static struct stack_info stackinfo_get_hyp_kern_va(void)
|
|
{
|
|
unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
|
|
unsigned long high = low + PAGE_SIZE;
|
|
|
|
return (struct stack_info) {
|
|
.low = low,
|
|
.high = high,
|
|
};
|
|
}
|
|
|
|
/*
|
|
* kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to a kernel VAs
|
|
*
|
|
* The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
|
|
* allow for guard pages below the stack. Consequently, the fixed offset address
|
|
* translation macros won't work here.
|
|
*
|
|
* The kernel VA is calculated as an offset from the kernel VA of the hypervisor
|
|
* stack base.
|
|
*
|
|
* Returns true on success and updates @addr to its corresponding kernel VA;
|
|
* otherwise returns false.
|
|
*/
|
|
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
|
|
{
|
|
struct stack_info stack_hyp, stack_kern;
|
|
|
|
stack_hyp = stackinfo_get_hyp();
|
|
stack_kern = stackinfo_get_hyp_kern_va();
|
|
if (stackinfo_on_stack(&stack_hyp, *addr, size))
|
|
goto found;
|
|
|
|
stack_hyp = stackinfo_get_overflow();
|
|
stack_kern = stackinfo_get_overflow_kern_va();
|
|
if (stackinfo_on_stack(&stack_hyp, *addr, size))
|
|
goto found;
|
|
|
|
return false;
|
|
|
|
found:
|
|
*addr = *addr - stack_hyp.low + stack_kern.low;
|
|
return true;
|
|
}
|
|
|
|
/*
 * Convert a KVM nVHE HYP frame record address to a kernel VA.
 *
 * The size of 16 bytes covers a full arm64 frame record: the saved FP and LR,
 * two 8-byte values stored contiguously.
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
	return kvm_nvhe_stack_kern_va(addr, 16);
}
|
|
|
|
static int unwind_next(struct unwind_state *state)
|
|
{
|
|
/*
|
|
* The FP is in the hypervisor VA space. Convert it to the kernel VA
|
|
* space so it can be unwound by the regular unwind functions.
|
|
*/
|
|
if (!kvm_nvhe_stack_kern_record_va(&state->fp))
|
|
return -EINVAL;
|
|
|
|
return unwind_next_frame_record(state);
|
|
}
|
|
|
|
static void unwind(struct unwind_state *state,
|
|
stack_trace_consume_fn consume_entry, void *cookie)
|
|
{
|
|
while (1) {
|
|
int ret;
|
|
|
|
if (!consume_entry(cookie, state->pc))
|
|
break;
|
|
ret = unwind_next(state);
|
|
if (ret < 0)
|
|
break;
|
|
}
|
|
}
|
|
|
|
/*
|
|
* kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
|
|
*
|
|
* @arg : the hypervisor offset, used for address translation
|
|
* @where : the program counter corresponding to the stack frame
|
|
*/
|
|
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
|
|
{
|
|
unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
|
|
unsigned long hyp_offset = (unsigned long)arg;
|
|
|
|
/* Mask tags and convert to kern addr */
|
|
where = (where & va_mask) + hyp_offset;
|
|
kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));
|
|
|
|
return true;
|
|
}
|
|
|
|
/* Print the header line introducing an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}
|
|
|
|
/* Print the footer line terminating an nVHE backtrace dump. */
static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}
|
|
|
|
/*
|
|
* hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
|
|
*
|
|
* @hyp_offset: hypervisor offset, used for address translation.
|
|
*
|
|
* The host can directly access HYP stack pages in non-protected
|
|
* mode, so the unwinding is done directly from EL1. This removes
|
|
* the need for shared buffers between host and hypervisor for
|
|
* the stacktrace.
|
|
*/
|
|
static void hyp_dump_backtrace(unsigned long hyp_offset)
|
|
{
|
|
struct kvm_nvhe_stacktrace_info *stacktrace_info;
|
|
struct stack_info stacks[] = {
|
|
stackinfo_get_overflow_kern_va(),
|
|
stackinfo_get_hyp_kern_va(),
|
|
};
|
|
struct unwind_state state = {
|
|
.stacks = stacks,
|
|
.nr_stacks = ARRAY_SIZE(stacks),
|
|
};
|
|
|
|
stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
|
|
|
|
kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);
|
|
|
|
kvm_nvhe_dump_backtrace_start();
|
|
unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
|
|
kvm_nvhe_dump_backtrace_end();
|
|
}
|
|
|
|
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
|
|
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
|
|
pkvm_stacktrace);
|
|
|
|
/*
|
|
* pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
|
|
*
|
|
* @hyp_offset: hypervisor offset, used for address translation.
|
|
*
|
|
* Dumping of the pKVM HYP backtrace is done by reading the
|
|
* stack addresses from the shared stacktrace buffer, since the
|
|
* host cannot directly access hypervisor memory in protected
|
|
* mode.
|
|
*/
|
|
static void pkvm_dump_backtrace(unsigned long hyp_offset)
|
|
{
|
|
unsigned long *stacktrace
|
|
= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
|
|
int i;
|
|
|
|
kvm_nvhe_dump_backtrace_start();
|
|
/* The saved stacktrace is terminated by a null entry */
|
|
for (i = 0;
|
|
i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
|
|
i++)
|
|
kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
|
|
kvm_nvhe_dump_backtrace_end();
|
|
}
|
|
#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
|
|
/*
 * Stub used when CONFIG_PROTECTED_NVHE_STACKTRACE is not set: there is no
 * shared stacktrace buffer, so the pKVM backtrace cannot be dumped.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
|
|
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
|
|
|
|
/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dispatches to the pKVM or non-protected dumper depending on the mode KVM
 * was initialised in.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (!is_protected_kvm_enabled())
		hyp_dump_backtrace(hyp_offset);
	else
		pkvm_dump_backtrace(hyp_offset);
}
|