
arch/tile: various bugs in stack backtracer

Fix a long-standing bug in the stack backtracer where we would print
garbage to the console instead of kernel function names, if the kernel
wasn't built with symbol support (e.g. mboot).
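
Condensed from the new describe_addr() in the diff below: a failed kallsyms
lookup now yields an empty string rather than whatever stale bytes were in
the buffer:

	name = kallsyms_lookup(address, &size, &offset,
			       &modname, buf);
	if (name == NULL) {
		buf[0] = '\0';	/* no symbol data: print nothing */
		return;
	}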

Make sure to tag every line of userspace backtrace output if we actually
have the mmap_sem, since that way if there's no tag, we know that it's
because we couldn't trylock the semaphore.
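
In sketch form (condensed from the tile_show_stack() changes below), the
semaphore is try-acquired at most once, on the first userspace frame, and
the result is passed to every lookup, so an untagged line can only mean
the trylock failed:

	/* Try to acquire the mmap_sem as we pass into userspace. */
	if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
		have_mmap_sem =
			down_read_trylock(&kbt->task->mm->mmap_sem);

	describe_addr(kbt, address, have_mmap_sem,
		      namebuf, sizeof(namebuf));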

Stop doing a TLB flush and examining page tables during backtrace.
Instead, just trust that __copy_from_user_inatomic() will properly fault
and return a failure, which it should do in all cases.
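
The read path then reduces to the pattern below (the first lines are from
read_memory_func() in the diff; the tail is paraphrased, since the hunk is
truncated mid-call):

	pagefault_disable();
	retval = __copy_from_user_inatomic(result,
					   (void __user __force *)address,
					   size);
	pagefault_enable();
	return retval == 0;	/* any fault shows up as a nonzero retval */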

Fix a latent bug where the backtracer would directly examine a signal
context in user space, rather than copying it safely to kernel memory
first.  This meant that a race with another thread could potentially
have caused a kernel panic.
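
The new valid_sigframe() (below) instead copies the frame into a
kernel-stack rt_sigframe and validates it before use:

	pagefault_disable();
	retval = __copy_from_user_inatomic(
		kframe, (void __user __force *)b->sp,
		sizeof(*kframe));
	pagefault_enable();
	if (retval != 0 ||
	    (unsigned int)(kframe->info.si_signo) >= _NSIG)
		return NULL;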

Guard against unaligned sp when trying to restart backtrace at an
interrupt or signal handler point in the kernel backtracer.
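
The guard is the two-line alignment check now at the top of
valid_fault_handler() (valid_sigframe() gains the matching test on b->sp):

	if (sp % sizeof(long) != 0)
		return NULL;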

Report kernel symbolic information for the call instruction rather
than for the following instruction.  We still report the actual numeric
address corresponding to the instruction after the call, for the sake
of consistency with the normal expectations for stack backtracers.
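
Concretely, describe_addr() (below) backs up one byte for caller frames
before the symbol lookup and adds it back when printing the offset:

	adjust = !kbt->new_context;	/* caller frames only, not new contexts */
	address -= adjust;
	/* ... kallsyms lookup on the adjusted address ... */
	rc = snprintf(p, remaining, "+%#lx/%#lx ",
		      offset + adjust, size);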

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Author: Chris Metcalf <cmetcalf@tilera.com>
Date:   2012-03-29 14:06:14 -04:00
commit 5f639fdcd8
parent efb734d8ed
2 changed files with 111 additions and 119 deletions

arch/tile/include/asm/stack.h

@@ -25,7 +25,6 @@
 struct KBacktraceIterator {
 	BacktraceIterator it;
 	struct task_struct *task;	/* task we are backtracing */
-	pte_t *pgtable;			/* page table for user space access */
 	int end;			/* iteration complete. */
 	int new_context;		/* new context is starting */
 	int profile;			/* profiling, so stop on async intrpt */

arch/tile/kernel/stack.c

@@ -21,9 +21,10 @@
 #include <linux/stacktrace.h>
 #include <linux/uaccess.h>
 #include <linux/mmzone.h>
+#include <linux/dcache.h>
+#include <linux/fs.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
-#include <asm/tlbflush.h>
 #include <asm/ucontext.h>
 #include <asm/switch_to.h>
 #include <asm/sigframe.h>
@@ -45,72 +46,23 @@ static int in_kernel_stack(struct KBacktraceIterator *kbt, unsigned long sp)
 	return sp >= kstack_base && sp < kstack_base + THREAD_SIZE;
 }
 
-/* Is address valid for reading? */
-static int valid_address(struct KBacktraceIterator *kbt, unsigned long address)
-{
-	HV_PTE *l1_pgtable = kbt->pgtable;
-	HV_PTE *l2_pgtable;
-	unsigned long pfn;
-	HV_PTE pte;
-	struct page *page;
-
-	if (l1_pgtable == NULL)
-		return 0;	/* can't read user space in other tasks */
-
-#ifdef CONFIG_64BIT
-	/* Find the real l1_pgtable by looking in the l0_pgtable. */
-	pte = l1_pgtable[HV_L0_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("L0 huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	BUG_ON(PageHighMem(page));	/* No HIGHMEM on 64-bit. */
-	l1_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-#endif
-	pte = l1_pgtable[HV_L1_INDEX(address)];
-	if (!hv_pte_get_present(pte))
-		return 0;
-	pfn = hv_pte_get_pfn(pte);
-	if (pte_huge(pte)) {
-		if (!pfn_valid(pfn)) {
-			pr_err("huge page has bad pfn %#lx\n", pfn);
-			return 0;
-		}
-		return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-	}
-	page = pfn_to_page(pfn);
-	if (PageHighMem(page)) {
-		pr_err("L2 page table not in LOWMEM (%#llx)\n",
-		       HV_PFN_TO_CPA(pfn));
-		return 0;
-	}
-	l2_pgtable = (HV_PTE *)pfn_to_kaddr(pfn);
-	pte = l2_pgtable[HV_L2_INDEX(address)];
-	return hv_pte_get_present(pte) && hv_pte_get_readable(pte);
-}
-
 /* Callback for backtracer; basically a glorified memcpy */
 static bool read_memory_func(void *result, unsigned long address,
 			     unsigned int size, void *vkbt)
 {
 	int retval;
 	struct KBacktraceIterator *kbt = (struct KBacktraceIterator *)vkbt;
+
+	if (address == 0)
+		return 0;
+
 	if (__kernel_text_address(address)) {
 		/* OK to read kernel code. */
 	} else if (address >= PAGE_OFFSET) {
 		/* We only tolerate kernel-space reads of this task's stack */
 		if (!in_kernel_stack(kbt, address))
 			return 0;
-	} else if (!valid_address(kbt, address)) {
-		return 0;	/* invalid user-space address */
+	} else if (!kbt->is_current) {
+		return 0;	/* can't read from other user address spaces */
 	}
 	pagefault_disable();
 	retval = __copy_from_user_inatomic(result,
@@ -128,6 +80,8 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	unsigned long sp = kbt->it.sp;
 	struct pt_regs *p;
 
+	if (sp % sizeof(long) != 0)
+		return NULL;
 	if (!in_kernel_stack(kbt, sp))
 		return NULL;
 	if (!in_kernel_stack(kbt, sp + C_ABI_SAVE_AREA_SIZE + PTREGS_SIZE-1))
@@ -170,27 +124,27 @@ static int is_sigreturn(unsigned long pc)
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
-static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt)
+static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
+				      struct rt_sigframe* kframe)
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (b->pc == VDSO_BASE) {
-		struct rt_sigframe *frame;
-		unsigned long sigframe_top =
-			b->sp + sizeof(struct rt_sigframe) - 1;
-		if (!valid_address(kbt, b->sp) ||
-		    !valid_address(kbt, sigframe_top)) {
-			if (kbt->verbose)
-				pr_err("  (odd signal: sp %#lx?)\n",
-				       (unsigned long)(b->sp));
+	if (b->pc == VDSO_BASE && b->sp < PAGE_OFFSET &&
+	    b->sp % sizeof(long) == 0) {
+		int retval;
+		pagefault_disable();
+		retval = __copy_from_user_inatomic(
+			kframe, (void __user __force *)b->sp,
+			sizeof(*kframe));
+		pagefault_enable();
+		if (retval != 0 ||
+		    (unsigned int)(kframe->info.si_signo) >= _NSIG)
 			return NULL;
-		}
-		frame = (struct rt_sigframe *)b->sp;
 		if (kbt->verbose) {
 			pr_err("  <received signal %d>\n",
-			       frame->info.si_signo);
+			       kframe->info.si_signo);
 		}
-		return (struct pt_regs *)&frame->uc.uc_mcontext;
+		return (struct pt_regs *)&kframe->uc.uc_mcontext;
 	}
 	return NULL;
 }
@@ -203,10 +157,11 @@ static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
+	struct rt_sigframe kframe;
 
 	p = valid_fault_handler(kbt);
 	if (p == NULL)
-		p = valid_sigframe(kbt);
+		p = valid_sigframe(kbt, &kframe);
 	if (p == NULL)
 		return 0;
 	backtrace_init(&kbt->it, read_memory_func, kbt,
@@ -266,41 +221,19 @@ void KBacktraceIterator_init(struct KBacktraceIterator *kbt,
 
 	/*
 	 * Set up callback information.  We grab the kernel stack base
-	 * so we will allow reads of that address range, and if we're
-	 * asking about the current process we grab the page table
-	 * so we can check user accesses before trying to read them.
-	 * We flush the TLB to avoid any weird skew issues.
+	 * so we will allow reads of that address range.
 	 */
-	is_current = (t == NULL);
+	is_current = (t == NULL || t == current);
 	kbt->is_current = is_current;
 	if (is_current)
 		t = validate_current();
 	kbt->task = t;
-	kbt->pgtable = NULL;
 	kbt->verbose = 0;	/* override in caller if desired */
 	kbt->profile = 0;	/* override in caller if desired */
 	kbt->end = KBT_ONGOING;
-	kbt->new_context = 0;
-	if (is_current) {
-		HV_PhysAddr pgdir_pa = hv_inquire_context().page_table;
-		if (pgdir_pa == (unsigned long)swapper_pg_dir - PAGE_OFFSET) {
-			/*
-			 * Not just an optimization: this also allows
-			 * this to work at all before va/pa mappings
-			 * are set up.
-			 */
-			kbt->pgtable = swapper_pg_dir;
-		} else {
-			struct page *page = pfn_to_page(PFN_DOWN(pgdir_pa));
-			if (!PageHighMem(page))
-				kbt->pgtable = __va(pgdir_pa);
-			else
-				pr_err("page table not in LOWMEM"
-				       " (%#llx)\n", pgdir_pa);
-		}
-		local_flush_tlb_all();
+	kbt->new_context = 1;
+	if (is_current)
 		validate_stack(regs);
-	}
 
 	if (regs == NULL) {
 		if (is_current || t->state == TASK_RUNNING) {
@@ -346,6 +279,78 @@ void KBacktraceIterator_next(struct KBacktraceIterator *kbt)
 }
 EXPORT_SYMBOL(KBacktraceIterator_next);
 
+static void describe_addr(struct KBacktraceIterator *kbt,
+			  unsigned long address,
+			  int have_mmap_sem, char *buf, size_t bufsize)
+{
+	struct vm_area_struct *vma;
+	size_t namelen, remaining;
+	unsigned long size, offset, adjust;
+	char *p, *modname;
+	const char *name;
+	int rc;
+
+	/*
+	 * Look one byte back for every caller frame (i.e. those that
+	 * aren't a new context) so we look up symbol data for the
+	 * call itself, not the following instruction, which may be on
+	 * a different line (or in a different function).
+	 */
+	adjust = !kbt->new_context;
+	address -= adjust;
+
+	if (address >= PAGE_OFFSET) {
+		/* Handle kernel symbols. */
+		BUG_ON(bufsize < KSYM_NAME_LEN);
+		name = kallsyms_lookup(address, &size, &offset,
+				       &modname, buf);
+		if (name == NULL) {
+			buf[0] = '\0';
+			return;
+		}
+		namelen = strlen(buf);
+		remaining = (bufsize - 1) - namelen;
+		p = buf + namelen;
+		rc = snprintf(p, remaining, "+%#lx/%#lx ",
+			      offset + adjust, size);
+		if (modname && rc < remaining)
+			snprintf(p + rc, remaining - rc, "[%s] ", modname);
+		buf[bufsize-1] = '\0';
+		return;
+	}
+
+	/* If we don't have the mmap_sem, we can't show any more info. */
+	buf[0] = '\0';
+	if (!have_mmap_sem)
+		return;
+
+	/* Find vma info. */
+	vma = find_vma(kbt->task->mm, address);
+	if (vma == NULL || address < vma->vm_start) {
+		snprintf(buf, bufsize, "[unmapped address] ");
+		return;
+	}
+
+	if (vma->vm_file) {
+		char *s;
+		p = d_path(&vma->vm_file->f_path, buf, bufsize);
+		if (IS_ERR(p))
+			p = "?";
+		s = strrchr(p, '/');
+		if (s)
+			p = s+1;
+	} else {
+		p = "anon";
+	}
+
+	/* Generate a string description of the vma info. */
+	namelen = strlen(p);
+	remaining = (bufsize - 1) - namelen;
+	memmove(buf, p, namelen);
+	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
+		 vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
@@ -354,6 +359,7 @@ EXPORT_SYMBOL(KBacktraceIterator_next);
 void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 {
 	int i;
+	int have_mmap_sem = 0;
 
 	if (headers) {
 		/*
@@ -370,31 +376,16 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
-		char *modname;
-		const char *name;
-		unsigned long address = kbt->it.pc;
-		unsigned long offset, size;
 		char namebuf[KSYM_NAME_LEN+100];
+		unsigned long address = kbt->it.pc;
 
-		if (address >= PAGE_OFFSET)
-			name = kallsyms_lookup(address, &size, &offset,
-					       &modname, namebuf);
-		else
-			name = NULL;
-
-		if (!name)
-			namebuf[0] = '\0';
-		else {
-			size_t namelen = strlen(namebuf);
-			size_t remaining = (sizeof(namebuf) - 1) - namelen;
-			char *p = namebuf + namelen;
-			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
-					  offset, size);
-			if (modname && rc < remaining)
-				snprintf(p + rc, remaining - rc,
-					 "[%s] ", modname);
-			namebuf[sizeof(namebuf)-1] = '\0';
-		}
+		/* Try to acquire the mmap_sem as we pass into userspace. */
+		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+			have_mmap_sem =
+				down_read_trylock(&kbt->task->mm->mmap_sem);
+
+		describe_addr(kbt, address, have_mmap_sem,
+			      namebuf, sizeof(namebuf));
 
 		pr_err("  frame %d: 0x%lx %s(sp 0x%lx)\n",
 		       i++, address, namebuf, (unsigned long)(kbt->it.sp));
@@ -409,6 +400,8 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
 	if (headers)
 		pr_err("Stack dump complete\n");
+	if (have_mmap_sem)
+		up_read(&kbt->task->mm->mmap_sem);
 }
 EXPORT_SYMBOL(tile_show_stack);