2005-04-17 06:20:36 +08:00
|
|
|
/*
|
|
|
|
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
|
|
|
|
*
|
|
|
|
* This file contains the lowest level x86_64-specific interrupt
|
|
|
|
* entry and irq statistics code. All the remaining irq logic is
|
|
|
|
* done by the generic kernel/irq/ code and in the
|
|
|
|
* x86_64-specific irq controller code. (e.g. i8259.c and
|
|
|
|
* io_apic.c.)
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel_stat.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/seq_file.h>
|
|
|
|
#include <linux/module.h>
|
2005-06-26 05:55:00 +08:00
|
|
|
#include <linux/delay.h>
|
2008-12-10 06:54:20 +08:00
|
|
|
#include <linux/ftrace.h>
|
2009-01-04 18:55:19 +08:00
|
|
|
#include <linux/uaccess.h>
|
|
|
|
#include <linux/smp.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <asm/io_apic.h>
|
2006-01-12 05:44:36 +08:00
|
|
|
#include <asm/idle.h>
|
2009-01-23 10:03:29 +08:00
|
|
|
#include <asm/apic.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2009-01-18 23:38:57 +08:00
|
|
|
/*
 * Per-CPU interrupt statistics; cacheline-aligned so that one CPU's
 * updates do not false-share with another CPU's copy.
 */
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Per-CPU pointer to the pt_regs of the interrupt currently being
 * handled on this CPU (NULL when not in interrupt context).
 */
DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * When non-zero, stack_overflow_check() panics instead of merely
 * warning once a kernel stack overflow is detected.
 * NOTE(review): presumably tunable via sysctl/proc as the name
 * suggests — the registration lives outside this file; confirm there.
 */
int sysctl_panic_on_stackoverflow;
|
|
|
|
|
2006-06-26 20:00:05 +08:00
|
|
|
/*
 * Probabilistic stack overflow check:
 *
 * Only check the stack in process context, because everything else
 * runs on the big interrupt stacks. Checking reliably is too expensive,
 * so we just check from interrupts.
 */
static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/*
	 * Slack (in bytes) left between the interrupted sp and the true
	 * top of each stack before we consider it "about to overflow".
	 */
#define STACK_TOP_MARGIN 128
	struct orig_ist *oist;
	u64 irq_stack_top, irq_stack_bottom;
	u64 estack_top, estack_bottom;
	/* Base address of the current task's kernel stack page(s). */
	u64 curbase = (u64)task_stack_page(current);

	/* Interrupt arrived from user mode: no kernel stack to check. */
	if (user_mode_vm(regs))
		return;

	/*
	 * Case 1: sp lies within the task stack, above thread_info plus
	 * room for a pt_regs frame plus the safety margin -> healthy.
	 */
	if (regs->sp >= curbase + sizeof(struct thread_info) +
			sizeof(struct pt_regs) + STACK_TOP_MARGIN &&
	    regs->sp <= curbase + THREAD_SIZE)
		return;

	/*
	 * Case 2: sp lies within this CPU's per-CPU IRQ stack (with the
	 * same margin below its top) -> healthy.
	 */
	irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
			STACK_TOP_MARGIN;
	irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
	if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
		return;

	/*
	 * Case 3: sp lies somewhere in the per-CPU exception (IST)
	 * stacks, treated here as one contiguous span from the top of
	 * the first IST stack to the bottom of the last.
	 * NOTE(review): this assumes the N_EXCEPTION_STACKS are laid
	 * out contiguously in that order — confirm against the per-CPU
	 * IST setup in cpu init code.
	 */
	oist = &__get_cpu_var(orig_ist);
	estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
	estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
	if (regs->sp >= estack_top && regs->sp <= estack_bottom)
		return;

	/*
	 * sp is outside every known-good range: report once (with all
	 * the ranges, to make the dump actionable)...
	 */
	WARN_ONCE(1, "do_IRQ(): %s has overflown the kernel stack (cur:%Lx,sp:%lx,irq stk top-bottom:%Lx-%Lx,exception stk top-bottom:%Lx-%Lx)\n",
		current->comm, curbase, regs->sp,
		irq_stack_top, irq_stack_bottom,
		estack_top, estack_bottom);

	/* ...and optionally halt the machine before corruption spreads. */
	if (sysctl_panic_on_stackoverflow)
		panic("low stack detected by irq handler - check messages\n");
#endif
}
|
2006-06-26 20:00:05 +08:00
|
|
|
|
2009-02-07 06:09:40 +08:00
|
|
|
bool handle_irq(unsigned irq, struct pt_regs *regs)
|
|
|
|
{
|
|
|
|
struct irq_desc *desc;
|
|
|
|
|
|
|
|
stack_overflow_check(regs);
|
|
|
|
|
|
|
|
desc = irq_to_desc(irq);
|
|
|
|
if (unlikely(!desc))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
generic_handle_irq_desc(irq, desc);
|
|
|
|
return true;
|
|
|
|
}
|