linux-next/arch/powerpc/kernel/irq.c
Linus Torvalds c207f3a431 Generalize powerpc's irq_host as irq_domain
Merge tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6

Pull irq_domain support for all architectures from Grant Likely:
 "Generialize powerpc's irq_host as irq_domain

  This branch takes the PowerPC irq_host infrastructure (reverse mapping
  from Linux IRQ numbers to hardware irq numbering), generalizes it,
  renames it to irq_domain, and makes it available to all architectures.

   Originally the plan had been to create an all-new irq_domain
  implementation which addresses some of the powerpc shortcomings such
  as not handling 1:1 mappings well, but doing that proved to be far
  more difficult and invasive than generalizing the working code and
  refactoring it in-place.  So, this branch rips out the 'new'
  irq_domain and replaces it with the modified powerpc version (in a
  fully bisectable way of course).  It converts all users over to the
  new API and makes irq_domain selectable on any architecture.

  No architecture is forced to enable irq_domain, but the infrastructure
  is required for doing OpenFirmware style irq translations.  It will
   even work on SPARC, even though SPARC has its own mechanism for
  translating irqs at boot time.  MIPS, microblaze, embedded x86 and c6x
  are converted too.

  The resulting irq_domain code is probably still too verbose and can be
  optimized more, but that can be done incrementally and is a task for
  follow-on patches."

* tag 'irqdomain-for-linus' of git://git.secretlab.ca/git/linux-2.6: (31 commits)
  dt: fix twl4030 for non-dt compile on x86
  mfd: twl-core: Add IRQ_DOMAIN dependency
  devicetree: Add empty of_platform_populate() for !CONFIG_OF_ADDRESS (sparc)
  irq_domain: Centralize definition of irq_dispose_mapping()
  irq_domain/mips: Allow irq_domain on MIPS
  irq_domain/x86: Convert x86 (embedded) to use common irq_domain
  ppc-6xx: fix build failure in flipper-pic.c and hlwd-pic.c
  irq_domain/microblaze: Convert microblaze to use irq_domains
  irq_domain/powerpc: Replace custom xlate functions with library functions
  irq_domain/powerpc: constify irq_domain_ops
  irq_domain/c6x: Use library of xlate functions
  irq_domain/c6x: constify irq_domain structures
  irq_domain/c6x: Convert c6x to use generic irq_domain support.
  irq_domain: constify irq_domain_ops
  irq_domain: Create common xlate functions that device drivers can use
  irq_domain: Remove irq_domain_add_simple()
  irq_domain: Remove 'new' irq_domain in favour of the ppc one
  mfd: twl-core.c: Fix the number of interrupts managed by twl4030
  of/address: add empty static inlines for !CONFIG_OF
  irq_domain: Add support for base irq and hwirq in legacy mappings
  ...
2012-03-21 10:27:19 -07:00
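For readers new to the API being merged here, a minimal sketch of how an interrupt-controller driver uses irq_domain: it registers a domain covering its hardware irq space and lets the core maintain the reverse map between hardware irq numbers and Linux irq numbers. The foo_* names below are illustrative, not taken from this tree.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Hypothetical controller state and irq_chip (mask/unmask filled in elsewhere). */
static struct irq_domain *foo_domain;
static struct irq_chip foo_chip;

/* Called by the core whenever a hwirq in this domain gets a Linux irq number. */
static int foo_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_map,
	.xlate	= irq_domain_xlate_onecell,	/* one of the new library xlate helpers */
};

static void __init foo_pic_init(struct device_node *np)
{
	/* 32 hardware sources, reverse-mapped to Linux irq numbers on demand. */
	foo_domain = irq_domain_add_linear(np, 32, &foo_domain_ops, NULL);
}

/*
 * Cascade/demux path: translate a hardware source back to its Linux irq.
 * Assumes the mapping was created earlier (irq_create_mapping() or the OF code).
 */
static unsigned int foo_get_irq(unsigned int hwirq)
{
	return irq_find_mapping(foo_domain, hwirq);
}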


/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;
#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */
#ifdef CONFIG_PPC64
#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif
int distribute_irqs = 1;
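
/*
 * Helpers that read/write the interrupt-enable bytes in the paca by
 * going through r13 directly, so the compiler cannot cache a paca
 * pointer across a preemption to another cpu.
 */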
static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
static inline notrace void decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb;

	preempt_disable();
	next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
	preempt_enable();
}
notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here? Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it? And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here. Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

	/*
	 * Trigger the decrementer if we have a pending event. Some processors
	 * only trigger on edge transitions of the sign bit. We might also
	 * have disabled interrupts long enough that the decrementer wrapped
	 * to positive.
	 */
	decrementer_check_overflow();

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */
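
/* Architecture-specific rows appended to /proc/interrupts. */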
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, " Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
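/*
 * When a cpu goes offline, point any irq that was targeted only at that
 * cpu somewhere else (called from the cpu-hotplug path).
 */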
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif
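
/*
 * Dispatch one interrupt: look up its descriptor and run the flow
 * handler on the per-cpu hard-irq stack (switching stacks unless we
 * are already on it).
 */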
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
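
/*
 * Common entry point for external interrupts: the platform's
 * ppc_md.get_irq() hook tells us which (virtual) irq fired, or
 * NO_IRQ/NO_IRQ_IGNORE if nothing useful is pending.
 */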
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
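
/*
 * Initialise the per-cpu stacks used by the extra exception levels
 * (critical, debug and machine check) on BookE/40x parts.
 */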
void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
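
/* Set up the per-cpu softirq and hardirq stacks. */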
void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
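
/* Run pending softirqs on the dedicated per-cpu softirq stack. */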
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}
irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
#ifdef CONFIG_SMP
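/*
 * Pick a hardware cpu to target an irq at.  If the requested affinity
 * mask covers every cpu, rotate round-robin over the online cpus;
 * otherwise take the first online cpu in the mask, falling back to
 * round-robin if none of them is online.
 */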
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}
#ifdef CONFIG_PPC64
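/*
 * "noirqdistrib" on the kernel command line stops the platform code
 * from spreading device interrupts across cpus (see distribute_irqs).
 */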
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */