linux-next/include/linux/irqdesc.h

#ifndef _LINUX_IRQDESC_H
#define _LINUX_IRQDESC_H
/*
 * Core internal functions to deal with irq descriptors
 *
 * This include will move to kernel/irq once we have cleaned up the tree.
 * For now it is included from <linux/irq.h>.
 */
struct irq_affinity_notify;
struct proc_dir_entry;
struct module;
struct irq_desc;
/**
 * struct irq_desc - interrupt descriptor
 * @irq_data: per irq and chip data passed down to chip functions
 * @kstat_irqs: irq stats per cpu
 * @handle_irq: high-level irq-events handler
 * @preflow_handler: handler called before the flow handler (currently used by sparc)
 * @action: the irq action chain
 * @status_use_accessors: status information
 * @core_internal_state__do_not_mess_with_it: core internal status information
 * @depth: disable-depth, for nested irq_disable() calls
 * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers
 * @irq_count: stats field to detect stalled irqs
 * @last_unhandled: aging timer for unhandled count
 * @irqs_unhandled: stats field for spurious unhandled interrupts
 * @lock: locking for SMP
 * @affinity_hint: hint to user space for preferred irq affinity
 * @affinity_notify: context for notification of affinity changes
 * @pending_mask: pending rebalanced interrupts
 * @threads_oneshot: bitfield to handle shared oneshot threads
 * @threads_active: number of irqaction threads currently running
 * @wait_for_threads: wait queue for synchronize_irq() to wait for threaded handlers
 * @dir: /proc/irq/ procfs entry
 * @name: flow handler name for /proc/interrupts output
 */
struct irq_desc {
        struct irq_data         irq_data;
        unsigned int __percpu   *kstat_irqs;
        irq_flow_handler_t      handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
        irq_preflow_handler_t   preflow_handler;
#endif
        struct irqaction        *action;        /* IRQ action list */
        unsigned int            status_use_accessors;
        unsigned int            core_internal_state__do_not_mess_with_it;
        unsigned int            depth;          /* nested irq disables */
        unsigned int            wake_depth;     /* nested wake enables */
        unsigned int            irq_count;      /* For detecting broken IRQs */
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
        unsigned int            irqs_unhandled;
        raw_spinlock_t          lock;
        struct cpumask          *percpu_enabled;
#ifdef CONFIG_SMP
        const struct cpumask    *affinity_hint;
        struct irq_affinity_notify *affinity_notify;
#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t           pending_mask;
#endif
#endif
        unsigned long           threads_oneshot;
        atomic_t                threads_active;
        wait_queue_head_t       wait_for_threads;
#ifdef CONFIG_PROC_FS
        struct proc_dir_entry   *dir;
#endif
        int                     parent_irq;
        struct module           *owner;
        const char              *name;
} ____cacheline_internodealigned_in_smp;
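/*
 * Illustrative example (not part of the original header): a minimal sketch of
 * how core code typically inspects an irq_desc, assuming irq_to_desc() from
 * <linux/irqnr.h> is in scope. Fields such as @depth and @action are only
 * stable while desc->lock is held. my_irq_is_disabled() is a hypothetical
 * helper, not a kernel API.
 */
#if 0   /* example only, never compiled */
static bool my_irq_is_disabled(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        bool disabled;

        if (!desc)
                return false;

        raw_spin_lock_irqsave(&desc->lock, flags);
        disabled = desc->depth > 0;     /* non-zero disable-depth: line is disabled */
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return disabled;
}
#endif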
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
#endif
#ifdef CONFIG_GENERIC_HARDIRQS
static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
{
        return &desc->irq_data;
}

static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc)
{
        return desc->irq_data.chip;
}

static inline void *irq_desc_get_chip_data(struct irq_desc *desc)
{
        return desc->irq_data.chip_data;
}

static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
{
        return desc->irq_data.handler_data;
}

static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
{
        return desc->irq_data.msi_desc;
}
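/*
 * Illustrative example (not part of the original header): the irq_desc_get_*()
 * accessors above are typically used inside a flow handler, which receives the
 * descriptor directly. A rough sketch of a chained demultiplexing handler,
 * assuming the parent chip implements irq_mask()/irq_unmask(); struct my_bank,
 * my_demux_handler() and my_pending_child() are hypothetical, and
 * generic_handle_irq() is declared just below.
 */
#if 0   /* example only, never compiled */
struct my_bank {
        void __iomem *regs;
        unsigned int irq_base;
};

static void my_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct my_bank *bank = irq_desc_get_handler_data(desc);
        unsigned int child;

        /* keep the parent line quiet while demultiplexing */
        chip->irq_mask(&desc->irq_data);

        /* hand each pending child interrupt to the generic layer */
        while ((child = my_pending_child(bank)) != 0)
                generic_handle_irq(bank->irq_base + child);

        chip->irq_unmask(&desc->irq_data);
}
#endif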
/*
 * Architectures call this to let the generic IRQ layer
 * handle an interrupt. If the descriptor is attached to an
 * irqchip-style controller then we call the ->handle_irq() handler,
 * and it calls __do_IRQ() if it's attached to an irqtype-style controller.
 */
static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc)
{
        desc->handle_irq(irq, desc);
}
int generic_handle_irq(unsigned int irq);
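/*
 * Illustrative example (not part of the original header): a rough sketch of
 * how an architecture's low-level entry code might hand a hardware interrupt
 * to the generic layer, assuming a flat hwirq-to-Linux-irq mapping and the
 * usual entry helpers (irq_enter()/irq_exit(), set_irq_regs()).
 * my_arch_do_IRQ() is a hypothetical name.
 */
#if 0   /* example only, never compiled */
asmlinkage void my_arch_do_IRQ(unsigned int hwirq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();
        generic_handle_irq(hwirq);      /* resolves the descriptor, then calls ->handle_irq() */
        irq_exit();

        set_irq_regs(old_regs);
}
#endif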
/* Test to see if a driver has successfully requested an irq */
static inline int irq_has_action(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        return desc->action != NULL;
}
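/*
 * Illustrative example (not part of the original header): a small sketch of
 * using irq_has_action() to skip lines nobody has requested, e.g. in a debug
 * dump. my_count_requested_irqs() is hypothetical; nr_irqs comes from
 * <linux/irqnr.h>.
 */
#if 0   /* example only, never compiled */
static void my_count_requested_irqs(void)
{
        unsigned int irq, count = 0;

        for (irq = 0; irq < nr_irqs; irq++) {
                /* irq_has_action() dereferences the descriptor, so check it exists */
                if (irq_to_desc(irq) && irq_has_action(irq))
                        count++;
        }

        pr_info("%u interrupt lines have handlers installed\n", count);
}
#endif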
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
                                            irq_flow_handler_t handler)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        desc->handle_irq = handler;
}
/* caller has locked the irq_desc and both params are valid */
static inline void
__irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip,
                                   irq_flow_handler_t handler, const char *name)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        irq_desc_get_irq_data(desc)->chip = chip;
        desc->handle_irq = handler;
        desc->name = name;
}
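/*
 * Illustrative example (not part of the original header): the usual caller of
 * __irq_set_handler_locked() is an irq_chip's ->irq_set_type() callback, which
 * the core invokes with the descriptor lock already held. A rough sketch using
 * handle_level_irq()/handle_edge_irq() from <linux/irq.h>; my_gpio_irq_set_type()
 * and the hardware programming step are hypothetical.
 */
#if 0   /* example only, never compiled */
static int my_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
        /* ... program the trigger mode for d->hwirq in the hardware here ... */

        /* switch to the matching flow handler; desc->lock is held by the core */
        if (type & IRQ_TYPE_LEVEL_MASK)
                __irq_set_handler_locked(d->irq, handle_level_irq);
        else
                __irq_set_handler_locked(d->irq, handle_edge_irq);

        return 0;
}
#endif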
static inline int irq_balancing_disabled(unsigned int irq)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc)
                lockdep_set_class(&desc->lock, class);
}
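/*
 * Illustrative example (not part of the original header): chained irqchip
 * drivers (GPIO expanders and the like) typically give their child interrupts
 * a separate lockdep class so that nested acquisition of desc->lock does not
 * trigger a false lockdep report. A minimal sketch; my_gpio_lock_class and
 * my_gpio_setup_child_irq() are hypothetical.
 */
#if 0   /* example only, never compiled */
static struct lock_class_key my_gpio_lock_class;

static void my_gpio_setup_child_irq(unsigned int irq)
{
        irq_set_lockdep_class(irq, &my_gpio_lock_class);
        /* the chip, flow handler and chip data would be set up here as well */
}
#endif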
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void
__irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler)
{
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        desc->preflow_handler = handler;
}
#endif  /* CONFIG_IRQ_PREFLOW_FASTEOI */
#endif  /* CONFIG_GENERIC_HARDIRQS */

#endif  /* _LINUX_IRQDESC_H */