Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-29 15:43:59 +08:00)
23d0b8b053
This patch adds support for systems that cannot receive every interrupt on a single cpu simultaneously, in the check to see if we have enough HARDIRQ_BITS.

MAX_HARDIRQS_PER_CPU becomes the count of the maximum number of hardware generated interrupts per cpu. On architectures that support per cpu interrupt delivery this can be a significant space savings and scalability bonus.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rajesh Shah <rajesh.shah@intel.com>
Cc: Andi Kleen <ak@muc.de>
Cc: "Protasevich, Natalie" <Natalie.Protasevich@UNISYS.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
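For context, the sketch below illustrates how a generic HARDIRQ_BITS sanity check can key off MAX_HARDIRQS_PER_CPU rather than the system-wide irq count. It is a standalone illustration, not the kernel's actual header: the numeric values and the fallback-to-NR_IRQS shape are placeholders assumed for the example.

/* Standalone illustration of the HARDIRQ_BITS sizing check.
 * The values below are placeholders standing in for the real
 * NR_IRQS, NR_VECTORS and HARDIRQ_BITS definitions. */
#define NR_IRQS			4096	/* system-wide irq count (placeholder) */
#define NR_VECTORS		256	/* per-cpu vector count (placeholder) */
#define HARDIRQ_BITS		8	/* bits reserved in preempt_count (placeholder) */

/* Assumed fallback: architectures with per cpu interrupt delivery
 * export a tighter bound; everyone else keeps using NR_IRQS. */
#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU	NR_VECTORS
#endif

/* The check now only has to cover the interrupts that can actually
 * be pending on one cpu at a time, not every irq in the machine. */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif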
24 lines
579 B
C
#ifndef __ASM_HARDIRQ_H
#define __ASM_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>
#include <asm/pda.h>
#include <asm/apic.h>

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT 1

#define local_softirq_pending() read_pda(__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING 1

#define set_softirq_pending(x) write_pda(__softirq_pending, (x))
#define or_softirq_pending(x) or_pda(__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

#endif /* __ASM_HARDIRQ_H */
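Because __ARCH_IRQ_STAT and __ARCH_SET_SOFTIRQ_PENDING are defined, generic softirq code is expected to go through the macros above, which on x86-64 resolve to per-cpu PDA accesses (read_pda/write_pda/or_pda) instead of a shared irq_stat[] array. The toy user-space mock below shows the raise-and-drain pattern those hooks support; the raise_softirq name, the plain global standing in for the PDA field, and the main() driver are all stand-ins assumed for the illustration, not kernel code.

/* Toy user-space mock of the per-cpu softirq-pending mechanism. The
 * real kernel keeps __softirq_pending in the per-cpu PDA and reaches
 * it through read_pda()/write_pda()/or_pda(); here a single global
 * stands in for that field. */
#include <stdio.h>

static unsigned int __softirq_pending;	/* stand-in for the PDA field */

#define local_softirq_pending()	(__softirq_pending)
#define set_softirq_pending(x)	(__softirq_pending = (x))
#define or_softirq_pending(x)	(__softirq_pending |= (x))

/* Raising softirq nr just ORs its bit into the per-cpu pending word. */
#define raise_softirq(nr)	or_softirq_pending(1U << (nr))

int main(void)
{
	raise_softirq(3);			/* mark softirq number 3 pending */

	unsigned int pending = local_softirq_pending();
	set_softirq_pending(0);			/* the handler loop would run here */

	printf("pending mask was 0x%x\n", pending);
	return 0;
}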