Merge branch 'irqchip/mvebu' into irqchip/core

commit 9df5126b69
Author: Jason Cooper
Date:   2015-03-15 01:46:04 +00:00

@@ -38,6 +38,8 @@
 /* Interrupt Controller Registers Map */
 #define ARMADA_370_XP_INT_SET_MASK_OFFS		(0x48)
 #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS	(0x4C)
+#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS	(0x54)
+#define ARMADA_370_XP_INT_CAUSE_PERF(cpu)	(1 << cpu)
 
 #define ARMADA_370_XP_INT_CONTROL		(0x00)
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS	(0x30)
@@ -56,6 +58,7 @@
 #define ARMADA_370_XP_MAX_PER_CPU_IRQS		(28)
 
 #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ	(5)
+#define ARMADA_370_XP_FABRIC_IRQ		(3)
 
 #define IPI_DOORBELL_START			(0)
 #define IPI_DOORBELL_END			(8)
@@ -69,6 +72,7 @@ static void __iomem *per_cpu_int_base;
 static void __iomem *main_int_base;
 static struct irq_domain *armada_370_xp_mpic_domain;
 static u32 doorbell_mask_reg;
+static int parent_irq;
 #ifdef CONFIG_PCI_MSI
 static struct irq_domain *armada_370_xp_msi_domain;
 static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR);
@@ -76,6 +80,17 @@ static DEFINE_MUTEX(msi_used_lock);
 static phys_addr_t msi_doorbell_addr;
 #endif
 
+static inline bool is_percpu_irq(irq_hw_number_t irq)
+{
+	switch (irq) {
+	case ARMADA_370_XP_TIMER0_PER_CPU_IRQ:
+	case ARMADA_370_XP_FABRIC_IRQ:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /*
  * In SMP mode:
  * For shared global interrupts, mask/unmask global enable bit
@@ -85,7 +100,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d)
 {
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+	if (!is_percpu_irq(hwirq))
 		writel(hwirq, main_int_base +
 			ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
 	else
@@ -97,7 +112,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d)
 {
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+	if (!is_percpu_irq(hwirq))
 		writel(hwirq, main_int_base +
 			ARMADA_370_XP_INT_SET_ENABLE_OFFS);
 	else
@@ -286,14 +301,14 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 				      unsigned int virq, irq_hw_number_t hw)
 {
 	armada_370_xp_irq_mask(irq_get_irq_data(virq));
-	if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+	if (!is_percpu_irq(hw))
 		writel(hw, per_cpu_int_base +
 			ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 	else
 		writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
 	irq_set_status_flags(virq, IRQ_LEVEL);
 
-	if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
+	if (is_percpu_irq(hw)) {
 		irq_set_percpu_devid(virq);
 		irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
 					 handle_percpu_devid_irq);
@@ -307,28 +322,6 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
 	return 0;
 }
 
-#ifdef CONFIG_SMP
-static void armada_mpic_send_doorbell(const struct cpumask *mask,
-				      unsigned int irq)
-{
-	int cpu;
-	unsigned long map = 0;
-
-	/* Convert our logical CPU mask into a physical one. */
-	for_each_cpu(cpu, mask)
-		map |= 1 << cpu_logical_map(cpu);
-
-	/*
-	 * Ensure that stores to Normal memory are visible to the
-	 * other CPUs before issuing the IPI.
-	 */
-	dsb();
-
-	/* submit softirq */
-	writel((map << 8) | irq, main_int_base +
-		ARMADA_370_XP_SW_TRIG_INT_OFFS);
-}
-
 static void armada_xp_mpic_smp_cpu_init(void)
 {
 	u32 control;
@@ -351,11 +344,45 @@ static void armada_xp_mpic_smp_cpu_init(void)
 	writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
 }
 
+static void armada_xp_mpic_perf_init(void)
+{
+	unsigned long cpuid = cpu_logical_map(smp_processor_id());
+
+	/* Enable Performance Counter Overflow interrupts */
+	writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid),
+	       per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS);
+}
+
+#ifdef CONFIG_SMP
+static void armada_mpic_send_doorbell(const struct cpumask *mask,
+				      unsigned int irq)
+{
+	int cpu;
+	unsigned long map = 0;
+
+	/* Convert our logical CPU mask into a physical one. */
+	for_each_cpu(cpu, mask)
+		map |= 1 << cpu_logical_map(cpu);
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb();
+
+	/* submit softirq */
+	writel((map << 8) | irq, main_int_base +
+		ARMADA_370_XP_SW_TRIG_INT_OFFS);
+}
+
 static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
 					 unsigned long action, void *hcpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
+		armada_xp_mpic_perf_init();
 		armada_xp_mpic_smp_cpu_init();
+	}
 
 	return NOTIFY_OK;
 }
@@ -364,6 +391,21 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
 	.priority = 100,
 };
 
+static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
+		armada_xp_mpic_perf_init();
+		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block mpic_cascaded_cpu_notifier = {
+	.notifier_call = mpic_cascaded_secondary_init,
+	.priority = 100,
+};
 #endif /* CONFIG_SMP */
 
 static struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
@@ -539,7 +581,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 					     struct device_node *parent)
 {
 	struct resource main_int_res, per_cpu_int_res;
-	int parent_irq, nr_irqs, i;
+	int nr_irqs, i;
 	u32 control;
 
 	BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -572,9 +614,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 	BUG_ON(!armada_370_xp_mpic_domain);
 
-#ifdef CONFIG_SMP
+	/* Setup for the boot CPU */
+	armada_xp_mpic_perf_init();
 	armada_xp_mpic_smp_cpu_init();
-#endif
 
 	armada_370_xp_msi_init(node, main_int_res.start);
@@ -587,6 +629,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
 #endif
 	} else {
+#ifdef CONFIG_SMP
+		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+#endif
 		irq_set_chained_handler(parent_irq,
 					armada_370_xp_mpic_handle_cascade_irq);
 	}