irq_domain: Replace irq_alloc_host() with revmap-specific initializers

Each revmap type has different arguments for setting up the revmap.
This patch splits up the generator functions so that each revmap type
can do its own setup and the user doesn't need to keep track of how
each revmap type handles the arguments.

This patch also adds a host_data argument to the generators.  There are
cases where the host_data pointer will be needed before the function returns;
i.e. the legacy map calls the .map callback for each irq before returning.

v2: - Add void *host_data argument to irq_domain_add_*() functions
    - fixed failure to compile
    - Moved IRQ_DOMAIN_MAP_* defines into irqdomain.c

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Rob Herring <rob.herring@calxeda.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Milton Miller <miltonm@bga.com>
Tested-by: Olof Johansson <olof@lixom.net>

Grant Likely, 2012-02-14 14:06:54 -07:00
commit a8db8cf0d8 (parent 68700650e7)
35 changed files with 196 additions and 183 deletions
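
For orientation, here is a minimal before/after sketch of what the conversion looks like at a call site, assuming a hypothetical linear-revmap controller driver (the my_pic* names and MY_PIC_NUM_IRQS are placeholders, not taken from this patch; the signatures follow the declarations added to include/linux/irqdomain.h below):

#include <linux/errno.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct my_pic *my_pic;                   /* controller private data */
static struct irq_domain_ops my_pic_host_ops;   /* .map/.unmap callbacks */
#define MY_PIC_NUM_IRQS 16

static int my_pic_probe(struct device_node *np)
{
        struct irq_domain *host;

        /* Before this patch: one generator for every revmap type, and
         * host_data had to be assigned after allocation:
         *
         *      host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
         *                            MY_PIC_NUM_IRQS, &my_pic_host_ops, -1);
         *      host->host_data = my_pic;
         */

        /* After: a per-revmap initializer that takes host_data up front, so
         * it is already valid if .map() runs during setup (as it does for
         * the legacy variant).
         */
        host = irq_domain_add_linear(np, MY_PIC_NUM_IRQS,
                                     &my_pic_host_ops, my_pic);
        if (!host)
                return -ENOMEM;

        return 0;
}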

@@ -190,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void)
         cpld_pic_node = of_node_get(np);
 
-        cpld_pic_host =
-                irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, 16, &cpld_pic_host_ops, 16);
+        cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
         if (!cpld_pic_host) {
                 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
                 goto end;

@@ -173,15 +173,12 @@ static void __init media5200_init_irq(void)
         spin_lock_init(&media5200_irq.lock);
 
-        media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_DOMAIN_MAP_LINEAR,
-                                               MEDIA5200_NUM_IRQS,
-                                               &media5200_irq_ops, -1);
+        media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
+                        MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
         if (!media5200_irq.irqhost)
                 goto out;
         pr_debug("%s: allocated irqhost\n", __func__);
 
-        media5200_irq.irqhost->host_data = &media5200_irq;
-
         irq_set_handler_data(cascade_virq, &media5200_irq);
         irq_set_chained_handler(cascade_virq, media5200_irq_cascade);

@@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
         if (!cascade_virq)
                 return;
 
-        gpt->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR, 1,
-                                      &mpc52xx_gpt_irq_ops, -1);
+        gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
         if (!gpt->irqhost) {
-                dev_err(gpt->dev, "irq_alloc_host() failed\n");
+                dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
                 return;
         }
 
-        gpt->irqhost->host_data = gpt;
         irq_set_handler_data(cascade_virq, gpt);
         irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);

@@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void)
          * As last step, add an irq host to translate the real
          * hw irq information provided by the ofw to linux virq
          */
-        mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_DOMAIN_MAP_LINEAR,
+        mpc52xx_irqhost = irq_domain_add_linear(picnode,
                                          MPC52xx_IRQ_HIGHTESTHWIRQ,
-                                         &mpc52xx_irqhost_ops, -1);
+                                         &mpc52xx_irqhost_ops, NULL);
         if (!mpc52xx_irqhost)
                 panic(__FILE__ ": Cannot allocate the IRQ host\n");

@@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void)
         out_be32(&priv->regs->mask, ~0);
         mb();
 
-        host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, NUM_IRQS,
-                              &pci_pic_host_ops, NUM_IRQS);
+        host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
         if (!host) {
                 ret = -ENOMEM;
                 goto out_unmap_regs;
         }
 
-        host->host_data = priv;
-
         priv->host = host;
-        host->host_data = priv;
         irq_set_handler_data(irq, priv);
         irq_set_chained_handler(irq, pq2ads_pci_irq_demux);

@@ -280,9 +280,8 @@ void socrates_fpga_pic_init(struct device_node *pic)
         int i;
 
         /* Setup an irq_domain structure */
-        socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_DOMAIN_MAP_LINEAR,
-                SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops,
-                SOCRATES_FPGA_NUM_IRQS);
+        socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
+                SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
         if (socrates_fpga_pic_irq_host == NULL) {
                 pr_err("FPGA PIC: Unable to allocate host\n");
                 return;

@@ -212,9 +212,8 @@ void __init gef_pic_init(struct device_node *np)
         }
 
         /* Setup an irq_domain structure */
-        gef_pic_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
-                                          GEF_PIC_NUM_IRQS,
-                                          &gef_pic_host_ops, NO_IRQ);
+        gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
+                                          &gef_pic_host_ops, NULL);
         if (gef_pic_irq_host == NULL)
                 return;

@@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device)
         }
         memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-        msic->irq_domain = irq_alloc_host(dn, IRQ_DOMAIN_MAP_NOMAP,
-                                          NR_IRQS, &msic_host_ops, 0);
+        msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
         if (!msic->irq_domain) {
                 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
                        dn->full_name);
                 goto out_free_fifo;
         }
 
-        msic->irq_domain->host_data = msic;
-
         irq_set_handler_data(virq, msic);
         irq_set_chained_handler(virq, axon_msi_cascade);
         pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

@@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void)
         ppc_md.get_irq = beatic_get_irq;
 
         /* Allocate an irq host */
-        beatic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0,
-                                     &beatic_pic_host_ops,
-                                     0);
+        beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
         BUG_ON(beatic_host == NULL);
         irq_set_default_host(beatic_host);
 }

@@ -378,8 +378,8 @@ static int __init setup_iic(void)
 void __init iic_init_IRQ(void)
 {
         /* Setup an irq host data structure */
-        iic_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_LINEAR, IIC_SOURCE_COUNT,
-                                  &iic_host_ops, IIC_IRQ_INVALID);
+        iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
+                                         NULL);
         BUG_ON(iic_host == NULL);
         irq_set_default_host(iic_host);

@@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
                 panic("spider_pic: can't map registers !");
 
         /* Allocate a host */
-        pic->host = irq_alloc_host(of_node, IRQ_DOMAIN_MAP_LINEAR,
-                                   SPIDER_SRC_COUNT, &spider_host_ops,
-                                   SPIDER_IRQ_INVALID);
+        pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
+                                          &spider_host_ops, pic);
         if (pic->host == NULL)
                 panic("spider_pic: can't allocate irq host !");
-        pic->host->host_data = pic;
 
         /* Go through all sources and disable them */
         for (i = 0; i < SPIDER_SRC_COUNT; i++) {

@@ -159,15 +159,13 @@ struct irq_domain * __init flipper_pic_init(struct device_node *np)
         __flipper_quiesce(io_base);
 
-        irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, FLIPPER_NR_IRQS,
-                                    &flipper_irq_domain_ops, -1);
+        irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
+                                    &flipper_irq_domain_ops, io_base);
         if (!irq_domain) {
                 pr_err("failed to allocate irq_domain\n");
                 return NULL;
         }
 
-        irq_domain->host_data = io_base;
-
 out:
         return irq_domain;
 }

@@ -177,13 +177,12 @@ struct irq_domain *hlwd_pic_init(struct device_node *np)
         __hlwd_quiesce(io_base);
 
-        irq_domain = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, HLWD_NR_IRQS,
-                                    &hlwd_irq_domain_ops, -1);
+        irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
+                                    &hlwd_irq_domain_ops, io_base);
         if (!irq_domain) {
                 pr_err("failed to allocate irq_domain\n");
                 return NULL;
         }
 
-        irq_domain->host_data = io_base;
 
         return irq_domain;
 }

@@ -380,8 +380,7 @@ void __init iSeries_init_IRQ(void)
         /* Create irq host. No need for a revmap since HV will give us
          * back our virtual irq number
          */
-        host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0,
-                              &iseries_irq_domain_ops, 0);
+        host = irq_domain_add_nomap(NULL, &iseries_irq_domain_ops, NULL);
         BUG_ON(host == NULL);
         irq_set_default_host(host);

@@ -352,9 +352,8 @@ static void __init pmac_pic_probe_oldstyle(void)
         /*
          * Allocate an irq host
          */
-        pmac_pic_host = irq_alloc_host(master, IRQ_DOMAIN_MAP_LINEAR, max_irqs,
-                                       &pmac_pic_host_ops,
-                                       max_irqs);
+        pmac_pic_host = irq_domain_add_linear(master, max_irqs,
+                                              &pmac_pic_host_ops, NULL);
         BUG_ON(pmac_pic_host == NULL);
         irq_set_default_host(pmac_pic_host);

@@ -192,8 +192,7 @@ static int psurge_secondary_ipi_init(void)
 {
         int rc = -ENOMEM;
 
-        psurge_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0,
-                                     &psurge_host_ops, 0);
+        psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
         if (psurge_host)
                 psurge_secondary_virq = irq_create_direct_mapping(psurge_host);

@@ -753,8 +753,7 @@ void __init ps3_init_IRQ(void)
         unsigned cpu;
         struct irq_domain *host;
 
-        host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_NOMAP, 0, &ps3_host_ops,
-                              PS3_INVALID_OUTLET);
+        host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
         irq_set_default_host(host);
         irq_set_virq_count(PS3_PLUG_MAX + 1);

@@ -263,13 +263,11 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
                 goto free_opb;
         }
 
-        /* Allocate an irq host so that Linux knows that despite only
+        /* Allocate an irq domain so that Linux knows that despite only
          * having one interrupt to issue, we're the controller for multiple
          * hardware IRQs, so later we can lookup their virtual IRQs. */
 
-        opb->host = irq_alloc_host(dn, IRQ_DOMAIN_MAP_LINEAR,
-                        OPB_NR_IRQS, &opb_host_ops, -1);
+        opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
         if (!opb->host) {
                 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
                 goto free_regs;
@@ -277,7 +275,6 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
         opb->index = opb_index++;
         spin_lock_init(&opb->lock);
-        opb->host->host_data = opb;
 
         /* Disable all interrupts by default */
         opb_out(opb, OPB_MLSASIER, 0);

@@ -164,8 +164,7 @@ unsigned int cpm_pic_init(void)
         out_be32(&cpic_reg->cpic_cimr, 0);
 
-        cpm_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
-                                      64, &cpm_pic_host_ops, 64);
+        cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
         if (cpm_pic_host == NULL) {
                 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
                 sirq = NO_IRQ;

@@ -275,8 +275,7 @@ void cpm2_pic_init(struct device_node *node)
         out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
 
         /* create a legacy host */
-        cpm2_pic_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR,
-                                       64, &cpm2_pic_host_ops, 64);
+        cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
         if (cpm2_pic_host == NULL) {
                 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
                 return;

@@ -275,9 +275,8 @@ void __init ehv_pic_init(void)
                 return;
         }
 
-        ehv_pic->irqhost = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
-                NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0);
+        ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
+                                                 &ehv_pic_host_ops, ehv_pic);
         if (!ehv_pic->irqhost) {
                 of_node_put(np);
                 kfree(ehv_pic);
@@ -293,7 +292,6 @@ void __init ehv_pic_init(void)
                 of_node_put(np2);
         }
 
-        ehv_pic->irqhost->host_data = ehv_pic;
         ehv_pic->hc_irq = ehv_pic_irq_chip;
         ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
         ehv_pic->coreint_flag = coreint_flag;

@@ -387,8 +387,8 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
         }
         platform_set_drvdata(dev, msi);
 
-        msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_DOMAIN_MAP_LINEAR,
-                                      NR_MSI_IRQS, &fsl_msi_host_ops, 0);
+        msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
+                                      NR_MSI_IRQS, &fsl_msi_host_ops, msi);
         if (msi->irqhost == NULL) {
                 dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -420,8 +420,6 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
         msi->feature = features->fsl_pic_ip;
 
-        msi->irqhost->host_data = msi;
-
         /*
          * Remember the phandle, so that we can match with any PCI nodes
          * that have an "fsl,msi" property.

@@ -263,8 +263,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
         raw_spin_unlock_irqrestore(&i8259_lock, flags);
 
         /* create a legacy host */
-        i8259_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY,
-                                    0, &i8259_host_ops, 0);
+        i8259_host = irq_domain_add_legacy(node, &i8259_host_ops, NULL);
         if (i8259_host == NULL) {
                 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
                 return;

@@ -728,9 +728,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
         if (ipic == NULL)
                 return NULL;
 
-        ipic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR,
-                                       NR_IPIC_INTS,
-                                       &ipic_host_ops, 0);
+        ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
+                                              &ipic_host_ops, ipic);
         if (ipic->irqhost == NULL) {
                 kfree(ipic);
                 return NULL;
@@ -738,8 +737,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
         ipic->regs = ioremap(res.start, resource_size(&res));
 
-        ipic->irqhost->host_data = ipic;
-
         /* init hw */
         ipic_write(ipic->regs, IPIC_SICNR, 0x0);

@@ -171,8 +171,7 @@ int mpc8xx_pic_init(void)
                 goto out;
         }
 
-        mpc8xx_pic_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
-                                         64, &mpc8xx_pic_host_ops, 64);
+        mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
         if (mpc8xx_pic_host == NULL) {
                 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
                 ret = -ENOMEM;

@@ -1345,10 +1345,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
         mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
         mpic->isu_mask = (1 << mpic->isu_shift) - 1;
 
-        mpic->irqhost = irq_alloc_host(mpic->node, IRQ_DOMAIN_MAP_LINEAR,
+        mpic->irqhost = irq_domain_add_linear(mpic->node,
                                        isu_size ? isu_size : mpic->num_sources,
-                                       &mpic_host_ops,
-                                       flags & MPIC_LARGE_VECTORS ? 2048 : 256);
+                                       &mpic_host_ops, mpic);
 
         /*
          * FIXME: The code leaks the MPIC object and mappings here; this
@@ -1357,8 +1356,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
         if (mpic->irqhost == NULL)
                 return NULL;
 
-        mpic->irqhost->host_data = mpic;
-
         /* Display version */
         switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
         case 1:

@@ -250,9 +250,8 @@ void __init mv64x60_init_irq(void)
         paddr = of_translate_address(np, reg);
         mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
 
-        mv64x60_irq_host = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR,
-                                          MV64x60_NUM_IRQS,
-                                          &mv64x60_host_ops, MV64x60_NUM_IRQS);
+        mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
+                                          &mv64x60_host_ops, NULL);
         spin_lock_irqsave(&mv64x60_lock, flags);
         out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,

@@ -339,8 +339,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
         if (qe_ic == NULL)
                 return;
 
-        qe_ic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR,
-                                        NR_QE_IC_INTS, &qe_ic_host_ops, 0);
+        qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+                                               &qe_ic_host_ops, qe_ic);
         if (qe_ic->irqhost == NULL) {
                 kfree(qe_ic);
                 return;
@@ -348,7 +348,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
         qe_ic->regs = ioremap(res.start, resource_size(&res));
 
-        qe_ic->irqhost->host_data = qe_ic;
         qe_ic->hc_irq = qe_ic_irq_chip;
 
         qe_ic->virq_high = irq_of_parse_and_map(node, 0);

@@ -419,8 +419,7 @@ void __init tsi108_pci_int_init(struct device_node *node)
 {
         DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
 
-        pci_irq_host = irq_alloc_host(node, IRQ_DOMAIN_MAP_LEGACY,
-                                      0, &pci_irq_domain_ops, 0);
+        pci_irq_host = irq_domain_add_legacy(node, &pci_irq_domain_ops, NULL);
         if (pci_irq_host == NULL) {
                 printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
                 return;

@@ -270,13 +270,11 @@ static struct uic * __init uic_init_one(struct device_node *node)
         }
         uic->dcrbase = *dcrreg;
 
-        uic->irqhost = irq_alloc_host(node, IRQ_DOMAIN_MAP_LINEAR,
-                                      NR_UIC_INTS, &uic_host_ops, -1);
+        uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
+                                             uic);
         if (! uic->irqhost)
                 return NULL; /* FIXME: panic? */
 
-        uic->irqhost->host_data = uic;
-
         /* Start with all interrupts disabled, level and non-critical */
         mtdcr(uic->dcrbase + UIC_ER, 0);
         mtdcr(uic->dcrbase + UIC_CR, 0);

@@ -374,8 +374,7 @@ static struct irq_domain_ops xics_host_ops = {
 static void __init xics_init_host(void)
 {
-        xics_host = irq_alloc_host(NULL, IRQ_DOMAIN_MAP_TREE, 0, &xics_host_ops,
-                                   XICS_IRQ_SPURIOUS);
+        xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
         BUG_ON(xics_host == NULL);
         irq_set_default_host(xics_host);
 }

@@ -201,11 +201,10 @@ xilinx_intc_init(struct device_node *np)
         out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
 
         /* Allocate and initialize an irq_domain structure. */
-        irq = irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, XILINX_INTC_MAXIRQS,
-                             &xilinx_intc_ops, -1);
+        irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
+                                    regs);
         if (!irq)
                 panic(__FILE__ ": Cannot allocate IRQ host\n");
-        irq->host_data = regs;
 
         return irq;
 }

@@ -364,9 +364,8 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
         if (hwirq == NO_IRQ)
                 goto skip_irq;
 
-        mpc8xxx_gc->irq =
-                irq_alloc_host(np, IRQ_DOMAIN_MAP_LINEAR, MPC8XXX_GPIO_PINS,
-                               &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
+        mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
+                                        &mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
         if (!mpc8xxx_gc->irq)
                 goto skip_irq;
@@ -374,8 +373,6 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
         if (id)
                 mpc8xxx_gc->of_dev_id_data = id->data;
 
-        mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
-
         /* ack and mask all irqs */
         out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
         out_be32(mm_gc->regs + GPIO_IMR, 0);

@@ -95,10 +95,6 @@ struct irq_domain {
         /* type of reverse mapping_technique */
         unsigned int revmap_type;
-#define IRQ_DOMAIN_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
-#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
-#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
-#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
         union {
                 struct {
                         unsigned int size;
@@ -120,11 +116,21 @@ struct irq_domain {
 #ifdef CONFIG_IRQ_DOMAIN
 #ifdef CONFIG_PPC
-extern struct irq_domain *irq_alloc_host(struct device_node *of_node,
-                                         unsigned int revmap_type,
-                                         unsigned int revmap_arg,
-                                         struct irq_domain_ops *ops,
-                                         irq_hw_number_t inval_irq);
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+                                         struct irq_domain_ops *ops,
+                                         void *host_data);
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+                                         unsigned int size,
+                                         struct irq_domain_ops *ops,
+                                         void *host_data);
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+                                        struct irq_domain_ops *ops,
+                                        void *host_data);
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+                                       struct irq_domain_ops *ops,
+                                       void *host_data);
 extern struct irq_domain *irq_find_host(struct device_node *node);
 extern void irq_set_default_host(struct irq_domain *host);
 extern void irq_set_virq_count(unsigned int count);

@@ -13,6 +13,11 @@
 #include <linux/smp.h>
 #include <linux/fs.h>
 
+#define IRQ_DOMAIN_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
+#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
+#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
+#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
+
 static LIST_HEAD(irq_domain_list);
 static DEFINE_MUTEX(irq_domain_mutex);
@@ -27,100 +32,158 @@ static int default_irq_domain_match(struct irq_domain *d, struct device_node *np
 }
 
 /**
- * irq_alloc_host() - Allocate a new irq_domain data structure
+ * irq_domain_alloc() - Allocate a new irq_domain data structure
  * @of_node: optional device-tree node of the interrupt controller
  * @revmap_type: type of reverse mapping to use
- * @revmap_arg: for IRQ_DOMAIN_MAP_LINEAR linear only: size of the map
  * @ops: map/unmap domain callbacks
- * @inval_irq: provide a hw number in that domain space that is always invalid
+ * @host_data: Controller private data pointer
  *
- * Allocates and initialize and irq_domain structure. Note that in the case of
- * IRQ_DOMAIN_MAP_LEGACY, the map() callback will be called before this returns
- * for all legacy interrupts except 0 (which is always the invalid irq for
- * a legacy controller). For a IRQ_DOMAIN_MAP_LINEAR, the map is allocated by
- * this call as well. For a IRQ_DOMAIN_MAP_TREE, the radix tree will be
- * allocated later during boot automatically (the reverse mapping will use the
- * slow path until that happens).
+ * Allocates and initialize and irq_domain structure. Caller is expected to
+ * register allocated irq_domain with irq_domain_register(). Returns pointer
+ * to IRQ domain, or NULL on failure.
  */
-struct irq_domain *irq_alloc_host(struct device_node *of_node,
-                                  unsigned int revmap_type,
-                                  unsigned int revmap_arg,
-                                  struct irq_domain_ops *ops,
-                                  irq_hw_number_t inval_irq)
+static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
+                                           unsigned int revmap_type,
+                                           struct irq_domain_ops *ops,
+                                           void *host_data)
 {
-        struct irq_domain *domain, *h;
-        unsigned int size = sizeof(struct irq_domain);
-        unsigned int i;
-        unsigned int *rmap;
+        struct irq_domain *domain;
 
-        /* Allocate structure and revmap table if using linear mapping */
-        if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
-                size += revmap_arg * sizeof(unsigned int);
-
-        domain = kzalloc(size, GFP_KERNEL);
-        if (domain == NULL)
+        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+        if (WARN_ON(!domain))
                 return NULL;
 
         /* Fill structure */
         domain->revmap_type = revmap_type;
-        domain->inval_irq = inval_irq;
         domain->ops = ops;
+        domain->host_data = host_data;
         domain->of_node = of_node_get(of_node);
 
         if (domain->ops->match == NULL)
                 domain->ops->match = default_irq_domain_match;
 
+        return domain;
+}
+
+static void irq_domain_add(struct irq_domain *domain)
+{
+        mutex_lock(&irq_domain_mutex);
+        list_add(&domain->link, &irq_domain_list);
+        mutex_unlock(&irq_domain_mutex);
+        pr_debug("irq: Allocated domain of type %d @0x%p\n",
+                 domain->revmap_type, domain);
+}
+
+/**
+ * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ *
+ * Note: the map() callback will be called before this function returns
+ * for all legacy interrupts except 0 (which is always the invalid irq for
+ * a legacy controller).
+ */
+struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
+                                         struct irq_domain_ops *ops,
+                                         void *host_data)
+{
+        struct irq_domain *domain, *h;
+        unsigned int i;
+
+        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
+        if (!domain)
+                return NULL;
+
         mutex_lock(&irq_domain_mutex);
         /* Make sure only one legacy controller can be created */
-        if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
-                list_for_each_entry(h, &irq_domain_list, link) {
-                        if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
-                                mutex_unlock(&irq_domain_mutex);
-                                of_node_put(domain->of_node);
-                                kfree(domain);
-                                return NULL;
-                        }
+        list_for_each_entry(h, &irq_domain_list, link) {
+                if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
+                        mutex_unlock(&irq_domain_mutex);
+                        of_node_put(domain->of_node);
+                        kfree(domain);
+                        return NULL;
                 }
         }
         list_add(&domain->link, &irq_domain_list);
         mutex_unlock(&irq_domain_mutex);
 
-        /* Additional setups per revmap type */
-        switch(revmap_type) {
-        case IRQ_DOMAIN_MAP_LEGACY:
-                /* 0 is always the invalid number for legacy */
-                domain->inval_irq = 0;
-                /* setup us as the domain for all legacy interrupts */
-                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
-                        struct irq_data *irq_data = irq_get_irq_data(i);
-                        irq_data->hwirq = i;
-                        irq_data->domain = domain;
+        /* setup us as the domain for all legacy interrupts */
+        for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
+                struct irq_data *irq_data = irq_get_irq_data(i);
+                irq_data->hwirq = i;
+                irq_data->domain = domain;
 
                 /* Legacy flags are left to default at this point,
                  * one can then use irq_create_mapping() to
                  * explicitly change them
                  */
                 ops->map(domain, i, i);
 
                 /* Clear norequest flags */
                 irq_clear_status_flags(i, IRQ_NOREQUEST);
-                }
-                break;
-        case IRQ_DOMAIN_MAP_LINEAR:
-                rmap = (unsigned int *)(domain + 1);
-                for (i = 0; i < revmap_arg; i++)
-                        rmap[i] = 0;
-                domain->revmap_data.linear.size = revmap_arg;
-                domain->revmap_data.linear.revmap = rmap;
-                break;
-        case IRQ_DOMAIN_MAP_TREE:
-                INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
-                break;
-        default:
-                break;
         }
+        return domain;
+}
 
-        pr_debug("irq: Allocated domain of type %d @0x%p\n", revmap_type, domain);
+/**
+ * irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ * @host_data: Controller private data pointer
+ */
+struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
+                                         unsigned int size,
+                                         struct irq_domain_ops *ops,
+                                         void *host_data)
+{
+        struct irq_domain *domain;
+        unsigned int *revmap;
+
+        revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+        if (WARN_ON(!revmap))
+                return NULL;
+
+        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
+        if (!domain) {
+                kfree(revmap);
+                return NULL;
+        }
+        domain->revmap_data.linear.size = size;
+        domain->revmap_data.linear.revmap = revmap;
+        irq_domain_add(domain);
+        return domain;
+}
+
+struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
+                                        struct irq_domain_ops *ops,
+                                        void *host_data)
+{
+        struct irq_domain *domain = irq_domain_alloc(of_node,
+                                        IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
+        if (domain)
+                irq_domain_add(domain);
+        return domain;
+}
+
+/**
+ * irq_domain_add_tree()
+ * @of_node: pointer to interrupt controller's device tree node.
+ * @ops: map/unmap domain callbacks
+ *
+ * Note: The radix tree will be allocated later during boot automatically
+ * (the reverse mapping will use the slow path until that happens).
+ */
+struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
+                                       struct irq_domain_ops *ops,
+                                       void *host_data)
+{
+        struct irq_domain *domain = irq_domain_alloc(of_node,
+                                        IRQ_DOMAIN_MAP_TREE, ops, host_data);
+        if (domain) {
+                INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
+                irq_domain_add(domain);
+        }
         return domain;
 }
@@ -393,9 +456,6 @@ void irq_dispose_mapping(unsigned int virq)
                 break;
         }
 
-        /* Destroy map */
-        irq_data->hwirq = domain->inval_irq;
-
         irq_free_desc(virq);
 }
 EXPORT_SYMBOL_GPL(irq_dispose_mapping);