Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-18 01:34:14 +08:00)
x86: coding style fixes to arch/x86/kernel/io_apic_32.c
Before:
  total: 91 errors, 73 warnings, 2850 lines checked

After:
  total: 1 errors, 47 warnings, 2848 lines checked

Compile tested:

paolo@paolo-desktop:/tmp$ size io*
   text    data     bss     dec     hex filename
  13836    1756   11104   26696    6848 io_apic_32.o.after
  13836    1756   11104   26696    6848 io_apic_32.o.before

Signed-off-by: Paolo Ciarrocchi <paolo.ciarrocchi@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 46b3b4ef1e
commit 360624484c
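The diff below is almost entirely mechanical: a space after each comma, no space between a function name and its parameter list, the pointer star attached to the identifier, switch braces cuddled onto the switch line, and trailing whitespace removed. As a minimal sketch of those rules only (the identifiers here are made up for illustration and are not taken from the patch), the converted style looks like this:

/*
 * Illustration only: the checkpatch-style rules this commit applies,
 * shown on hypothetical identifiers rather than kernel code.
 *
 *   before                          after
 *   static int foo (int a,int b)    static int foo(int a, int b)
 *   unsigned long * p;              unsigned long *p;
 *   switch (x)                      switch (x) {
 *   {                               case 0:
 *       case 0: ...                         ...
 *   }                               }
 */
#include <stdio.h>

static int add_pair(int a, int b)	/* no space before '(', space after ',' */
{
	return a + b;
}

static const char *describe(int mode)	/* '*' binds to the identifier */
{
	switch (mode) {			/* opening brace on the switch line */
	case 0:
		return "edge";
	case 1:
		return "level";
	default:
		return "invalid";
	}
}

int main(void)
{
	printf("%d %s\n", add_pair(2, 3), describe(1));
	return 0;
}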
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -239,7 +239,7 @@ static void __init replace_pin_at_irq(unsigned int irq,
 	}
 }
 
-static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
+static void __modify_IO_APIC_irq(unsigned int irq, unsigned long enable, unsigned long disable)
 {
 	struct irq_pin_list *entry = irq_2_pin + irq;
 	unsigned int pin, reg;
@@ -259,32 +259,32 @@ static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsign
 }
 
 /* mask = 1 */
-static void __mask_IO_APIC_irq (unsigned int irq)
+static void __mask_IO_APIC_irq(unsigned int irq)
 {
 	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED, 0);
 }
 
 /* mask = 0 */
-static void __unmask_IO_APIC_irq (unsigned int irq)
+static void __unmask_IO_APIC_irq(unsigned int irq)
 {
 	__modify_IO_APIC_irq(irq, 0, IO_APIC_REDIR_MASKED);
 }
 
 /* mask = 1, trigger = 0 */
-static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
+static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
 {
 	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_MASKED,
 				IO_APIC_REDIR_LEVEL_TRIGGER);
 }
 
 /* mask = 0, trigger = 1 */
-static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
+static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
 {
 	__modify_IO_APIC_irq(irq, IO_APIC_REDIR_LEVEL_TRIGGER,
 				IO_APIC_REDIR_MASKED);
 }
 
-static void mask_IO_APIC_irq (unsigned int irq)
+static void mask_IO_APIC_irq(unsigned int irq)
 {
 	unsigned long flags;
 
@@ -293,7 +293,7 @@ static void mask_IO_APIC_irq (unsigned int irq)
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_IO_APIC_irq (unsigned int irq)
+static void unmask_IO_APIC_irq(unsigned int irq)
 {
 	unsigned long flags;
 
@@ -305,7 +305,7 @@ static void unmask_IO_APIC_irq (unsigned int irq)
 static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 {
 	struct IO_APIC_route_entry entry;
-	
+
 	/* Check delivery_mode to be sure we're not clearing an SMI pin */
 	entry = ioapic_read_entry(apic, pin);
 	if (entry.delivery_mode == dest_SMI)
@@ -317,7 +317,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
 	ioapic_mask_entry(apic, pin);
 }
 
-static void clear_IO_APIC (void)
+static void clear_IO_APIC(void)
 {
 	int apic, pin;
 
@@ -334,7 +334,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 	struct irq_pin_list *entry = irq_2_pin + irq;
 	unsigned int apicid_value;
 	cpumask_t tmp;
-	
+
 	cpus_and(tmp, cpumask, cpu_online_map);
 	if (cpus_empty(tmp))
 		tmp = TARGET_CPUS;
@@ -363,7 +363,7 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
 # include <linux/kernel_stat.h> /* kstat */
 # include <linux/slab.h> /* kmalloc() */
 # include <linux/timer.h>
-	
+
 #define IRQBALANCE_CHECK_ARCH -999
 #define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
 #define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
@@ -375,14 +375,14 @@ static int physical_balance __read_mostly;
 static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
 
 static struct irq_cpu_info {
-	unsigned long * last_irq;
-	unsigned long * irq_delta;
+	unsigned long *last_irq;
+	unsigned long *irq_delta;
 	unsigned long irq;
 } irq_cpu_data[NR_CPUS];
 
 #define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
-#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
-#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
+#define LAST_CPU_IRQ(cpu, irq) (irq_cpu_data[cpu].last_irq[irq])
+#define IRQ_DELTA(cpu, irq) (irq_cpu_data[cpu].irq_delta[irq])
 
 #define IDLE_ENOUGH(cpu,now) \
 	(idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
@@ -421,8 +421,8 @@ inside:
 			if (cpu == -1)
 				cpu = NR_CPUS-1;
 		}
-	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
-			(search_idle && !IDLE_ENOUGH(cpu,now)));
+	} while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu, allowed_mask) ||
+			(search_idle && !IDLE_ENOUGH(cpu, now)));
 
 	return cpu;
 }
@@ -432,15 +432,14 @@ static inline void balance_irq(int cpu, int irq)
 	unsigned long now = jiffies;
 	cpumask_t allowed_mask;
 	unsigned int new_cpu;
-	
+
 	if (irqbalance_disabled)
-		return; 
+		return;
 
 	cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
 	new_cpu = move(cpu, allowed_mask, now, 1);
-	if (cpu != new_cpu) {
+	if (cpu != new_cpu)
 		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
-	}
 }
 
 static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
@@ -452,14 +451,14 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
-			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
+			if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i), j) <
 						useful_load_threshold)
 				continue;
 			balance_irq(i, j);
 		}
 	}
 	balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
+		balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
 	return;
 }
 
@@ -488,22 +487,22 @@ static void do_irq_balance(void)
 			/* Is this an active IRQ or balancing disabled ? */
 			if (!irq_desc[j].action || irq_balancing_disabled(j))
 				continue;
-			if ( package_index == i )
-				IRQ_DELTA(package_index,j) = 0;
+			if (package_index == i)
+				IRQ_DELTA(package_index, j) = 0;
 			/* Determine the total count per processor per IRQ */
 			value_now = (unsigned long) kstat_cpu(i).irqs[j];
 
 			/* Determine the activity per processor per IRQ */
-			delta = value_now - LAST_CPU_IRQ(i,j);
+			delta = value_now - LAST_CPU_IRQ(i, j);
 
 			/* Update last_cpu_irq[][] for the next time */
-			LAST_CPU_IRQ(i,j) = value_now;
+			LAST_CPU_IRQ(i, j) = value_now;
 
 			/* Ignore IRQs whose rate is less than the clock */
 			if (delta < useful_load_threshold)
 				continue;
 			/* update the load for the processor or package total */
-			IRQ_DELTA(package_index,j) += delta;
+			IRQ_DELTA(package_index, j) += delta;
 
 			/* Keep track of the higher numbered sibling as well */
 			if (i != package_index)
@@ -529,7 +528,8 @@ static void do_irq_balance(void)
 		max_cpu_irq = ULONG_MAX;
 
 tryanothercpu:
-	/* Look for heaviest loaded processor.
+	/*
+	 * Look for heaviest loaded processor.
 	 * We may come back to get the next heaviest loaded processor.
 	 * Skip processors with trivial loads.
 	 */
@@ -538,7 +538,7 @@ tryanothercpu:
 	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
-		if (max_cpu_irq <= CPU_IRQ(i)) 
+		if (max_cpu_irq <= CPU_IRQ(i))
 			continue;
 		if (tmp_cpu_irq < CPU_IRQ(i)) {
 			tmp_cpu_irq = CPU_IRQ(i);
@@ -547,8 +547,9 @@ tryanothercpu:
 	}
 
 	if (tmp_loaded == -1) {
-	/* In the case of small number of heavy interrupt sources,
-	 * loading some of the cpus too much. We use Ingo's original
+	/*
+	 * In the case of small number of heavy interrupt sources,
+	 * loading some of the cpus too much. We use Ingo's original
 	 * approach to rotate them around.
 	 */
 		if (!first_attempt && imbalance >= useful_load_threshold) {
@@ -557,13 +558,14 @@ tryanothercpu:
 		}
 		goto not_worth_the_effort;
 	}
-	
+
 	first_attempt = 0; /* heaviest search */
 	max_cpu_irq = tmp_cpu_irq; /* load */
 	max_loaded = tmp_loaded; /* processor */
 	imbalance = (max_cpu_irq - min_cpu_irq) / 2;
- 
-	/* if imbalance is less than approx 10% of max load, then
+
+	/*
+	 * if imbalance is less than approx 10% of max load, then
 	 * observe diminishing returns action. - quit
 	 */
 	if (imbalance < (max_cpu_irq >> 3))
@@ -579,26 +581,25 @@ tryanotherirq:
 		/* Is this an active IRQ? */
 		if (!irq_desc[j].action)
 			continue;
-		if (imbalance <= IRQ_DELTA(max_loaded,j))
+		if (imbalance <= IRQ_DELTA(max_loaded, j))
 			continue;
 		/* Try to find the IRQ that is closest to the imbalance
 		 * without going over.
 		 */
-		if (move_this_load < IRQ_DELTA(max_loaded,j)) {
-			move_this_load = IRQ_DELTA(max_loaded,j);
+		if (move_this_load < IRQ_DELTA(max_loaded, j)) {
+			move_this_load = IRQ_DELTA(max_loaded, j);
 			selected_irq = j;
 		}
 	}
-	if (selected_irq == -1) {
+	if (selected_irq == -1)
 		goto tryanothercpu;
-	}
 
 	imbalance = move_this_load;
- 
+
 	/* For physical_balance case, we accumulated both load
 	 * values in the one of the siblings cpu_irq[],
 	 * to use the same code for physical and logical processors
-	 * as much as possible. 
+	 * as much as possible.
 	 *
 	 * NOTE: the cpu_irq[] array holds the sum of the load for
 	 * sibling A and sibling B in the slot for the lowest numbered
@@ -627,11 +628,11 @@ tryanotherirq:
 		/* mark for change destination */
 		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
 
-		/* Since we made a change, come back sooner to 
+		/* Since we made a change, come back sooner to
 		 * check for more variation.
 		 */
 		balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
-			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);	
+			balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
 		return;
 	}
 	goto tryanotherirq;
@@ -642,7 +643,7 @@ not_worth_the_effort:
 	 * upward
 	 */
 	balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
-		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);	
+		balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
 	return;
 }
 
@@ -681,13 +682,13 @@ static int __init balanced_irq_init(void)
 	cpumask_t tmp;
 
 	cpus_shift_right(tmp, cpu_online_map, 2);
- 	c = &boot_cpu_data;
+	c = &boot_cpu_data;
 	/* When not overwritten by the command line ask subarchitecture. */
 	if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
 		irqbalance_disabled = NO_BALANCE_IRQ;
 	if (irqbalance_disabled)
 		return 0;
-	
+
 	/* disable irqbalance completely if there is only one processor online */
 	if (num_online_cpus() < 2) {
 		irqbalance_disabled = 1;
@@ -707,10 +708,10 @@ static int __init balanced_irq_init(void)
 			printk(KERN_ERR "balanced_irq_init: out of memory");
 			goto failed;
 		}
-		memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
-		memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
+		memset(irq_cpu_data[i].irq_delta, 0, sizeof(unsigned long) * NR_IRQS);
+		memset(irq_cpu_data[i].last_irq, 0, sizeof(unsigned long) * NR_IRQS);
 	}
-	
+
 	printk(KERN_INFO "Starting balanced_irq\n");
 	if (!IS_ERR(kthread_run(balanced_irq, NULL, "kirqd")))
 		return 0;
@@ -845,7 +846,7 @@ static int __init find_isa_irq_apic(int irq, int type)
 	}
 	if (i < mp_irq_entries) {
 		int apic;
-		for(apic = 0; apic < nr_ioapics; apic++) {
+		for (apic = 0; apic < nr_ioapics; apic++) {
 			if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
 				return apic;
 		}
@@ -882,7 +883,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 		    !mp_irqs[i].mpc_irqtype &&
 		    (bus == lbus) &&
 		    (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
-			int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
+			int irq = pin_2_irq(i, apic, mp_irqs[i].mpc_dstirq);
 
 			if (!(apic || IO_APIC_IRQ(irq)))
 				continue;
@@ -902,7 +903,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
 /*
- * This function currently is only a helper for the i386 smp boot process where 
+ * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come online
 * so mask in all cases should simply be TARGET_CPUS
 */
@@ -977,37 +978,36 @@ static int MPBIOS_polarity(int idx)
 	/*
 	 * Determine IRQ line polarity (high active or low active):
 	 */
-	switch (mp_irqs[idx].mpc_irqflag & 3)
+	switch (mp_irqs[idx].mpc_irqflag & 3) {
+	case 0: /* conforms, ie. bus-type dependent polarity */
 	{
-		case 0: /* conforms, ie. bus-type dependent polarity */
-		{
-			polarity = test_bit(bus, mp_bus_not_pci)?
-				default_ISA_polarity(idx):
-				default_PCI_polarity(idx);
-			break;
-		}
-		case 1: /* high active */
-		{
-			polarity = 0;
-			break;
-		}
-		case 2: /* reserved */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			polarity = 1;
-			break;
-		}
-		case 3: /* low active */
-		{
-			polarity = 1;
-			break;
-		}
-		default: /* invalid */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			polarity = 1;
-			break;
-		}
+		polarity = test_bit(bus, mp_bus_not_pci)?
+			default_ISA_polarity(idx):
+			default_PCI_polarity(idx);
+		break;
+	}
+	case 1: /* high active */
+	{
+		polarity = 0;
+		break;
+	}
+	case 2: /* reserved */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		polarity = 1;
+		break;
+	}
+	case 3: /* low active */
+	{
+		polarity = 1;
+		break;
+	}
+	default: /* invalid */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		polarity = 1;
+		break;
+	}
 	}
 	return polarity;
 }
@@ -1020,68 +1020,66 @@ static int MPBIOS_trigger(int idx)
 	/*
 	 * Determine IRQ trigger mode (edge or level sensitive):
 	 */
-	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
+	switch ((mp_irqs[idx].mpc_irqflag>>2) & 3) {
+	case 0: /* conforms, ie. bus-type dependent */
 	{
-		case 0: /* conforms, ie. bus-type dependent */
-		{
-			trigger = test_bit(bus, mp_bus_not_pci)?
-					default_ISA_trigger(idx):
-					default_PCI_trigger(idx);
+		trigger = test_bit(bus, mp_bus_not_pci)?
+				default_ISA_trigger(idx):
+				default_PCI_trigger(idx);
 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
-			switch (mp_bus_id_to_type[bus])
-			{
-				case MP_BUS_ISA: /* ISA pin */
-				{
-					/* set before the switch */
-					break;
-				}
-				case MP_BUS_EISA: /* EISA pin */
-				{
-					trigger = default_EISA_trigger(idx);
-					break;
-				}
-				case MP_BUS_PCI: /* PCI pin */
-				{
-					/* set before the switch */
-					break;
-				}
-				case MP_BUS_MCA: /* MCA pin */
-				{
-					trigger = default_MCA_trigger(idx);
-					break;
-				}
-				default:
-				{
-					printk(KERN_WARNING "broken BIOS!!\n");
-					trigger = 1;
-					break;
-				}
-			}
+		switch (mp_bus_id_to_type[bus]) {
+		case MP_BUS_ISA: /* ISA pin */
+		{
+			/* set before the switch */
+			break;
+		}
+		case MP_BUS_EISA: /* EISA pin */
+		{
+			trigger = default_EISA_trigger(idx);
+			break;
+		}
+		case MP_BUS_PCI: /* PCI pin */
+		{
+			/* set before the switch */
+			break;
+		}
+		case MP_BUS_MCA: /* MCA pin */
+		{
+			trigger = default_MCA_trigger(idx);
+			break;
+		}
+		default:
+		{
+			printk(KERN_WARNING "broken BIOS!!\n");
+			trigger = 1;
+			break;
+		}
+		}
 #endif
-			break;
-		}
-		case 1: /* edge */
-		{
-			trigger = 0;
-			break;
-		}
-		case 2: /* reserved */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			trigger = 1;
-			break;
-		}
-		case 3: /* level */
-		{
-			trigger = 1;
-			break;
-		}
-		default: /* invalid */
-		{
-			printk(KERN_WARNING "broken BIOS!!\n");
-			trigger = 0;
-			break;
-		}
+		break;
+	}
+	case 1: /* edge */
+	{
+		trigger = 0;
+		break;
+	}
+	case 2: /* reserved */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		trigger = 1;
+		break;
+	}
+	case 3: /* level */
+	{
+		trigger = 1;
+		break;
+	}
+	default: /* invalid */
+	{
+		printk(KERN_WARNING "broken BIOS!!\n");
+		trigger = 0;
+		break;
+	}
 	}
 	return trigger;
 }
@@ -1150,8 +1148,8 @@ static inline int IO_APIC_irq_trigger(int irq)
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
 		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-			idx = find_irq_entry(apic,pin,mp_INT);
-			if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
+			idx = find_irq_entry(apic, pin, mp_INT);
+			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
 				return irq_trigger(idx);
 		}
 	}
@@ -1166,7 +1164,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
 
 static int __assign_irq_vector(int irq)
 {
-	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+	static int current_vector = FIRST_DEVICE_VECTOR, current_offset;
 	int vector, offset;
 
 	BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
@@ -1239,15 +1237,15 @@ static void __init setup_IO_APIC_irqs(void)
 		/*
 		 * add it to the IO-APIC irq-routing table:
 		 */
-		memset(&entry,0,sizeof(entry));
+		memset(&entry, 0, sizeof(entry));
 
 		entry.delivery_mode = INT_DELIVERY_MODE;
 		entry.dest_mode = INT_DEST_MODE;
 		entry.mask = 0; /* enable IRQ */
-		entry.dest.logical.logical_dest = 
+		entry.dest.logical.logical_dest =
 			cpu_mask_to_apicid(TARGET_CPUS);
 
-		idx = find_irq_entry(apic,pin,mp_INT);
+		idx = find_irq_entry(apic, pin, mp_INT);
 		if (idx == -1) {
 			if (first_notcon) {
 				apic_printk(APIC_VERBOSE, KERN_DEBUG
@@ -1291,7 +1289,7 @@ static void __init setup_IO_APIC_irqs(void)
 			vector = assign_irq_vector(irq);
 			entry.vector = vector;
 			ioapic_register_intr(irq, vector, IOAPIC_AUTO);
-		
+
 			if (!apic && (irq < 16))
 				disable_8259A_irq(irq);
 		}
@@ -1311,7 +1309,7 @@ static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
 {
 	struct IO_APIC_route_entry entry;
 
-	memset(&entry,0,sizeof(entry));
+	memset(&entry, 0, sizeof(entry));
 
 	/*
 	 * We use logical delivery to get the timer IRQ
@@ -1349,7 +1347,7 @@ void __init print_IO_APIC(void)
 	if (apic_verbosity == APIC_QUIET)
 		return;
 
- 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
+	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
 	for (i = 0; i < nr_ioapics; i++)
 		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
 			mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
@@ -1454,7 +1452,7 @@ void __init print_IO_APIC(void)
 
 #if 0
 
-static void print_APIC_bitfield (int base)
+static void print_APIC_bitfield(int base)
 {
 	unsigned int v;
 	int i, j;
@@ -1475,7 +1473,7 @@ static void print_APIC_bitfield (int base)
 	}
 }
 
-void /*__init*/ print_local_APIC(void * dummy)
+void /*__init*/ print_local_APIC(void *dummy)
 {
 	unsigned int v, ver, maxlvt;
 
@@ -1558,7 +1556,7 @@ void /*__init*/ print_local_APIC(void * dummy)
 	printk("\n");
 }
 
-void print_all_local_APICs (void)
+void print_all_local_APICs(void)
 {
 	on_each_cpu(print_local_APIC, NULL, 1, 1);
 }
@@ -1581,11 +1579,11 @@ void /*__init*/ print_PIC(void)
 	v = inb(0xa0) << 8 | inb(0x20);
 	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
 
-	outb(0x0b,0xa0);
-	outb(0x0b,0x20);
+	outb(0x0b, 0xa0);
+	outb(0x0b, 0x20);
 	v = inb(0xa0) << 8 | inb(0x20);
-	outb(0x0a,0xa0);
-	outb(0x0a,0x20);
+	outb(0x0a, 0xa0);
+	outb(0x0a, 0x20);
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
@@ -1621,7 +1619,7 @@ static void __init enable_IO_APIC(void)
 		spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
-	for(apic = 0; apic < nr_ioapics; apic++) {
+	for (apic = 0; apic < nr_ioapics; apic++) {
 		int pin;
 		/* See if any of the pins is in ExtINT mode */
 		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
@@ -1743,7 +1741,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic, 0);
 		spin_unlock_irqrestore(&ioapic_lock, flags);
-		
+
 		old_id = mp_ioapics[apic].mpc_apicid;
 
 		if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
@@ -1795,7 +1793,7 @@ static void __init setup_ioapic_ids_from_mpc(void)
 		/*
 		 * Read the right value from the MPC table and
 		 * write it into the ID register.
- 		 */
+		 */
 		apic_printk(APIC_VERBOSE, KERN_INFO
 			"...changing IO-APIC physical APIC ID to %d ...",
 			mp_ioapics[apic].mpc_apicid);
@@ -2015,7 +2013,7 @@ static void ack_apic(unsigned int irq)
 	ack_APIC_irq();
 }
 
-static void mask_lapic_irq (unsigned int irq)
+static void mask_lapic_irq(unsigned int irq)
 {
 	unsigned long v;
 
@@ -2023,7 +2021,7 @@ static void mask_lapic_irq (unsigned int irq)
 	apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
 }
 
-static void unmask_lapic_irq (unsigned int irq)
+static void unmask_lapic_irq(unsigned int irq)
 {
 	unsigned long v;
 
@@ -2041,14 +2039,14 @@ static struct irq_chip lapic_chip __read_mostly = {
 static void __init setup_nmi(void)
 {
 	/*
- 	 * Dirty trick to enable the NMI watchdog ...
+	 * Dirty trick to enable the NMI watchdog ...
 	 * We put the 8259A master into AEOI mode and
 	 * unmask on all local APICs LVT0 as NMI.
 	 *
 	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
 	 * is from Maciej W. Rozycki - so we do not have to EOI from
 	 * the NMI handler or the timer interrupt.
- 	 */ 
+	 */
 	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
 
 	enable_NMI_through_LVT0();
@@ -2312,10 +2310,10 @@ void __init setup_IO_APIC(void)
 * Called after all the initialization is done. If we didnt find any
 * APIC bugs then we can allow the modify fast path
 */
- 
+
static int __init io_apic_bug_finalize(void)
 {
-	if(sis_apic_bug == -1)
+	if (sis_apic_bug == -1)
 		sis_apic_bug = 0;
 	return 0;
 }
@@ -2326,17 +2324,17 @@ struct sysfs_ioapic_data {
 	struct sys_device dev;
 	struct IO_APIC_route_entry entry[0];
 };
-static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
+static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
 
 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
 {
 	struct IO_APIC_route_entry *entry;
 	struct sysfs_ioapic_data *data;
 	int i;
-	
+
 	data = container_of(dev, struct sysfs_ioapic_data, dev);
 	entry = data->entry;
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
+	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
 		entry[i] = ioapic_read_entry(dev->id, i);
 
 	return 0;
@@ -2349,7 +2347,7 @@ static int ioapic_resume(struct sys_device *dev)
 	unsigned long flags;
 	union IO_APIC_reg_00 reg_00;
 	int i;
-	
+
 	data = container_of(dev, struct sysfs_ioapic_data, dev);
 	entry = data->entry;
 
@@ -2360,7 +2358,7 @@ static int ioapic_resume(struct sys_device *dev)
 		io_apic_write(dev->id, 0, reg_00.raw);
 	}
 	spin_unlock_irqrestore(&ioapic_lock, flags);
-	for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
+	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
 		ioapic_write_entry(dev->id, i, entry[i]);
 
 	return 0;
@@ -2374,15 +2372,15 @@ static struct sysdev_class ioapic_sysdev_class = {
 
 static int __init ioapic_init_sysfs(void)
 {
-	struct sys_device * dev;
+	struct sys_device *dev;
 	int i, size, error = 0;
 
 	error = sysdev_class_register(&ioapic_sysdev_class);
 	if (error)
 		return error;
 
-	for (i = 0; i < nr_ioapics; i++ ) {
-		size = sizeof(struct sys_device) + nr_ioapic_registers[i] 
+	for (i = 0; i < nr_ioapics; i++) {
+		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
 			* sizeof(struct IO_APIC_route_entry);
 		mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
 		if (!mp_ioapic_data[i]) {
@@ -2391,7 +2389,7 @@ static int __init ioapic_init_sysfs(void)
 		}
 		memset(mp_ioapic_data[i], 0, size);
 		dev = &mp_ioapic_data[i]->dev;
-		dev->id = i; 
+		dev->id = i;
 		dev->cls = &ioapic_sysdev_class;
 		error = sysdev_register(dev);
 		if (error) {
@@ -2466,7 +2464,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 	msg->address_lo =
 		MSI_ADDR_BASE_LO |
 		((INT_DEST_MODE == 0) ?
-MSI_ADDR_DEST_MODE_PHYSICAL:
+			MSI_ADDR_DEST_MODE_PHYSICAL:
 			MSI_ADDR_DEST_MODE_LOGICAL) |
 		((INT_DELIVERY_MODE != dest_LowestPrio) ?
 			MSI_ADDR_REDIRECTION_CPU:
@@ -2477,7 +2475,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
 		MSI_DATA_TRIGGER_EDGE |
 		MSI_DATA_LEVEL_ASSERT |
 		((INT_DELIVERY_MODE != dest_LowestPrio) ?
-MSI_DATA_DELIVERY_FIXED:
+			MSI_DATA_DELIVERY_FIXED:
 			MSI_DATA_DELIVERY_LOWPRI) |
 		MSI_DATA_VECTOR(vector);
 }
@@ -2648,12 +2646,12 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
 #endif /* CONFIG_HT_IRQ */
 
 /* --------------------------------------------------------------------------
-                        ACPI-based IOAPIC Configuration
+			ACPI-based IOAPIC Configuration
    -------------------------------------------------------------------------- */
 
 #ifdef CONFIG_ACPI
 
-int __init io_apic_get_unique_id (int ioapic, int apic_id)
+int __init io_apic_get_unique_id(int ioapic, int apic_id)
 {
 	union IO_APIC_reg_00 reg_00;
 	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -2662,10 +2660,10 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
 	int i = 0;
 
 	/*
-	 * The P4 platform supports up to 256 APIC IDs on two separate APIC 
-	 * buses (one for LAPICs, one for IOAPICs), where predecessors only 
+	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
+	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
 	 * supports up to 16 on one shared APIC bus.
-	 * 
+	 *
 	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
 	 * advantage of new APIC bus architecture.
 	 */
@@ -2684,7 +2682,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
 	}
 
 	/*
-	 * Every APIC in a system must have a unique ID or we get lots of nice 
+	 * Every APIC in a system must have a unique ID or we get lots of nice
 	 * 'stuck on smp_invalidate_needed IPI wait' messages.
 	 */
 	if (check_apicid_used(apic_id_map, apic_id)) {
@@ -2701,7 +2699,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
 			"trying %d\n", ioapic, apic_id, i);
 
 		apic_id = i;
-	} 
+	}
 
 	tmp = apicid_to_cpu_present(apic_id);
 	physids_or(apic_id_map, apic_id_map, tmp);
@@ -2728,7 +2726,7 @@ int __init io_apic_get_unique_id (int ioapic, int apic_id)
 }
 
 
-int __init io_apic_get_version (int ioapic)
+int __init io_apic_get_version(int ioapic)
 {
 	union IO_APIC_reg_01 reg_01;
 	unsigned long flags;
@@ -2741,7 +2739,7 @@ int __init io_apic_get_version (int ioapic)
 }
 
 
-int __init io_apic_get_redir_entries (int ioapic)
+int __init io_apic_get_redir_entries(int ioapic)
 {
 	union IO_APIC_reg_01 reg_01;
 	unsigned long flags;
@@ -2754,7 +2752,7 @@ int __init io_apic_get_redir_entries (int ioapic)
 }
 
 
-int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
+int io_apic_set_pci_routing(int ioapic, int pin, int irq, int edge_level, int active_high_low)
 {
 	struct IO_APIC_route_entry entry;
 
@@ -2770,7 +2768,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
 	 * corresponding device driver registers for this IRQ.
 	 */
 
-	memset(&entry,0,sizeof(entry));
+	memset(&entry, 0, sizeof(entry));
 
 	entry.delivery_mode = INT_DELIVERY_MODE;
 	entry.dest_mode = INT_DEST_MODE;
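For readability, the largest hunk above (the MPBIOS_polarity() switch) can be hard to follow in diff form. The following is a standalone mock-up of the control flow that the patched code produces, with the case labels at the same indent level as the switch, as Documentation/CodingStyle asks. The helpers and the simplified per-case bodies here are stand-ins for illustration only; they are not the kernel's implementations, and the real function keeps a brace block under each case label.

/*
 * Compilable sketch of the post-patch MPBIOS_polarity() structure.
 * bus_is_isa(), default_ISA_polarity() and default_PCI_polarity() are
 * stubs invented for this example.
 */
#include <stdio.h>
#include <stdbool.h>

static bool bus_is_isa(int idx)          { return idx == 0; } /* stub */
static int default_ISA_polarity(int idx) { (void)idx; return 0; } /* stub */
static int default_PCI_polarity(int idx) { (void)idx; return 1; } /* stub */

static int mpbios_polarity_like(int idx, int irqflag)
{
	int polarity;

	switch (irqflag & 3) {
	case 0: /* conforms, ie. bus-type dependent polarity */
		polarity = bus_is_isa(idx) ?
			default_ISA_polarity(idx) :
			default_PCI_polarity(idx);
		break;
	case 1: /* high active */
		polarity = 0;
		break;
	case 2: /* reserved */
		fprintf(stderr, "broken BIOS!!\n");
		polarity = 1;
		break;
	case 3: /* low active */
		polarity = 1;
		break;
	default: /* invalid */
		fprintf(stderr, "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	return polarity;
}

int main(void)
{
	printf("polarity(idx=0, flag=0) = %d\n", mpbios_polarity_like(0, 0));
	printf("polarity(idx=5, flag=3) = %d\n", mpbios_polarity_like(5, 3));
	return 0;
}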