[PATCH] more for_each_cpu() conversions
When we stop allocating percpu memory for not-possible CPUs we must not touch the percpu data for not-possible CPUs at all.  The correct way of doing this is to test cpu_possible() or to use for_each_cpu().

This patch is a kernel-wide sweep of all instances of NR_CPUS.  I found very few instances of this bug, if any.  But the patch converts lots of open-coded tests to use the preferred helper macros.

Cc: Mikael Starvik <starvik@axis.com>
Cc: David Howells <dhowells@redhat.com>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: William Lee Irwin III <wli@holomorphy.com>
Cc: Andi Kleen <ak@muc.de>
Cc: Christian Zankel <chris@zankel.net>
Cc: Philippe Elie <phil.el@wanadoo.fr>
Cc: Nathan Scott <nathans@sgi.com>
Cc: Jens Axboe <axboe@suse.de>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 394e3902c5
parent 63872f87a1
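The rewrite applied below is the same in every hunk: an open-coded loop from 0 to NR_CPUS-1 guarded by an explicit cpu_online(), cpu_possible() or cpu_isset() test becomes the matching helper macro (for_each_online_cpu(), for_each_cpu(), for_each_present_cpu()), so per-cpu data belonging to not-possible CPUs is never dereferenced. A minimal sketch of the before/after shapes, assuming the 2.6-era cpumask helpers; the wrapper functions show_irq_counts_old()/show_irq_counts_new() are invented here for illustration and are not part of the patch:

#include <linux/cpumask.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

/* Before: walk every slot up to NR_CPUS and filter by hand. */
static void show_irq_counts_old(struct seq_file *p, int irq)
{
	int j;

	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
}

/* After: the helper visits only CPUs that are actually online. */
static void show_irq_counts_new(struct seq_file *p, int irq)
{
	int j;

	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_cpu(j).irqs[irq]);
}

Both forms print one count per online CPU; the helper form simply never touches array slots or per-cpu areas of CPUs that cannot exist on the system.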
@@ -52,9 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -67,9 +66,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -75,9 +75,8 @@ int show_interrupts(struct seq_file *p, void *v)
 	switch (i) {
 	case 0:
 		seq_printf(p, " ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 
 		seq_putc(p, '\n');
 		break;
@@ -100,9 +99,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i - 1]);
 #endif
 
 	level = group->sources[ix]->level - frv_irq_levels;
@@ -1145,9 +1145,7 @@ static int __cpuinit powernowk8_init(void)
 {
 	unsigned int i, supported_cpus = 0;
 
-	for (i=0; i<NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_cpu(i) {
 		if (check_supported_cpu(i))
 			supported_cpus++;
 	}
@@ -351,8 +351,8 @@ static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
 {
 	int i, j;
 	Dprintk("Rotating IRQs among CPUs.\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		for (j = 0; cpu_online(i) && (j < NR_IRQS); j++) {
+	for_each_online_cpu(i) {
+		for (j = 0; j < NR_IRQS; j++) {
 			if (!irq_desc[j].action)
 				continue;
 			/* Is it a significant load ? */
@@ -381,7 +381,7 @@ static void do_irq_balance(void)
 	unsigned long imbalance = 0;
 	cpumask_t allowed_mask, target_cpu_mask, tmp;
 
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		int package_index;
 		CPU_IRQ(i) = 0;
 		if (!cpu_online(i))
@@ -422,9 +422,7 @@ static void do_irq_balance(void)
 		}
 	}
 	/* Find the least loaded processor package */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (min_cpu_irq > CPU_IRQ(i)) {
@@ -441,9 +439,7 @@ tryanothercpu:
 	 */
 	tmp_cpu_irq = 0;
 	tmp_loaded = -1;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (i != CPU_TO_PACKAGEINDEX(i))
 			continue;
 		if (max_cpu_irq <= CPU_IRQ(i))
@@ -619,9 +615,7 @@ static int __init balanced_irq_init(void)
 	if (smp_num_siblings > 1 && !cpus_empty(tmp))
 		physical_balance = 1;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
 		if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
@@ -638,9 +632,11 @@ static int __init balanced_irq_init(void)
 	else
 		printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
 failed:
-	for (i = 0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		kfree(irq_cpu_data[i].irq_delta);
+		irq_cpu_data[i].irq_delta = NULL;
 		kfree(irq_cpu_data[i].last_irq);
+		irq_cpu_data[i].last_irq = NULL;
 	}
 	return 0;
 }
@@ -143,7 +143,7 @@ static int __init check_nmi_watchdog(void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
+	for_each_cpu(cpu) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
 		   after the timer is started. */
@@ -510,7 +510,7 @@ void touch_nmi_watchdog (void)
 	 * Just reset the alert counters, (other CPUs might be
 	 * spinning on locks we hold):
 	 */
-	for (i = 0; i < NR_CPUS; i++)
+	for_each_cpu(i)
 		alert_counter[i] = 0;
 
 	/*
@@ -122,7 +122,7 @@ static void nmi_save_registers(void * dummy)
 static void free_msrs(void)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
+	for_each_cpu(i) {
 		kfree(cpu_msrs[i].counters);
 		cpu_msrs[i].counters = NULL;
 		kfree(cpu_msrs[i].controls);
@@ -138,10 +138,7 @@ static int allocate_msrs(void)
 	size_t counters_size = sizeof(struct op_msr) * model->num_counters;
 
 	int i;
-	for (i = 0; i < NR_CPUS; ++i) {
-		if (!cpu_online(i))
-			continue;
-
+	for_each_online_cpu(i) {
 		cpu_msrs[i].counters = kmalloc(counters_size, GFP_KERNEL);
 		if (!cpu_msrs[i].counters) {
 			success = 0;
@@ -37,9 +37,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -52,9 +51,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -68,9 +68,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -83,9 +82,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -167,8 +167,8 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
 	mb();
 
 	/* Send a message to all other CPUs and wait for them to respond */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != cpu)
+	for_each_online_cpu(i)
+		if (i != cpu)
 			core_send_ipi(i, SMP_CALL_FUNCTION);
 
 	/* Wait for response */
@@ -88,12 +88,9 @@ static inline int find_level(cpuid_t *cpunum, int irq)
 {
 	int cpu, i;
 
-	for (cpu = 0; cpu <= NR_CPUS; cpu++) {
+	for_each_online_cpu(cpu) {
 		struct slice_data *si = cpu_data[cpu].data;
 
-		if (!cpu_online(cpu))
-			continue;
-
 		for (i = BASE_PCI_IRQ; i < LEVELS_PER_SLICE; i++)
 			if (si->level_to_irq[i] == irq) {
 				*cpunum = cpu;
@@ -298,8 +298,8 @@ send_IPI_allbutself(enum ipi_message_type op)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -643,14 +643,13 @@ int sys_cpus(int argc, char **argv)
 	if ( argc == 1 ){
 
 #ifdef DUMP_MORE_STATE
-		for(i=0; i<NR_CPUS; i++) {
+		for_each_online_cpu(i) {
 			int cpus_per_line = 4;
-			if(cpu_online(i)) {
-				if (j++ % cpus_per_line)
-					printk(" %3d",i);
-				else
-					printk("\n %3d",i);
-			}
+
+			if (j++ % cpus_per_line)
+				printk(" %3d",i);
+			else
+				printk("\n %3d",i);
 		}
 		printk("\n");
 #else
@@ -659,9 +658,7 @@ int sys_cpus(int argc, char **argv)
 	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
 		printk("\nCPUSTATE TASK CPUNUM CPUID HARDCPU(HPA)\n");
 #ifdef DUMP_MORE_STATE
-		for(i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
@@ -695,9 +692,7 @@ int sys_cpus(int argc, char **argv)
 	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
 #ifdef DUMP_MORE_STATE
 		printk("\nCPUSTATE CPUID\n");
-		for (i=0;i<NR_CPUS;i++) {
-			if (!cpu_online(i))
-				continue;
+		for_each_online_cpu(i) {
 			if (cpu_data[i].cpuid != NO_PROC_ID) {
 				switch(cpu_data[i].state) {
 					case STATE_RENDEZVOUS:
@@ -135,9 +135,8 @@ skip:
 #ifdef CONFIG_TAU_INT
 	if (tau_initialized){
 		seq_puts(p, "TAU: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", tau_interrupts(j));
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", tau_interrupts(j));
 		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
 	}
 #endif
@@ -162,9 +162,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
 		unsigned long bogosum = 0;
 		int i;
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i))
-				bogosum += loops_per_jiffy;
+		for_each_online_cpu(i)
+			bogosum += loops_per_jiffy;
 		seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 			   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP && CONFIG_PPC32 */
@@ -272,9 +272,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
@@ -191,9 +191,7 @@ static void smp_psurge_message_pass(int target, int msg)
 	if (num_online_cpus() < 2)
 		return;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i))
-			continue;
+	for_each_online_cpu(i) {
 		if (target == MSG_ALL
 		    || (target == MSG_ALL_BUT_SELF && i != smp_processor_id())
 		    || target == i) {
@@ -168,9 +168,8 @@ int show_cpuinfo(struct seq_file *m, void *v)
 	/* Show summary information */
 #ifdef CONFIG_SMP
 	unsigned long bogosum = 0;
-	for (i = 0; i < NR_CPUS; ++i)
-		if (cpu_online(i))
-			bogosum += cpu_data[i].loops_per_jiffy;
+	for_each_online_cpu(i)
+		bogosum += cpu_data[i].loops_per_jiffy;
 	seq_printf(m, "total bogomips\t: %lu.%02lu\n",
 		   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
 #endif /* CONFIG_SMP */
@@ -712,9 +711,8 @@ int __init ppc_init(void)
 	if ( ppc_md.progress ) ppc_md.progress(" ", 0xffff);
 
 	/* register CPU devices */
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			register_cpu(&cpu_devices[i], i, NULL);
+	for_each_cpu(i)
+		register_cpu(&cpu_devices[i], i, NULL);
 
 	/* call platform init */
 	if (ppc_md.init != NULL) {
@@ -799,9 +799,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	print_cpu_info(&S390_lowcore.cpu_data);
 
-	for(i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i))
-			continue;
+	for_each_cpu(i) {
 		lowcore_ptr[i] = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL|GFP_DMA,
 					 sizeof(void*) == 8 ? 1 : 0);
@@ -35,9 +35,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -404,9 +404,8 @@ static int __init topology_init(void)
 {
 	int cpu_id;
 
-	for (cpu_id = 0; cpu_id < NR_CPUS; cpu_id++)
-		if (cpu_possible(cpu_id))
-			register_cpu(&cpu[cpu_id], cpu_id, NULL);
+	for_each_cpu(cpu_id)
+		register_cpu(&cpu[cpu_id], cpu_id, NULL);
 
 	return 0;
 }
@@ -53,9 +53,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_puts(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -184,9 +184,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++) {
-		if (cpu_online(j))
-			seq_printf(p, "%10u ",
+	for_each_online_cpu(j) {
+		seq_printf(p, "%10u ",
 		    kstat_cpu(cpu_logical_map(j)).irqs[i]);
 	}
 #endif
@@ -243,9 +243,8 @@ int setup_profiling_timer(unsigned int multiplier)
 		return -EINVAL;
 
 	spin_lock_irqsave(&prof_setup_lock, flags);
-	for(i = 0; i < NR_CPUS; i++) {
-		if (cpu_possible(i))
-			load_profile_irq(i, lvl14_resolution / multiplier);
+	for_each_cpu(i) {
+		load_profile_irq(i, lvl14_resolution / multiplier);
 		prof_multiplier(i) = multiplier;
 	}
 	spin_unlock_irqrestore(&prof_setup_lock, flags);
@@ -273,13 +272,12 @@ void smp_bogo(struct seq_file *m)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m,
-				   "Cpu%dBogo\t: %lu.%02lu\n",
-				   i,
-				   cpu_data(i).udelay_val/(500000/HZ),
-				   (cpu_data(i).udelay_val/(5000/HZ))%100);
+	for_each_online_cpu(i) {
+		seq_printf(m,
+			   "Cpu%dBogo\t: %lu.%02lu\n",
+			   i,
+			   cpu_data(i).udelay_val/(500000/HZ),
+			   (cpu_data(i).udelay_val/(5000/HZ))%100);
 	}
 }
 
@@ -288,8 +286,6 @@ void smp_info(struct seq_file *m)
 	int i;
 
 	seq_printf(m, "State:\n");
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i))
-			seq_printf(m, "CPU%d\t\t: online\n", i);
-	}
+	for_each_online_cpu(i)
+		seq_printf(m, "CPU%d\t\t: online\n", i);
 }
@@ -103,11 +103,9 @@ found_it: seq_printf(p, "%3d: ", i);
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (x = 0; x < NR_CPUS; x++) {
-		if (cpu_online(x))
-			seq_printf(p, "%10u ",
-			    kstat_cpu(cpu_logical_map(x)).irqs[i]);
-	}
+	for_each_online_cpu(x)
+		seq_printf(p, "%10u ",
+		    kstat_cpu(cpu_logical_map(x)).irqs[i]);
 #endif
 	seq_printf(p, "%c %s",
 		(action->flags & SA_INTERRUPT) ? '+' : ' ',
@@ -249,11 +249,9 @@ void __init smp4d_boot_cpus(void)
 	} else {
 		unsigned long bogosum = 0;
 
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map)) {
-				bogosum += cpu_data(i).udelay_val;
-				smp_highest_cpu = i;
-			}
-		}
+		for_each_present_cpu(i) {
+			bogosum += cpu_data(i).udelay_val;
+			smp_highest_cpu = i;
+		}
 		SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
@@ -218,10 +218,8 @@ void __init smp4m_boot_cpus(void)
 		cpu_present_map = cpumask_of_cpu(smp_processor_id());
 	} else {
 		unsigned long bogosum = 0;
-		for(i = 0; i < NR_CPUS; i++) {
-			if (cpu_isset(i, cpu_present_map))
-				bogosum += cpu_data(i).udelay_val;
-		}
+		for_each_present_cpu(i)
+			bogosum += cpu_data(i).udelay_val;
 		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
 		       cpucount + 1,
 		       bogosum/(500000/HZ),
|
@ -117,9 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||||||
#ifndef CONFIG_SMP
|
#ifndef CONFIG_SMP
|
||||||
seq_printf(p, "%10u ", kstat_irqs(i));
|
seq_printf(p, "%10u ", kstat_irqs(i));
|
||||||
#else
|
#else
|
||||||
for (j = 0; j < NR_CPUS; j++) {
|
for_each_online_cpu(j) {
|
||||||
if (!cpu_online(j))
|
|
||||||
continue;
|
|
||||||
seq_printf(p, "%10u ",
|
seq_printf(p, "%10u ",
|
||||||
kstat_cpu(j).irqs[i]);
|
kstat_cpu(j).irqs[i]);
|
||||||
}
|
}
|
||||||
|
@ -57,25 +57,21 @@ void smp_info(struct seq_file *m)
|
|||||||
int i;
|
int i;
|
||||||
|
|
||||||
seq_printf(m, "State:\n");
|
seq_printf(m, "State:\n");
|
||||||
for (i = 0; i < NR_CPUS; i++) {
|
for_each_online_cpu(i)
|
||||||
if (cpu_online(i))
|
seq_printf(m, "CPU%d:\t\tonline\n", i);
|
||||||
seq_printf(m,
|
|
||||||
"CPU%d:\t\tonline\n", i);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void smp_bogo(struct seq_file *m)
|
void smp_bogo(struct seq_file *m)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < NR_CPUS; i++)
|
for_each_online_cpu(i)
|
||||||
if (cpu_online(i))
|
seq_printf(m,
|
||||||
seq_printf(m,
|
"Cpu%dBogo\t: %lu.%02lu\n"
|
||||||
"Cpu%dBogo\t: %lu.%02lu\n"
|
"Cpu%dClkTck\t: %016lx\n",
|
||||||
"Cpu%dClkTck\t: %016lx\n",
|
i, cpu_data(i).udelay_val / (500000/HZ),
|
||||||
i, cpu_data(i).udelay_val / (500000/HZ),
|
(cpu_data(i).udelay_val / (5000/HZ)) % 100,
|
||||||
(cpu_data(i).udelay_val / (5000/HZ)) % 100,
|
i, cpu_data(i).clock_tick);
|
||||||
i, cpu_data(i).clock_tick);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init smp_store_cpu_info(int id)
|
void __init smp_store_cpu_info(int id)
|
||||||
@ -1282,7 +1278,7 @@ int setup_profiling_timer(unsigned int multiplier)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
spin_lock_irqsave(&prof_setup_lock, flags);
|
spin_lock_irqsave(&prof_setup_lock, flags);
|
||||||
for (i = 0; i < NR_CPUS; i++)
|
for_each_cpu(i)
|
||||||
prof_multiplier(i) = multiplier;
|
prof_multiplier(i) = multiplier;
|
||||||
current_tick_offset = (timer_tick_offset / multiplier);
|
current_tick_offset = (timer_tick_offset / multiplier);
|
||||||
spin_unlock_irqrestore(&prof_setup_lock, flags);
|
spin_unlock_irqrestore(&prof_setup_lock, flags);
|
||||||
@ -1384,10 +1380,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
|||||||
unsigned long bogosum = 0;
|
unsigned long bogosum = 0;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < NR_CPUS; i++) {
|
for_each_online_cpu(i)
|
||||||
if (cpu_online(i))
|
bogosum += cpu_data(i).udelay_val;
|
||||||
bogosum += cpu_data(i).udelay_val;
|
|
||||||
}
|
|
||||||
printk("Total of %ld processors activated "
|
printk("Total of %ld processors activated "
|
||||||
"(%lu.%02lu BogoMIPS).\n",
|
"(%lu.%02lu BogoMIPS).\n",
|
||||||
(long) num_online_cpus(),
|
(long) num_online_cpus(),
|
||||||
|
@@ -38,9 +38,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -53,10 +52,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ",
-					kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 
@@ -68,15 +65,13 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
 		seq_putc(p, '\n');
 #ifdef CONFIG_X86_LOCAL_APIC
 		seq_printf(p, "LOC: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
 		seq_putc(p, '\n');
 #endif
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
@@ -162,9 +162,7 @@ int __init check_nmi_watchdog (void)
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (!cpu_online(cpu))
-			continue;
+	for_each_online_cpu(cpu) {
 		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
 			endflag = 1;
 			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
@@ -83,9 +83,8 @@ int show_interrupts(struct seq_file *p, void *v)
 
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j)
+			seq_printf(p, "CPU%d ",j);
 		seq_putc(p, '\n');
 	}
 
@@ -98,9 +97,8 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 	seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-	for (j = 0; j < NR_CPUS; j++)
-		if (cpu_online(j))
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].handler->typename);
 	seq_printf(p, " %s", action->name);
@@ -113,9 +111,8 @@ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", nmi_count(j));
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", nmi_count(j));
 		seq_putc(p, '\n');
 		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
 	}
@@ -172,11 +172,9 @@ static struct net_device_stats *get_stats(struct net_device *dev)
 
 	memset(stats, 0, sizeof(struct net_device_stats));
 
-	for (i=0; i < NR_CPUS; i++) {
+	for_each_cpu(i) {
 		struct net_device_stats *lb_stats;
 
-		if (!cpu_possible(i))
-			continue;
 		lb_stats = &per_cpu(loopback_stats, i);
 		stats->rx_bytes += lb_stats->rx_bytes;
 		stats->tx_bytes += lb_stats->tx_bytes;
@@ -38,9 +38,8 @@ void free_cpu_buffers(void)
 {
 	int i;
 
-	for_each_online_cpu(i) {
+	for_each_online_cpu(i)
 		vfree(cpu_buffer[i].buffer);
-	}
 }
 
 int alloc_cpu_buffers(void)
@@ -62,18 +62,15 @@ xfs_read_xfsstats(
 		while (j < xstats[i].endpoint) {
 			val = 0;
 			/* sum over all cpus */
-			for (c = 0; c < NR_CPUS; c++) {
-				if (!cpu_possible(c)) continue;
+			for_each_cpu(c)
 				val += *(((__u32*)&per_cpu(xfsstats, c) + j));
-			}
 			len += sprintf(buffer + len, " %u", val);
 			j++;
 		}
 		buffer[len++] = '\n';
 	}
 	/* extra precision counters */
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_possible(i)) continue;
+	for_each_cpu(i) {
 		xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes;
 		xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes;
 		xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes;
@@ -38,8 +38,7 @@ xfs_stats_clear_proc_handler(
 
 	if (!ret && write && *valp) {
 		printk("XFS Clearing xfsstats\n");
-		for (c = 0; c < NR_CPUS; c++) {
-			if (!cpu_possible(c)) continue;
+		for_each_cpu(c) {
 			preempt_disable();
 			/* save vn_active, it's a universal truth! */
 			vn_active = per_cpu(xfsstats, c).vn_active;
@@ -231,9 +231,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	int i;
 
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			mm->context[i] = 0;
+	for_each_online_cpu(i)
+		mm->context[i] = 0;
 	if (tsk != current)
 		task_thread_info(tsk)->pcb.ptbr
 		  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
@@ -27,8 +27,8 @@ static inline cpumask_t node_to_cpumask(int node)
 	cpumask_t node_cpu_mask = CPU_MASK_NONE;
 	int cpu;
 
-	for(cpu = 0; cpu < NR_CPUS; cpu++) {
-		if (cpu_online(cpu) && (cpu_to_node(cpu) == node))
+	for_each_online_cpu(cpu) {
+		if (cpu_to_node(cpu) == node)
 			cpu_set(cpu, node_cpu_mask);
 	}
 
@@ -19,10 +19,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset[__i], \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset[__i], \
+		       (src), (size)); \
 } while (0)
 #else /* ! SMP */
 
@@ -27,10 +27,9 @@
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 
 extern void setup_per_cpu_areas(void);
@@ -46,10 +46,9 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset[__i], \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset[__i], \
+		       (src), (size)); \
 } while (0)
 
 #else /* ! SMP */
@@ -26,10 +26,9 @@ register unsigned long __local_per_cpu_offset asm("g5");
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 #else /* ! SMP */
 
@@ -26,10 +26,9 @@
 #define percpu_modcopy(pcpudst, src, size) \
 do { \
 	unsigned int __i; \
-	for (__i = 0; __i < NR_CPUS; __i++) \
-		if (cpu_possible(__i)) \
-			memcpy((pcpudst)+__per_cpu_offset(__i), \
-			       (src), (size)); \
+	for_each_cpu(__i) \
+		memcpy((pcpudst)+__per_cpu_offset(__i), \
+		       (src), (size)); \
 } while (0)
 
 extern void setup_per_cpu_areas(void);
@@ -149,22 +149,16 @@ struct disk_attribute {
 ({ \
 	typeof(gendiskp->dkstats->field) res = 0; \
 	int i; \
-	for (i=0; i < NR_CPUS; i++) { \
-		if (!cpu_possible(i)) \
-			continue; \
+	for_each_cpu(i) \
 		res += per_cpu_ptr(gendiskp->dkstats, i)->field; \
-	} \
 	res; \
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
 	int i;
-	for (i=0; i < NR_CPUS; i++) {
-		if (cpu_possible(i)) {
-			memset(per_cpu_ptr(gendiskp->dkstats, i), value,
-					sizeof (struct disk_stats));
-		}
-	}
+	for_each_cpu(i)
+		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
+				sizeof (struct disk_stats));
 }
 
 #else