Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/travis/linux-2.6-cpus4096-for-ingo into cpus4096
commit 9466d6036f
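Every hunk below follows the same conversion: APIC and IPI interfaces that took a cpumask_t by value now take const struct cpumask * (with NR_CPUS raised to 4096, a by-value mask means a 512-byte stack copy per call), NR_CPUS bounds in per-cpu lookups become the runtime nr_cpu_ids, and each genapic grows two new operations, send_IPI_mask_allbutself() and cpu_mask_to_apicid_and(). A minimal userspace sketch of the by-value vs. by-pointer difference; the struct and function names here are illustrative, not the kernel's:

    /* Userspace model of the conversion pattern (illustrative names). */
    #include <stdio.h>
    #include <string.h>

    #define MY_NR_CPUS 4096

    struct my_cpumask { unsigned long bits[MY_NR_CPUS / (8 * sizeof(long))]; };

    /* Old style: the whole mask (512 bytes at 4096 CPUs) is copied per call. */
    static int first_cpu_byval(struct my_cpumask mask)
    {
        unsigned int i, w = 8 * sizeof(long);
        for (i = 0; i < MY_NR_CPUS; i++)
            if (mask.bits[i / w] & (1UL << (i % w)))
                return i;
        return -1;
    }

    /* New style: only a pointer crosses the call, and const documents that
     * the callee may read but never modify the caller's mask. */
    static int first_cpu_byref(const struct my_cpumask *mask)
    {
        unsigned int i, w = 8 * sizeof(long);
        for (i = 0; i < MY_NR_CPUS; i++)
            if (mask->bits[i / w] & (1UL << (i % w)))
                return i;
        return -1;
    }

    int main(void)
    {
        struct my_cpumask online;
        memset(&online, 0, sizeof(online));
        online.bits[0] = 0x5;           /* CPUs 0 and 2 "online" */
        printf("%d %d\n", first_cpu_byval(online), first_cpu_byref(&online));
        return 0;
    }

The const-pointer form is what lets the global cpu_online_mask be passed straight through in the hunks below instead of being copied first.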
@@ -591,19 +591,20 @@ config IOMMU_HELPER
 
 config MAXSMP
     bool "Configure Maximum number of SMP Processors and NUMA Nodes"
-    depends on X86_64 && SMP && BROKEN
+    depends on X86_64 && SMP && DEBUG_KERNEL && EXPERIMENTAL
+    select CPUMASK_OFFSTACK
     default n
     help
       Configure maximum number of CPUS and NUMA Nodes for this architecture.
       If unsure, say N.
 
 config NR_CPUS
-    int "Maximum number of CPUs (2-512)" if !MAXSMP
-    range 2 512
-    depends on SMP
+    int "Maximum number of CPUs" if SMP && !MAXSMP
+    range 2 512 if SMP && !MAXSMP
+    default "1" if !SMP
     default "4096" if MAXSMP
-    default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
-    default "8"
+    default "32" if SMP && (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000)
+    default "8" if SMP
     help
       This allows you to specify the maximum number of CPUs which this
       kernel will support.  The maximum supported value is 512 and the
@@ -9,12 +9,12 @@ static inline int apic_id_registered(void)
     return (1);
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-    return cpu_online_map;
+    return &cpu_online_map;
 #else
-    return cpumask_of_cpu(0);
+    return &cpumask_of_cpu(0);
 #endif
 }
 
@@ -79,7 +79,7 @@ static inline int apicid_to_node(int logical_apicid)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-    if (mps_cpu < NR_CPUS)
+    if (mps_cpu < nr_cpu_ids)
         return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
 
     return BAD_APICID;
@@ -94,7 +94,7 @@ extern u8 cpu_2_logical_apicid[];
 /* Mapping from cpu number to logical apicid */
 static inline int cpu_to_logical_apicid(int cpu)
 {
-    if (cpu >= NR_CPUS)
+    if (cpu >= nr_cpu_ids)
         return BAD_APICID;
     return cpu_physical_id(cpu);
 }
@@ -119,16 +119,32 @@ static inline int check_phys_apicid_present(int boot_cpu_physical_apicid)
 }
 
 /* As we are using single CPU as destination, pick only one CPU here */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
     int cpu;
     int apicid;
 
-    cpu = first_cpu(cpumask);
+    cpu = first_cpu(*cpumask);
     apicid = cpu_to_logical_apicid(cpu);
     return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    int cpu;
+
+    /*
+     * We're using fixed IRQ delivery, can only return one phys APIC ID.
+     * May as well be the first.
+     */
+    cpu = cpumask_any_and(cpumask, andmask);
+    if (cpu < nr_cpu_ids)
+        return cpu_to_logical_apicid(cpu);
+
+    return BAD_APICID;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
     return cpuid_apic >> index_msb;
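The new cpu_mask_to_apicid_and() picks one CPU lying in both an IRQ's affinity mask and a vector's allocation domain, using cpumask_any_and() so no temporary AND mask has to be materialized on the stack. A rough userspace model of that intersection search (sizes and names are illustrative):

    /* Userspace model of cpumask_any_and(): first bit set in both masks,
     * without building a temporary AND mask (not kernel code). */
    #include <stdio.h>

    #define MY_NR_CPUS 64

    static unsigned int my_any_and(unsigned long long a, unsigned long long b)
    {
        unsigned long long both = a & b;
        unsigned int cpu;

        for (cpu = 0; cpu < MY_NR_CPUS; cpu++)
            if (both & (1ULL << cpu))
                return cpu;
        return MY_NR_CPUS;  /* >= nr_cpu_ids means "none": caller returns BAD_APICID */
    }

    int main(void)
    {
        /* affinity {1,3}, vector domain {3,5} -> CPU 3 */
        printf("%u\n", my_any_and(0xaULL, 0x28ULL));
        return 0;
    }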
@@ -1,25 +1,22 @@
 #ifndef __ASM_MACH_IPI_H
 #define __ASM_MACH_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
     send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-    cpumask_t mask = cpu_online_map;
-    cpu_clear(smp_processor_id(), mask);
-
-    if (!cpus_empty(mask))
-        send_IPI_mask(mask, vector);
+    send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-    send_IPI_mask(cpu_online_map, vector);
+    send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_MACH_IPI_H */
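The old send_IPI_allbutself() in each subarch copied cpu_online_map to the stack, cleared the local CPU, and sent to the remainder; the replacement send_IPI_mask_allbutself(cpu_online_mask, vector) walks the online mask in place and skips the sender. A userspace model of the rewritten shape (illustrative names):

    /* Userspace model of the allbutself rewrite (not kernel code). */
    #include <stdio.h>

    static unsigned long online_mask = 0xf;   /* CPUs 0-3 "online" */
    static unsigned int this_cpu = 2;

    static void send_one(unsigned int cpu, int vector)
    {
        printf("IPI %d -> cpu%u\n", vector, cpu);
    }

    /* Walk the mask, skipping ourselves: no big stack copy needed. */
    static void send_mask_allbutself(unsigned long mask, int vector)
    {
        unsigned int cpu;

        for (cpu = 0; cpu < 8 * sizeof(mask); cpu++)
            if ((mask & (1UL << cpu)) && cpu != this_cpu)
                send_one(cpu, vector);
    }

    int main(void)
    {
        send_mask_allbutself(online_mask, 0x02);  /* NMI_VECTOR in the diff */
        return 0;
    }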
@@ -9,14 +9,14 @@ static inline int apic_id_registered(void)
     return (1);
 }
 
-static inline cpumask_t target_cpus_cluster(void)
+static inline const cpumask_t *target_cpus_cluster(void)
 {
-    return CPU_MASK_ALL;
+    return &CPU_MASK_ALL;
 }
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-    return cpumask_of_cpu(smp_processor_id());
+    return &cpumask_of_cpu(smp_processor_id());
 }
 
 #define APIC_DFR_VALUE_CLUSTER (APIC_DFR_CLUSTER)
@@ -80,9 +80,10 @@ extern int apic_version [MAX_APICS];
 static inline void setup_apic_routing(void)
 {
     int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id());
-    printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
+    printk("Enabling APIC mode:  %s. Using %d I/O APICs, target cpus %lx\n",
         (apic_version[apic] == 0x14) ?
-        "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(target_cpus())[0]);
+        "Physical Cluster" : "Logical Cluster",
+        nr_ioapics, cpus_addr(*target_cpus())[0]);
 }
 
 static inline int multi_timer_check(int apic, int irq)
@@ -100,7 +101,7 @@ static inline int cpu_present_to_apicid(int mps_cpu)
 {
     if (!mps_cpu)
         return boot_cpu_physical_apicid;
-    else if (mps_cpu < NR_CPUS)
+    else if (mps_cpu < nr_cpu_ids)
         return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
     else
         return BAD_APICID;
@@ -120,9 +121,9 @@ extern u8 cpu_2_logical_apicid[];
 static inline int cpu_to_logical_apicid(int cpu)
 {
 #ifdef CONFIG_SMP
-    if (cpu >= NR_CPUS)
-        return BAD_APICID;
-    return (int)cpu_2_logical_apicid[cpu];
+    if (cpu >= nr_cpu_ids)
+        return BAD_APICID;
+    return (int)cpu_2_logical_apicid[cpu];
 #else
     return logical_smp_processor_id();
 #endif
@@ -146,14 +147,15 @@ static inline int check_phys_apicid_present(int cpu_physical_apicid)
     return (1);
 }
 
-static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
+static inline unsigned int
+cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
 {
     int num_bits_set;
     int cpus_found = 0;
     int cpu;
    int apicid;
 
-    num_bits_set = cpus_weight(cpumask);
+    num_bits_set = cpumask_weight(cpumask);
     /* Return id to all */
     if (num_bits_set == NR_CPUS)
         return 0xFF;
@@ -161,10 +163,10 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
      * The cpus in the mask must all be on the apic cluster.  If are not
      * on the same apicid cluster return default value of TARGET_CPUS.
      */
-    cpu = first_cpu(cpumask);
+    cpu = cpumask_first(cpumask);
     apicid = cpu_to_logical_apicid(cpu);
     while (cpus_found < num_bits_set) {
-        if (cpu_isset(cpu, cpumask)) {
+        if (cpumask_test_cpu(cpu, cpumask)) {
             int new_apicid = cpu_to_logical_apicid(cpu);
             if (apicid_cluster(apicid) !=
                     apicid_cluster(new_apicid)){
@@ -179,14 +181,14 @@ static inline unsigned int cpu_mask_to_apicid_cluster(cpumask_t cpumask)
     return apicid;
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
     int num_bits_set;
     int cpus_found = 0;
     int cpu;
     int apicid;
 
-    num_bits_set = cpus_weight(cpumask);
+    num_bits_set = cpus_weight(*cpumask);
     /* Return id to all */
     if (num_bits_set == NR_CPUS)
         return cpu_to_logical_apicid(0);
@@ -194,10 +196,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
      * The cpus in the mask must all be on the apic cluster.  If are not
      * on the same apicid cluster return default value of TARGET_CPUS.
     */
-    cpu = first_cpu(cpumask);
+    cpu = first_cpu(*cpumask);
     apicid = cpu_to_logical_apicid(cpu);
     while (cpus_found < num_bits_set) {
-        if (cpu_isset(cpu, cpumask)) {
+        if (cpu_isset(cpu, *cpumask)) {
             int new_apicid = cpu_to_logical_apicid(cpu);
             if (apicid_cluster(apicid) !=
                     apicid_cluster(new_apicid)){
@@ -212,6 +214,54 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
     return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    int num_bits_set;
+    int num_bits_set2;
+    int cpus_found = 0;
+    int cpu;
+    int apicid = 0;
+
+    num_bits_set = cpumask_weight(cpumask);
+    num_bits_set2 = cpumask_weight(andmask);
+    num_bits_set = min(num_bits_set, num_bits_set2);
+    /* Return id to all */
+    if (num_bits_set >= nr_cpu_ids)
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+        return 0xFF;
+#else
+        return cpu_to_logical_apicid(0);
+#endif
+    /*
+     * The cpus in the mask must all be on the apic cluster.  If are not
+     * on the same apicid cluster return default value of TARGET_CPUS.
+     */
+    cpu = cpumask_first_and(cpumask, andmask);
+    apicid = cpu_to_logical_apicid(cpu);
+
+    while (cpus_found < num_bits_set) {
+        if (cpumask_test_cpu(cpu, cpumask) &&
+            cpumask_test_cpu(cpu, andmask)) {
+            int new_apicid = cpu_to_logical_apicid(cpu);
+            if (apicid_cluster(apicid) !=
+                    apicid_cluster(new_apicid)) {
+                printk(KERN_WARNING
+                    "%s: Not a valid mask!\n", __func__);
+#if defined CONFIG_ES7000_CLUSTERED_APIC
+                return 0xFF;
+#else
+                return cpu_to_logical_apicid(0);
+#endif
+            }
+            apicid = new_apicid;
+            cpus_found++;
+        }
+        cpu++;
+    }
+    return apicid;
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
 {
     return cpuid_apic >> index_msb;
@@ -1,24 +1,22 @@
 #ifndef __ASM_ES7000_IPI_H
 #define __ASM_ES7000_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
     send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-    cpumask_t mask = cpu_online_map;
-    cpu_clear(smp_processor_id(), mask);
-    if (!cpus_empty(mask))
-        send_IPI_mask(mask, vector);
+    send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-    send_IPI_mask(cpu_online_map, vector);
+    send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_ES7000_IPI_H */
@@ -24,7 +24,7 @@ struct genapic {
     int (*probe)(void);
 
     int (*apic_id_registered)(void);
-    cpumask_t (*target_cpus)(void);
+    const struct cpumask *(*target_cpus)(void);
     int int_delivery_mode;
     int int_dest_mode;
     int ESR_DISABLE;
@@ -57,12 +57,16 @@ struct genapic {
 
     unsigned (*get_apic_id)(unsigned long x);
     unsigned long apic_id_mask;
-    unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
-    cpumask_t (*vector_allocation_domain)(int cpu);
+    unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+    unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                           const struct cpumask *andmask);
+    void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 
 #ifdef CONFIG_SMP
     /* ipi */
-    void (*send_IPI_mask)(cpumask_t mask, int vector);
+    void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+    void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                     int vector);
     void (*send_IPI_allbutself)(int vector);
     void (*send_IPI_all)(int vector);
 #endif
@@ -114,6 +118,7 @@ struct genapic {
     APICFUNC(get_apic_id) \
     .apic_id_mask = APIC_ID_MASK, \
     APICFUNC(cpu_mask_to_apicid) \
+    APICFUNC(cpu_mask_to_apicid_and) \
     APICFUNC(vector_allocation_domain) \
     APICFUNC(acpi_madt_oem_check) \
     IPIFUNC(send_IPI_mask) \
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_GENAPIC_64_H
 #define _ASM_X86_GENAPIC_64_H
 
+#include <linux/cpumask.h>
+
 /*
  * Copyright 2004 James Cleverdon, IBM.
  * Subject to the GNU Public License, v.2
@@ -18,16 +20,20 @@ struct genapic {
     u32 int_delivery_mode;
     u32 int_dest_mode;
     int (*apic_id_registered)(void);
-    cpumask_t (*target_cpus)(void);
-    cpumask_t (*vector_allocation_domain)(int cpu);
+    const struct cpumask *(*target_cpus)(void);
+    void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
     void (*init_apic_ldr)(void);
     /* ipi */
-    void (*send_IPI_mask)(cpumask_t mask, int vector);
+    void (*send_IPI_mask)(const struct cpumask *mask, int vector);
+    void (*send_IPI_mask_allbutself)(const struct cpumask *mask,
+                                     int vector);
     void (*send_IPI_allbutself)(int vector);
     void (*send_IPI_all)(int vector);
     void (*send_IPI_self)(int vector);
     /* */
-    unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
+    unsigned int (*cpu_mask_to_apicid)(const struct cpumask *cpumask);
+    unsigned int (*cpu_mask_to_apicid_and)(const struct cpumask *cpumask,
+                                           const struct cpumask *andmask);
     unsigned int (*phys_pkg_id)(int index_msb);
     unsigned int (*get_apic_id)(unsigned long x);
     unsigned long (*set_apic_id)(unsigned int id);
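In both genapic structs, vector_allocation_domain changes from returning a cpumask_t by value to filling a caller-provided struct cpumask *retmask, the usual C idiom once a return value becomes a large aggregate. A userspace model of the two shapes (illustrative names and sizes):

    /* Userspace model of the out-parameter rewrite of
     * vector_allocation_domain (not kernel code). */
    #include <stdio.h>
    #include <string.h>

    struct my_cpumask { unsigned long bits[64]; };

    /* Old: the whole mask is returned by value (and typically copied again
     * by the caller on assignment). */
    static struct my_cpumask domain_byval(int cpu)
    {
        struct my_cpumask d;
        memset(&d, 0, sizeof(d));
        d.bits[cpu / 64] |= 1UL << (cpu % 64);
        return d;
    }

    /* New: the caller owns the storage; the callee just fills it in. */
    static void domain_fill(int cpu, struct my_cpumask *retmask)
    {
        memset(retmask, 0, sizeof(*retmask));
        retmask->bits[cpu / 64] |= 1UL << (cpu % 64);
    }

    int main(void)
    {
        struct my_cpumask m = domain_byval(70);
        domain_fill(70, &m);
        printf("%lx\n", m.bits[1]);   /* bit 6 of word 1 -> 0x40 */
        return 0;
    }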
@@ -117,7 +117,8 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector,
     native_apic_mem_write(APIC_ICR, cfg);
 }
 
-static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
+static inline void send_IPI_mask_sequence(const struct cpumask *mask,
+                                          int vector)
 {
     unsigned long flags;
     unsigned long query_cpu;
@@ -128,11 +129,29 @@ static inline void send_IPI_mask_sequence(cpumask_t mask, int vector)
      * - mbligh
      */
     local_irq_save(flags);
-    for_each_cpu_mask_nr(query_cpu, mask) {
+    for_each_cpu(query_cpu, mask) {
         __send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, query_cpu),
                               vector, APIC_DEST_PHYSICAL);
     }
     local_irq_restore(flags);
 }
 
+static inline void send_IPI_mask_allbutself(const struct cpumask *mask,
+                                            int vector)
+{
+    unsigned long flags;
+    unsigned int query_cpu;
+    unsigned int this_cpu = smp_processor_id();
+
+    /* See Hack comment above */
+
+    local_irq_save(flags);
+    for_each_cpu(query_cpu, mask)
+        if (query_cpu != this_cpu)
+            __send_IPI_dest_field(
+                per_cpu(x86_cpu_to_apicid, query_cpu),
+                vector, APIC_DEST_PHYSICAL);
+    local_irq_restore(flags);
+}
+
 #endif /* _ASM_X86_IPI_H */
@@ -37,7 +37,7 @@ extern int irqbalance_disable(char *str);
 
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
 #endif
 
 extern unsigned int do_IRQ(struct pt_regs *regs);
@@ -8,12 +8,12 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_FLAT)
 
-static inline cpumask_t target_cpus(void)
+static inline const struct cpumask *target_cpus(void)
 {
 #ifdef CONFIG_SMP
-    return cpu_online_map;
+    return cpu_online_mask;
 #else
-    return cpumask_of_cpu(0);
+    return cpumask_of(0);
 #endif
 }
 
@@ -28,6 +28,7 @@ static inline cpumask_t target_cpus(void)
 #define apic_id_registered (genapic->apic_id_registered)
 #define init_apic_ldr (genapic->init_apic_ldr)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define phys_pkg_id (genapic->phys_pkg_id)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID)))
@@ -61,9 +62,18 @@ static inline int apic_id_registered(void)
     return physid_isset(read_apic_id(), phys_cpu_present_map);
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-    return cpus_addr(cpumask)[0];
+    return cpumask_bits(cpumask)[0];
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    unsigned long mask1 = cpumask_bits(cpumask)[0];
+    unsigned long mask2 = cpumask_bits(andmask)[0];
+
+    return (unsigned int)(mask1 & mask2);
+}
+
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)
@@ -88,7 +98,7 @@ static inline int apicid_to_node(int logical_apicid)
 #endif
 }
 
-static inline cpumask_t vector_allocation_domain(int cpu)
+static inline void vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
     /* Careful. Some cpus do not strictly honor the set of cpus
      * specified in the interrupt destination when using lowest
@@ -98,8 +108,7 @@ static inline cpumask_t vector_allocation_domain(int cpu)
      * deliver interrupts to the wrong hyperthread when only one
      * hyperthread was specified in the interrupt desitination.
      */
-    cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-    return domain;
+    *retmask = (cpumask_t) { { [0] = APIC_ALL_CPUS } };
 }
 #endif
 
@@ -131,7 +140,7 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-    if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
+    if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
         return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
     else
         return BAD_APICID;
@@ -4,7 +4,8 @@
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
 
-void send_IPI_mask_bitmask(cpumask_t mask, int vector);
+void send_IPI_mask_bitmask(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 void __send_IPI_shortcut(unsigned int shortcut, int vector);
 
 extern int no_broadcast;
@@ -12,28 +13,27 @@ extern int no_broadcast;
 #ifdef CONFIG_X86_64
 #include <asm/genapic.h>
 #define send_IPI_mask (genapic->send_IPI_mask)
+#define send_IPI_mask_allbutself (genapic->send_IPI_mask_allbutself)
 #else
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
     send_IPI_mask_bitmask(mask, vector);
 }
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 #endif
 
 static inline void __local_send_IPI_allbutself(int vector)
 {
-    if (no_broadcast || vector == NMI_VECTOR) {
-        cpumask_t mask = cpu_online_map;
-
-        cpu_clear(smp_processor_id(), mask);
-        send_IPI_mask(mask, vector);
-    } else
+    if (no_broadcast || vector == NMI_VECTOR)
+        send_IPI_mask_allbutself(cpu_online_mask, vector);
+    else
         __send_IPI_shortcut(APIC_DEST_ALLBUT, vector);
 }
 
 static inline void __local_send_IPI_all(int vector)
 {
     if (no_broadcast || vector == NMI_VECTOR)
-        send_IPI_mask(cpu_online_map, vector);
+        send_IPI_mask(cpu_online_mask, vector);
     else
         __send_IPI_shortcut(APIC_DEST_ALLINC, vector);
 }
@@ -24,6 +24,7 @@
 #define check_phys_apicid_present (genapic->check_phys_apicid_present)
 #define check_apicid_used (genapic->check_apicid_used)
 #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define cpu_mask_to_apicid_and (genapic->cpu_mask_to_apicid_and)
 #define vector_allocation_domain (genapic->vector_allocation_domain)
 #define enable_apic_mode (genapic->enable_apic_mode)
 #define phys_pkg_id (genapic->phys_pkg_id)
@@ -7,9 +7,9 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
-    return CPU_MASK_ALL;
+    return &CPU_MASK_ALL;
 }
 
 #define NO_BALANCE_IRQ (1)
@@ -122,7 +122,13 @@ static inline void enable_apic_mode(void)
  * We use physical apicids here, not logical, so just return the default
  * physical broadcast to stop people from breaking us
  */
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
     return (int) 0xF;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    return (int) 0xF;
+}
+
@@ -1,25 +1,22 @@
 #ifndef __ASM_NUMAQ_IPI_H
 #define __ASM_NUMAQ_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t, int vector);
+void send_IPI_mask_sequence(const struct cpumask *mask, int vector);
+void send_IPI_mask_allbutself(const struct cpumask *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const struct cpumask *mask, int vector)
 {
     send_IPI_mask_sequence(mask, vector);
 }
 
 static inline void send_IPI_allbutself(int vector)
 {
-    cpumask_t mask = cpu_online_map;
-    cpu_clear(smp_processor_id(), mask);
-
-    if (!cpus_empty(mask))
-        send_IPI_mask(mask, vector);
+    send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-    send_IPI_mask(cpu_online_map, vector);
+    send_IPI_mask(cpu_online_mask, vector);
 }
 
 #endif /* __ASM_NUMAQ_IPI_H */
@@ -60,7 +60,7 @@ struct smp_ops {
     void (*cpu_die)(unsigned int cpu);
     void (*play_dead)(void);
 
-    void (*send_call_func_ipi)(cpumask_t mask);
+    void (*send_call_func_ipi)(const struct cpumask *mask);
     void (*send_call_func_single_ipi)(int cpu);
 };
 
@@ -125,7 +125,7 @@ static inline void arch_send_call_function_single_ipi(int cpu)
 
 static inline void arch_send_call_function_ipi(cpumask_t mask)
 {
-    smp_ops.send_call_func_ipi(mask);
+    smp_ops.send_call_func_ipi(&mask);
 }
 
 void cpu_disable_common(void);
@@ -138,7 +138,7 @@ void native_cpu_die(unsigned int cpu);
 void native_play_dead(void);
 void play_dead_common(void);
 
-void native_send_call_func_ipi(cpumask_t mask);
+void native_send_call_func_ipi(const struct cpumask *mask);
 void native_send_call_func_single_ipi(int cpu);
 
 extern void prefill_possible_map(void);
@@ -14,13 +14,13 @@
 
 #define APIC_DFR_VALUE (APIC_DFR_CLUSTER)
 
-static inline cpumask_t target_cpus(void)
+static inline const cpumask_t *target_cpus(void)
 {
     /* CPU_MASK_ALL (0xff) has undefined behaviour with
      * dest_LowestPrio mode logical clustered apic interrupt routing
      * Just start on cpu 0.  IRQ balancing will spread load
      */
-    return cpumask_of_cpu(0);
+    return &cpumask_of_cpu(0);
 }
 
 #define INT_DELIVERY_MODE (dest_LowestPrio)
@@ -137,14 +137,14 @@ static inline void enable_apic_mode(void)
 {
 }
 
-static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
+static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 {
     int num_bits_set;
     int cpus_found = 0;
     int cpu;
     int apicid;
 
-    num_bits_set = cpus_weight(cpumask);
+    num_bits_set = cpus_weight(*cpumask);
     /* Return id to all */
     if (num_bits_set == NR_CPUS)
         return (int) 0xFF;
@@ -152,10 +152,10 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
     * The cpus in the mask must all be on the apic cluster.  If are not
     * on the same apicid cluster return default value of TARGET_CPUS.
     */
-    cpu = first_cpu(cpumask);
+    cpu = first_cpu(*cpumask);
     apicid = cpu_to_logical_apicid(cpu);
     while (cpus_found < num_bits_set) {
-        if (cpu_isset(cpu, cpumask)) {
+        if (cpu_isset(cpu, *cpumask)) {
             int new_apicid = cpu_to_logical_apicid(cpu);
             if (apicid_cluster(apicid) !=
                     apicid_cluster(new_apicid)){
@@ -170,6 +170,45 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
     return apicid;
 }
 
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    int num_bits_set;
+    int num_bits_set2;
+    int cpus_found = 0;
+    int cpu;
+    int apicid = 0;
+
+    num_bits_set = cpumask_weight(cpumask);
+    num_bits_set2 = cpumask_weight(andmask);
+    num_bits_set = min(num_bits_set, num_bits_set2);
+    /* Return id to all */
+    if (num_bits_set >= nr_cpu_ids)
+        return 0xFF;
+    /*
+     * The cpus in the mask must all be on the apic cluster.  If are not
+     * on the same apicid cluster return default value of TARGET_CPUS.
+     */
+    cpu = cpumask_first_and(cpumask, andmask);
+    apicid = cpu_to_logical_apicid(cpu);
+    while (cpus_found < num_bits_set) {
+        if (cpumask_test_cpu(cpu, cpumask)
+            && cpumask_test_cpu(cpu, andmask)) {
+            int new_apicid = cpu_to_logical_apicid(cpu);
+            if (apicid_cluster(apicid) !=
+                    apicid_cluster(new_apicid)) {
+                printk(KERN_WARNING
+                    "%s: Not a valid mask!\n", __func__);
+                return 0xFF;
+            }
+            apicid = apicid | new_apicid;
+            cpus_found++;
+        }
+        cpu++;
+    }
+    return apicid;
+}
+
 /* cpuid returns the value latched in the HW at reset, not the APIC ID
  * register's value.  For any box whose BIOS changes APIC IDs, like
  * clustered APIC systems, we must use hard_smp_processor_id.
@@ -1,9 +1,10 @@
 #ifndef __ASM_SUMMIT_IPI_H
 #define __ASM_SUMMIT_IPI_H
 
-void send_IPI_mask_sequence(cpumask_t mask, int vector);
+void send_IPI_mask_sequence(const cpumask_t *mask, int vector);
+void send_IPI_mask_allbutself(const cpumask_t *mask, int vector);
 
-static inline void send_IPI_mask(cpumask_t mask, int vector)
+static inline void send_IPI_mask(const cpumask_t *mask, int vector)
 {
     send_IPI_mask_sequence(mask, vector);
 }
@@ -14,12 +15,12 @@ static inline void send_IPI_allbutself(int vector)
     cpu_clear(smp_processor_id(), mask);
 
     if (!cpus_empty(mask))
-        send_IPI_mask(mask, vector);
+        send_IPI_mask(&mask, vector);
 }
 
 static inline void send_IPI_all(int vector)
 {
-    send_IPI_mask(cpu_online_map, vector);
+    send_IPI_mask(&cpu_online_map, vector);
 }
 
 #endif /* __ASM_SUMMIT_IPI_H */
@@ -226,6 +226,8 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_core_id(cpu)          (cpu_data(cpu).cpu_core_id)
 #define topology_core_siblings(cpu)    (per_cpu(cpu_core_map, cpu))
 #define topology_thread_siblings(cpu)  (per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)     (&per_cpu(cpu_core_map, cpu))
+#define topology_thread_cpumask(cpu)   (&per_cpu(cpu_sibling_map, cpu))
 
 /* indicates that pointers to the topology cpumask_t maps are valid */
 #define arch_provides_topology_pointers yes
@@ -141,7 +141,7 @@ static int lapic_next_event(unsigned long delta,
                             struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
                               struct clock_event_device *evt);
-static void lapic_timer_broadcast(const struct cpumask *mask);
+static void lapic_timer_broadcast(const cpumask_t *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,10 +453,10 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const struct cpumask *mask)
+static void lapic_timer_broadcast(const cpumask_t *mask)
 {
 #ifdef CONFIG_SMP
-    send_IPI_mask(*mask, LOCAL_TIMER_VECTOR);
+    send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 #endif
 }
 
@@ -1903,8 +1903,8 @@ void __cpuinit generic_processor_info(int apicid, int version)
     }
 #endif
 
-    cpu_set(cpu, cpu_possible_map);
-    cpu_set(cpu, cpu_present_map);
+    set_cpu_possible(cpu, true);
+    set_cpu_present(cpu, true);
 }
 
 #ifdef CONFIG_X86_64
@@ -2106,7 +2106,7 @@ __cpuinit int apic_is_clustered_box(void)
     bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
     bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
 
-    for (i = 0; i < NR_CPUS; i++) {
+    for (i = 0; i < nr_cpu_ids; i++) {
         /* are we being called early in kernel startup? */
         if (bios_cpu_apicid) {
             id = bios_cpu_apicid[i];
@@ -534,12 +534,29 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
     per_cpu(cpuid4_info, cpu) = NULL;
 }
 
+static void get_cpu_leaves(void *_retval)
+{
+    int j, *retval = _retval, cpu = smp_processor_id();
+
+    /* Do cpuid and store the results */
+    for (j = 0; j < num_cache_leaves; j++) {
+        struct _cpuid4_info *this_leaf;
+        this_leaf = CPUID4_INFO_IDX(cpu, j);
+        *retval = cpuid4_cache_lookup(j, this_leaf);
+        if (unlikely(*retval < 0)) {
+            int i;
+
+            for (i = 0; i < j; i++)
+                cache_remove_shared_cpu_map(cpu, i);
+            break;
+        }
+        cache_shared_cpu_map_setup(cpu, j);
+    }
+}
+
 static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
-    struct _cpuid4_info    *this_leaf;
-    unsigned long          j;
     int                    retval;
-    cpumask_t              oldmask;
 
     if (num_cache_leaves == 0)
         return -ENOENT;
@@ -549,27 +566,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
     if (per_cpu(cpuid4_info, cpu) == NULL)
         return -ENOMEM;
 
-    oldmask = current->cpus_allowed;
-    retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-    if (retval)
-        goto out;
-
-    /* Do cpuid and store the results */
-    for (j = 0; j < num_cache_leaves; j++) {
-        this_leaf = CPUID4_INFO_IDX(cpu, j);
-        retval = cpuid4_cache_lookup(j, this_leaf);
-        if (unlikely(retval < 0)) {
-            int i;
-
-            for (i = 0; i < j; i++)
-                cache_remove_shared_cpu_map(cpu, i);
-            break;
-        }
-        cache_shared_cpu_map_setup(cpu, j);
-    }
-    set_cpus_allowed_ptr(current, &oldmask);
-
-out:
+    smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
     if (retval) {
         kfree(per_cpu(cpuid4_info, cpu));
         per_cpu(cpuid4_info, cpu) = NULL;
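detect_cache_attributes() used to migrate current onto the target CPU with set_cpus_allowed_ptr(), run CPUID there, and restore the old affinity; the rewrite packages that body as get_cpu_leaves() and runs it remotely via smp_call_function_single(), passing results back through the void * argument. A userspace model of that callback shape (run_on_cpu_stub() is a stand-in, not a kernel API):

    /* Userspace model: work moved into a void (*)(void *) callback with
     * results returned through the pointer argument (not kernel code). */
    #include <stdio.h>

    static int num_cache_leaves = 3;

    static int cpuid4_lookup_stub(int leaf)
    {
        return leaf;        /* stand-in for the real CPUID query */
    }

    static void get_cpu_leaves(void *_retval)
    {
        int j, *retval = _retval;

        *retval = 0;
        for (j = 0; j < num_cache_leaves; j++) {
            *retval = cpuid4_lookup_stub(j);
            if (*retval < 0)
                break;      /* error propagated through the pointer */
        }
    }

    /* Stand-in for smp_call_function_single(cpu, fn, info, wait). */
    static void run_on_cpu_stub(int cpu, void (*fn)(void *), void *info)
    {
        (void)cpu;          /* the real call would IPI the target CPU */
        fn(info);
    }

    int main(void)
    {
        int retval = -1;
        run_on_cpu_stub(1, get_cpu_leaves, &retval);
        printf("retval=%d\n", retval);
        return 0;
    }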
@@ -83,34 +83,41 @@ static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
  * CPU Initialization
  */
 
+struct thresh_restart {
+    struct threshold_block *b;
+    int reset;
+    u16 old_limit;
+};
+
 /* must be called with correct cpu affinity */
-static void threshold_restart_bank(struct threshold_block *b,
-                                   int reset, u16 old_limit)
+static long threshold_restart_bank(void *_tr)
 {
+    struct thresh_restart *tr = _tr;
     u32 mci_misc_hi, mci_misc_lo;
 
-    rdmsr(b->address, mci_misc_lo, mci_misc_hi);
+    rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
 
-    if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
-        reset = 1;      /* limit cannot be lower than err count */
+    if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+        tr->reset = 1;  /* limit cannot be lower than err count */
 
-    if (reset) {        /* reset err count and overflow bit */
+    if (tr->reset) {    /* reset err count and overflow bit */
         mci_misc_hi =
             (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
-            (THRESHOLD_MAX - b->threshold_limit);
-    } else if (old_limit) {     /* change limit w/o reset */
+            (THRESHOLD_MAX - tr->b->threshold_limit);
+    } else if (tr->old_limit) { /* change limit w/o reset */
         int new_count = (mci_misc_hi & THRESHOLD_MAX) +
-            (old_limit - b->threshold_limit);
+            (tr->old_limit - tr->b->threshold_limit);
         mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
             (new_count & THRESHOLD_MAX);
     }
 
-    b->interrupt_enable ?
+    tr->b->interrupt_enable ?
         (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
         (mci_misc_hi &= ~MASK_INT_TYPE_HI);
 
     mci_misc_hi |= MASK_COUNT_EN_HI;
-    wrmsr(b->address, mci_misc_lo, mci_misc_hi);
+    wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+    return 0;
 }
 
@@ -120,6 +127,7 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
     unsigned int cpu = smp_processor_id();
     u8 lvt_off;
     u32 low = 0, high = 0, address = 0;
+    struct thresh_restart tr;
 
     for (bank = 0; bank < NR_BANKS; ++bank) {
         for (block = 0; block < NR_BLOCKS; ++block) {
@@ -162,7 +170,10 @@ void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
             wrmsr(address, low, high);
 
             threshold_defaults.address = address;
-            threshold_restart_bank(&threshold_defaults, 0, 0);
+            tr.b = &threshold_defaults;
+            tr.reset = 0;
+            tr.old_limit = 0;
+            threshold_restart_bank(&tr);
         }
     }
 }
@@ -251,20 +262,6 @@ struct threshold_attr {
     ssize_t(*store) (struct threshold_block *, const char *, size_t count);
 };
 
-static void affinity_set(unsigned int cpu, cpumask_t *oldmask,
-                         cpumask_t *newmask)
-{
-    *oldmask = current->cpus_allowed;
-    cpus_clear(*newmask);
-    cpu_set(cpu, *newmask);
-    set_cpus_allowed_ptr(current, newmask);
-}
-
-static void affinity_restore(const cpumask_t *oldmask)
-{
-    set_cpus_allowed_ptr(current, oldmask);
-}
-
 #define SHOW_FIELDS(name) \
 static ssize_t show_ ## name(struct threshold_block * b, char *buf) \
 { \
@@ -277,15 +274,16 @@ static ssize_t store_interrupt_enable(struct threshold_block *b,
                                       const char *buf, size_t count)
 {
     char *end;
-    cpumask_t oldmask, newmask;
+    struct thresh_restart tr;
     unsigned long new = simple_strtoul(buf, &end, 0);
     if (end == buf)
         return -EINVAL;
     b->interrupt_enable = !!new;
 
-    affinity_set(b->cpu, &oldmask, &newmask);
-    threshold_restart_bank(b, 0, 0);
-    affinity_restore(&oldmask);
+    tr.b = b;
+    tr.reset = 0;
+    tr.old_limit = 0;
+    work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
     return end - buf;
 }
@@ -294,8 +292,7 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
                                      const char *buf, size_t count)
 {
     char *end;
-    cpumask_t oldmask, newmask;
-    u16 old;
+    struct thresh_restart tr;
     unsigned long new = simple_strtoul(buf, &end, 0);
     if (end == buf)
         return -EINVAL;
@@ -303,34 +300,36 @@ static ssize_t store_threshold_limit(struct threshold_block *b,
         new = THRESHOLD_MAX;
     if (new < 1)
         new = 1;
-    old = b->threshold_limit;
+    tr.old_limit = b->threshold_limit;
     b->threshold_limit = new;
+    tr.b = b;
+    tr.reset = 0;
 
-    affinity_set(b->cpu, &oldmask, &newmask);
-    threshold_restart_bank(b, 0, old);
-    affinity_restore(&oldmask);
+    work_on_cpu(b->cpu, threshold_restart_bank, &tr);
 
     return end - buf;
 }
 
+static long local_error_count(void *_b)
+{
+    struct threshold_block *b = _b;
+    u32 low, high;
+
+    rdmsr(b->address, low, high);
+    return (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit);
+}
+
 static ssize_t show_error_count(struct threshold_block *b, char *buf)
 {
-    u32 high, low;
-    cpumask_t oldmask, newmask;
-    affinity_set(b->cpu, &oldmask, &newmask);
-    rdmsr(b->address, low, high);
-    affinity_restore(&oldmask);
-    return sprintf(buf, "%x\n",
-                   (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
+    return sprintf(buf, "%lx\n", work_on_cpu(b->cpu, local_error_count, b));
 }
 
 static ssize_t store_error_count(struct threshold_block *b,
                                  const char *buf, size_t count)
 {
-    cpumask_t oldmask, newmask;
-    affinity_set(b->cpu, &oldmask, &newmask);
-    threshold_restart_bank(b, 1, 0);
-    affinity_restore(&oldmask);
+    struct thresh_restart tr = { .b = b, .reset = 1, .old_limit = 0 };
+
+    work_on_cpu(b->cpu, threshold_restart_bank, &tr);
     return 1;
 }
 
@@ -463,12 +462,19 @@ out_free:
     return err;
 }
 
+static long local_allocate_threshold_blocks(void *_bank)
+{
+    unsigned int *bank = _bank;
+
+    return allocate_threshold_blocks(smp_processor_id(), *bank, 0,
+                                     MSR_IA32_MC0_MISC + *bank * 4);
+}
+
 /* symlinks sibling shared banks to first core.  first core owns dir/files. */
 static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
     int i, err = 0;
     struct threshold_bank *b = NULL;
-    cpumask_t oldmask, newmask;
     char name[32];
 
     sprintf(name, "threshold_bank%i", bank);
@@ -519,11 +525,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
     per_cpu(threshold_banks, cpu)[bank] = b;
 
-    affinity_set(cpu, &oldmask, &newmask);
-    err = allocate_threshold_blocks(cpu, bank, 0,
-                                    MSR_IA32_MC0_MISC + bank * 4);
-    affinity_restore(&oldmask);
-
+    err = work_on_cpu(cpu, local_allocate_threshold_blocks, &bank);
     if (err)
         goto out_free;
 
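work_on_cpu() passes exactly one void * to a long (*fn)(void *), so the three arguments of the old threshold_restart_bank(b, reset, old_limit) are bundled into struct thresh_restart, and the affinity_set()/affinity_restore() helpers disappear entirely. A userspace model of the bundling idiom (work_on_cpu_stub() is a stand-in, not the kernel API):

    /* Userspace model of the work_on_cpu() idiom: several arguments folded
     * into one context struct passed as void * (illustrative names). */
    #include <stdio.h>

    struct thresh_restart_model {
        int bank;
        int reset;
        unsigned short old_limit;
    };

    static long restart_bank(void *_tr)
    {
        struct thresh_restart_model *tr = _tr;

        printf("bank=%d reset=%d old_limit=%u\n",
               tr->bank, tr->reset, tr->old_limit);
        return 0;
    }

    /* Stand-in for work_on_cpu(cpu, fn, arg): run fn(arg) "on" cpu. */
    static long work_on_cpu_stub(int cpu, long (*fn)(void *), void *arg)
    {
        (void)cpu;
        return fn(arg);
    }

    int main(void)
    {
        struct thresh_restart_model tr = { .bank = 4, .reset = 1, .old_limit = 0 };
        return (int)work_on_cpu_stub(0, restart_bank, &tr);
    }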
@@ -77,10 +77,7 @@ static int crash_nmi_callback(struct notifier_block *self,
 
 static void smp_send_nmi_allbutself(void)
 {
-    cpumask_t mask = cpu_online_map;
-    cpu_clear(safe_smp_processor_id(), mask);
-    if (!cpus_empty(mask))
-        send_IPI_mask(mask, NMI_VECTOR);
+    send_IPI_allbutself(NMI_VECTOR);
 }
 
 static struct notifier_block crash_nmi_nb = {
@@ -30,12 +30,12 @@ static int flat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
     return 1;
 }
 
-static cpumask_t flat_target_cpus(void)
+static const struct cpumask *flat_target_cpus(void)
 {
-    return cpu_online_map;
+    return cpu_online_mask;
 }
 
-static cpumask_t flat_vector_allocation_domain(int cpu)
+static void flat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
     /* Careful. Some cpus do not strictly honor the set of cpus
      * specified in the interrupt destination when using lowest
@@ -45,8 +45,8 @@ static cpumask_t flat_vector_allocation_domain(int cpu)
      * deliver interrupts to the wrong hyperthread when only one
      * hyperthread was specified in the interrupt desitination.
      */
-    cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
-    return domain;
+    cpumask_clear(retmask);
+    cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
 }
 
 /*
@@ -69,9 +69,8 @@ static void flat_init_apic_ldr(void)
     apic_write(APIC_LDR, val);
 }
 
-static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
+static inline void _flat_send_IPI_mask(unsigned long mask, int vector)
 {
-    unsigned long mask = cpus_addr(cpumask)[0];
     unsigned long flags;
 
     local_irq_save(flags);
@@ -79,20 +78,41 @@ static void flat_send_IPI_mask(cpumask_t cpumask, int vector)
     local_irq_restore(flags);
 }
 
+static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
+{
+    unsigned long mask = cpumask_bits(cpumask)[0];
+
+    _flat_send_IPI_mask(mask, vector);
+}
+
+static void flat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+                                          int vector)
+{
+    unsigned long mask = cpumask_bits(cpumask)[0];
+    int cpu = smp_processor_id();
+
+    if (cpu < BITS_PER_LONG)
+        clear_bit(cpu, &mask);
+    _flat_send_IPI_mask(mask, vector);
+}
+
 static void flat_send_IPI_allbutself(int vector)
 {
+    int cpu = smp_processor_id();
 #ifdef CONFIG_HOTPLUG_CPU
     int hotplug = 1;
 #else
     int hotplug = 0;
 #endif
     if (hotplug || vector == NMI_VECTOR) {
-        cpumask_t allbutme = cpu_online_map;
+        if (!cpumask_equal(cpu_online_mask, cpumask_of(cpu))) {
+            unsigned long mask = cpumask_bits(cpu_online_mask)[0];
 
-        cpu_clear(smp_processor_id(), allbutme);
+            if (cpu < BITS_PER_LONG)
+                clear_bit(cpu, &mask);
 
-        if (!cpus_empty(allbutme))
-            flat_send_IPI_mask(allbutme, vector);
+            _flat_send_IPI_mask(mask, vector);
+        }
     } else if (num_online_cpus() > 1) {
         __send_IPI_shortcut(APIC_DEST_ALLBUT, vector,APIC_DEST_LOGICAL);
     }
@@ -101,7 +121,7 @@ static void flat_send_IPI_allbutself(int vector)
 static void flat_send_IPI_all(int vector)
 {
     if (vector == NMI_VECTOR)
-        flat_send_IPI_mask(cpu_online_map, vector);
+        flat_send_IPI_mask(cpu_online_mask, vector);
     else
         __send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
 }
@@ -135,9 +155,18 @@ static int flat_apic_id_registered(void)
     return physid_isset(read_xapic_id(), phys_cpu_present_map);
 }
 
-static unsigned int flat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int flat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-    return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
+    return cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
 }
 
+static unsigned int flat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                const struct cpumask *andmask)
+{
+    unsigned long mask1 = cpumask_bits(cpumask)[0] & APIC_ALL_CPUS;
+    unsigned long mask2 = cpumask_bits(andmask)[0] & APIC_ALL_CPUS;
+
+    return mask1 & mask2;
+}
+
 static unsigned int phys_pkg_id(int index_msb)
@@ -157,8 +186,10 @@ struct genapic apic_flat = {
     .send_IPI_all = flat_send_IPI_all,
     .send_IPI_allbutself = flat_send_IPI_allbutself,
     .send_IPI_mask = flat_send_IPI_mask,
+    .send_IPI_mask_allbutself = flat_send_IPI_mask_allbutself,
     .send_IPI_self = apic_send_IPI_self,
     .cpu_mask_to_apicid = flat_cpu_mask_to_apicid,
+    .cpu_mask_to_apicid_and = flat_cpu_mask_to_apicid_and,
     .phys_pkg_id = phys_pkg_id,
     .get_apic_id = get_apic_id,
     .set_apic_id = set_apic_id,
@@ -188,35 +219,39 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
     return 0;
 }
 
-static cpumask_t physflat_target_cpus(void)
+static const struct cpumask *physflat_target_cpus(void)
 {
-    return cpu_online_map;
+    return cpu_online_mask;
 }
 
-static cpumask_t physflat_vector_allocation_domain(int cpu)
+static void physflat_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-    return cpumask_of_cpu(cpu);
+    cpumask_clear(retmask);
+    cpumask_set_cpu(cpu, retmask);
 }
 
-static void physflat_send_IPI_mask(cpumask_t cpumask, int vector)
+static void physflat_send_IPI_mask(const struct cpumask *cpumask, int vector)
 {
     send_IPI_mask_sequence(cpumask, vector);
 }
 
+static void physflat_send_IPI_mask_allbutself(const struct cpumask *cpumask,
+                                              int vector)
+{
+    send_IPI_mask_allbutself(cpumask, vector);
+}
+
 static void physflat_send_IPI_allbutself(int vector)
 {
-    cpumask_t allbutme = cpu_online_map;
-
-    cpu_clear(smp_processor_id(), allbutme);
-    physflat_send_IPI_mask(allbutme, vector);
+    send_IPI_mask_allbutself(cpu_online_mask, vector);
 }
 
 static void physflat_send_IPI_all(int vector)
 {
-    physflat_send_IPI_mask(cpu_online_map, vector);
+    physflat_send_IPI_mask(cpu_online_mask, vector);
 }
 
-static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int physflat_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
     int cpu;
 
@@ -224,13 +259,29 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
      * We're using fixed IRQ delivery, can only return one phys APIC ID.
      * May as well be the first.
      */
-    cpu = first_cpu(cpumask);
+    cpu = cpumask_first(cpumask);
     if ((unsigned)cpu < nr_cpu_ids)
         return per_cpu(x86_cpu_to_apicid, cpu);
     else
         return BAD_APICID;
 }
 
+static unsigned int
+physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                const struct cpumask *andmask)
+{
+    int cpu;
+
+    /*
+     * We're using fixed IRQ delivery, can only return one phys APIC ID.
+     * May as well be the first.
+     */
+    cpu = cpumask_any_and(cpumask, andmask);
+    if (cpu < nr_cpu_ids)
+        return per_cpu(x86_cpu_to_apicid, cpu);
+    return BAD_APICID;
+}
+
 struct genapic apic_physflat = {
     .name = "physical flat",
     .acpi_madt_oem_check = physflat_acpi_madt_oem_check,
@@ -243,8 +294,10 @@ struct genapic apic_physflat = {
     .send_IPI_all = physflat_send_IPI_all,
     .send_IPI_allbutself = physflat_send_IPI_allbutself,
     .send_IPI_mask = physflat_send_IPI_mask,
+    .send_IPI_mask_allbutself = physflat_send_IPI_mask_allbutself,
     .send_IPI_self = apic_send_IPI_self,
     .cpu_mask_to_apicid = physflat_cpu_mask_to_apicid,
+    .cpu_mask_to_apicid_and = physflat_cpu_mask_to_apicid_and,
     .phys_pkg_id = phys_pkg_id,
     .get_apic_id = get_apic_id,
     .set_apic_id = set_apic_id,
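The flat APIC addresses at most BITS_PER_LONG CPUs in logical flat mode, so the send path is split out into _flat_send_IPI_mask(), which operates on word 0 of the cpumask, and the allbutself variant simply clears the sender's bit in that word before writing the ICR. A userspace model of that single-word fast path (illustrative names):

    /* Userspace model of the single-word fast path in the flat APIC code:
     * only word 0 of the cpumask matters (not kernel code). */
    #include <stdio.h>

    static void _flat_send_ipi_mask(unsigned long mask, int vector)
    {
        printf("ICR write: mask=%#lx vector=%d\n", mask, vector);
    }

    static void flat_send_ipi_mask_allbutself(const unsigned long *maskbits,
                                              int vector, unsigned int this_cpu)
    {
        unsigned long mask = maskbits[0];      /* cpumask_bits(cpumask)[0] */

        if (this_cpu < 8 * sizeof(mask))
            mask &= ~(1UL << this_cpu);        /* clear_bit(cpu, &mask) */
        _flat_send_ipi_mask(mask, vector);
    }

    int main(void)
    {
        unsigned long online[1] = { 0xff };    /* CPUs 0-7 "online" */
        flat_send_ipi_mask_allbutself(online, 0xfd, 3);
        return 0;
    }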
@@ -22,19 +22,18 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 
 /* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
 
-static cpumask_t x2apic_target_cpus(void)
+static const struct cpumask *x2apic_target_cpus(void)
 {
-    return cpumask_of_cpu(0);
+    return cpumask_of(0);
 }
 
 /*
  * for now each logical cpu is in its own vector allocation domain.
  */
-static cpumask_t x2apic_vector_allocation_domain(int cpu)
+static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
 {
-    cpumask_t domain = CPU_MASK_NONE;
-    cpu_set(cpu, domain);
-    return domain;
+    cpumask_clear(retmask);
+    cpumask_set_cpu(cpu, retmask);
 }
 
 static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@@ -56,32 +55,53 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
  * at once. We have 16 cpu's in a cluster. This will minimize IPI register
  * writes.
  */
-static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
+static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
 {
     unsigned long flags;
     unsigned long query_cpu;
 
     local_irq_save(flags);
-    for_each_cpu_mask(query_cpu, mask) {
-        __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_logical_apicid, query_cpu),
-                               vector, APIC_DEST_LOGICAL);
-    }
+    for_each_cpu(query_cpu, mask)
+        __x2apic_send_IPI_dest(
+            per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+            vector, APIC_DEST_LOGICAL);
+    local_irq_restore(flags);
+}
+
+static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
+                                            int vector)
+{
+    unsigned long flags;
+    unsigned long query_cpu;
+    unsigned long this_cpu = smp_processor_id();
+
+    local_irq_save(flags);
+    for_each_cpu(query_cpu, mask)
+        if (query_cpu != this_cpu)
+            __x2apic_send_IPI_dest(
+                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                vector, APIC_DEST_LOGICAL);
     local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_allbutself(int vector)
 {
-    cpumask_t mask = cpu_online_map;
+    unsigned long flags;
+    unsigned long query_cpu;
+    unsigned long this_cpu = smp_processor_id();
 
-    cpu_clear(smp_processor_id(), mask);
-
-    if (!cpus_empty(mask))
-        x2apic_send_IPI_mask(mask, vector);
+    local_irq_save(flags);
+    for_each_online_cpu(query_cpu)
+        if (query_cpu != this_cpu)
+            __x2apic_send_IPI_dest(
+                per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+                vector, APIC_DEST_LOGICAL);
+    local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_all(int vector)
 {
-    x2apic_send_IPI_mask(cpu_online_map, vector);
+    x2apic_send_IPI_mask(cpu_online_mask, vector);
 }
 
 static int x2apic_apic_id_registered(void)
@@ -89,7 +109,7 @@ static int x2apic_apic_id_registered(void)
     return 1;
 }
 
-static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
+static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
     int cpu;
 
@@ -97,13 +117,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
     * We're using fixed IRQ delivery, can only return one phys APIC ID.
     * May as well be the first.
     */
-    cpu = first_cpu(cpumask);
-    if ((unsigned)cpu < NR_CPUS)
+    cpu = cpumask_first(cpumask);
+    if ((unsigned)cpu < nr_cpu_ids)
         return per_cpu(x86_cpu_to_logical_apicid, cpu);
     else
         return BAD_APICID;
 }
 
+static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+                                                  const struct cpumask *andmask)
+{
+    int cpu;
+
+    /*
+     * We're using fixed IRQ delivery, can only return one phys APIC ID.
+     * May as well be the first.
+     */
+    cpu = cpumask_any_and(cpumask, andmask);
+    if (cpu < nr_cpu_ids)
+        return per_cpu(x86_cpu_to_apicid, cpu);
+    return BAD_APICID;
+}
+
 static unsigned int get_apic_id(unsigned long x)
 {
     unsigned int id;
@@ -150,8 +185,10 @@ struct genapic apic_x2apic_cluster = {
     .send_IPI_all = x2apic_send_IPI_all,
     .send_IPI_allbutself = x2apic_send_IPI_allbutself,
     .send_IPI_mask = x2apic_send_IPI_mask,
+    .send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
     .send_IPI_self = x2apic_send_IPI_self,
     .cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
+    .cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
     .phys_pkg_id = phys_pkg_id,
     .get_apic_id = get_apic_id,
     .set_apic_id = set_apic_id,
@ -29,16 +29,15 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
||||
|
||||
/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */
|
||||
|
||||
static cpumask_t x2apic_target_cpus(void)
|
||||
static const struct cpumask *x2apic_target_cpus(void)
|
||||
{
|
||||
return cpumask_of_cpu(0);
|
||||
return cpumask_of(0);
|
||||
}
|
||||
|
||||
static cpumask_t x2apic_vector_allocation_domain(int cpu)
|
||||
static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)
|
||||
{
|
||||
cpumask_t domain = CPU_MASK_NONE;
|
||||
cpu_set(cpu, domain);
|
||||
return domain;
|
||||
cpumask_clear(retmask);
|
||||
cpumask_set_cpu(cpu, retmask);
|
||||
}
|
||||
|
||||
static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
@ -54,32 +53,54 @@ static void __x2apic_send_IPI_dest(unsigned int apicid, int vector,
	x2apic_icr_write(cfg, apicid);
}

static void x2apic_send_IPI_mask(cpumask_t mask, int vector)
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned long query_cpu;

	local_irq_save(flags);
	for_each_cpu_mask(query_cpu, mask) {
	for_each_cpu(query_cpu, mask) {
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_mask_allbutself(const struct cpumask *mask,
					    int vector)
{
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask) {
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}

static void x2apic_send_IPI_allbutself(int vector)
{
	cpumask_t mask = cpu_online_map;
	unsigned long flags;
	unsigned long query_cpu;
	unsigned long this_cpu = smp_processor_id();

	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		x2apic_send_IPI_mask(mask, vector);
	local_irq_save(flags);
	for_each_online_cpu(query_cpu)
		if (query_cpu != this_cpu)
			__x2apic_send_IPI_dest(
				per_cpu(x86_cpu_to_apicid, query_cpu),
				vector, APIC_DEST_PHYSICAL);
	local_irq_restore(flags);
}

static void x2apic_send_IPI_all(int vector)
{
	x2apic_send_IPI_mask(cpu_online_map, vector);
	x2apic_send_IPI_mask(cpu_online_mask, vector);
}

static int x2apic_apic_id_registered(void)
@ -87,7 +108,7 @@ static int x2apic_apic_id_registered(void)
	return 1;
}

static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
static unsigned int x2apic_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

@ -95,13 +116,28 @@ static unsigned int x2apic_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
						  const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_any_and(cpumask, andmask);
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;
}

static unsigned int get_apic_id(unsigned long x)
{
	unsigned int id;
@ -145,8 +181,10 @@ struct genapic apic_x2apic_phys = {
	.send_IPI_all = x2apic_send_IPI_all,
	.send_IPI_allbutself = x2apic_send_IPI_allbutself,
	.send_IPI_mask = x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself = x2apic_send_IPI_mask_allbutself,
	.send_IPI_self = x2apic_send_IPI_self,
	.cpu_mask_to_apicid = x2apic_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = x2apic_cpu_mask_to_apicid_and,
	.phys_pkg_id = phys_pkg_id,
	.get_apic_id = get_apic_id,
	.set_apic_id = set_apic_id,

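The new cpu_mask_to_apicid_and() above avoids materializing a temporary "mask1 AND mask2" on the stack: cpumask_any_and() scans the two masks together and returns a value >= nr_cpu_ids when the intersection is empty. A small sketch of the idiom, intended as illustration only:

/* Sketch: pick one CPU from the intersection of two masks without a
 * temporary cpumask.  cpumask_any_and() returns >= nr_cpu_ids when
 * the intersection is empty, so a single bound check suffices.
 */
static int first_common_cpu(const struct cpumask *a, const struct cpumask *b)
{
	int cpu = cpumask_any_and(a, b);

	return (cpu < nr_cpu_ids) ? cpu : -1;	/* -1: no common CPU */
}

Note also that the range checks move from the compile-time NR_CPUS ceiling to nr_cpu_ids, the runtime number of possible CPU ids, which is usually far smaller.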
@ -75,16 +75,15 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

static cpumask_t uv_target_cpus(void)
static const struct cpumask *uv_target_cpus(void)
{
	return cpumask_of_cpu(0);
	return cpumask_of(0);
}

static cpumask_t uv_vector_allocation_domain(int cpu)
static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_t domain = CPU_MASK_NONE;
	cpu_set(cpu, domain);
	return domain;
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
@ -123,28 +122,37 @@ static void uv_send_IPI_one(int cpu, int vector)
	uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
}

static void uv_send_IPI_mask(cpumask_t mask, int vector)
static void uv_send_IPI_mask(const struct cpumask *mask, int vector)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, mask))
	for_each_cpu(cpu, mask)
		uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();

	for_each_cpu(cpu, mask)
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_allbutself(int vector)
{
	cpumask_t mask = cpu_online_map;
	unsigned int cpu;
	unsigned int this_cpu = smp_processor_id();

	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		uv_send_IPI_mask(mask, vector);
	for_each_online_cpu(cpu)
		if (cpu != this_cpu)
			uv_send_IPI_one(cpu, vector);
}

static void uv_send_IPI_all(int vector)
{
	uv_send_IPI_mask(cpu_online_map, vector);
	uv_send_IPI_mask(cpu_online_mask, vector);
}

static int uv_apic_id_registered(void)
@ -156,7 +164,7 @@ static void uv_init_apic_ldr(void)
{
}

static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu;

@ -164,13 +172,28 @@ static unsigned int uv_cpu_mask_to_apicid(cpumask_t cpumask)
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	cpu = cpumask_first(cpumask);
	if ((unsigned)cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	else
		return BAD_APICID;
}

static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
					      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = cpumask_any_and(cpumask, andmask);
	if (cpu < nr_cpu_ids)
		return per_cpu(x86_cpu_to_apicid, cpu);
	return BAD_APICID;
}

static unsigned int get_apic_id(unsigned long x)
{
	unsigned int id;
@ -218,8 +241,10 @@ struct genapic apic_x2apic_uv_x = {
	.send_IPI_all = uv_send_IPI_all,
	.send_IPI_allbutself = uv_send_IPI_allbutself,
	.send_IPI_mask = uv_send_IPI_mask,
	.send_IPI_mask_allbutself = uv_send_IPI_mask_allbutself,
	.send_IPI_self = uv_send_IPI_self,
	.cpu_mask_to_apicid = uv_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and = uv_cpu_mask_to_apicid_and,
	.phys_pkg_id = phys_pkg_id,
	.get_apic_id = get_apic_id,
	.set_apic_id = set_apic_id,

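Both send_IPI_allbutself() rewrites above share one motivation: "cpumask_t mask = cpu_online_map;" copied the full online map onto the stack just to clear one bit. Iterating the online mask and skipping the sender does the same job with no copy. A minimal sketch, where send_one() is a hypothetical stand-in for the per-CPU send routine:

/* Sketch of the allbutself idiom after the conversion. */
static void sketch_send_allbutself(int vector)
{
	unsigned int cpu, this_cpu = smp_processor_id();

	for_each_online_cpu(cpu)
		if (cpu != this_cpu)
			send_one(cpu, vector);	/* hypothetical helper */
}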
@ -136,8 +136,8 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int cpu)

struct irq_cfg {
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	cpumask_var_t domain;
	cpumask_var_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
@ -149,22 +149,22 @@ static struct irq_cfg irq_cfgx[] = {
#else
static struct irq_cfg irq_cfgx[NR_IRQS] = {
#endif
	[0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
	[0]  = { .vector = IRQ0_VECTOR,  },
	[1]  = { .vector = IRQ1_VECTOR,  },
	[2]  = { .vector = IRQ2_VECTOR,  },
	[3]  = { .vector = IRQ3_VECTOR,  },
	[4]  = { .vector = IRQ4_VECTOR,  },
	[5]  = { .vector = IRQ5_VECTOR,  },
	[6]  = { .vector = IRQ6_VECTOR,  },
	[7]  = { .vector = IRQ7_VECTOR,  },
	[8]  = { .vector = IRQ8_VECTOR,  },
	[9]  = { .vector = IRQ9_VECTOR,  },
	[10] = { .vector = IRQ10_VECTOR, },
	[11] = { .vector = IRQ11_VECTOR, },
	[12] = { .vector = IRQ12_VECTOR, },
	[13] = { .vector = IRQ13_VECTOR, },
	[14] = { .vector = IRQ14_VECTOR, },
	[15] = { .vector = IRQ15_VECTOR, },
};

void __init arch_early_irq_init(void)
@ -180,6 +180,10 @@ void __init arch_early_irq_init(void)
	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
		alloc_bootmem_cpumask_var(&cfg[i].domain);
		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
		if (i < NR_IRQS_LEGACY)
			cpumask_setall(cfg[i].domain);
	}
}

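The switch from cpumask_t to cpumask_var_t in irq_cfg is the core trick of this series: with CONFIG_CPUMASK_OFFSTACK=y (selected by MAXSMP) a cpumask_var_t is a pointer that must be allocated and freed, while without it the type degrades to a one-element array and the alloc/free calls compile to no-ops that always succeed. A sketch of the lifecycle:

#include <linux/cpumask.h>

/* Sketch of the cpumask_var_t lifecycle.  On CONFIG_CPUMASK_OFFSTACK
 * kernels the bitmap lives off-stack; otherwise this costs nothing.
 */
static int cpumask_var_demo(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;			/* only possible off-stack */

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	/* ... use mask like any struct cpumask * ... */
	free_cpumask_var(mask);
	return 0;
}

Fields that exist before the slab allocator is up, like the legacy irq_cfgx entries above, use alloc_bootmem_cpumask_var() instead.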
@ -204,6 +208,20 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
	node = cpu_to_node(cpu);

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		/* FIXME: needs alloc_cpumask_var_node() */
		if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		} else {
			cpumask_clear(cfg->domain);
			cpumask_clear(cfg->old_domain);
		}
	}
	printk(KERN_DEBUG " alloc irq_cfg on cpu %d node %d\n", cpu, node);

	return cfg;
@ -231,7 +249,8 @@ static struct irq_cfg *irq_cfg(unsigned int irq)

#endif

static inline void set_extra_move_desc(struct irq_desc *desc, cpumask_t mask)
static inline void
set_extra_move_desc(struct irq_desc *desc, const struct cpumask *mask)
{
}

@ -361,6 +380,26 @@ static void ioapic_mask_entry(int apic, int pin)
}

#ifdef CONFIG_SMP
static void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;
		cfg->move_cleanup_count = 0;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			cfg->move_cleanup_count++;
		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

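send_cleanup_vector() above has to cope with alloc_cpumask_var() failing in atomic context. The fallback walks old_domain intersected with the online map twice: the first pass finishes counting before the second pass sends any IPI, so move_cleanup_count can never be decremented past zero by an early-arriving cleanup interrupt. A condensed sketch of that two-pass shape:

/* Sketch of the degraded (allocation-failed) path: two passes over
 * the same intersection, count first, send second.
 */
static void sketch_cleanup_fallback(struct irq_cfg *cfg)
{
	unsigned int i, count = 0;

	for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
		count++;			/* pass 1: full count */
	cfg->move_cleanup_count = count;
	for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
		send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
}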
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
{
	int apic, pin;
@ -396,42 +435,55 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
	}
}

static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);

static void set_ioapic_affinity_irq_desc(struct irq_desc *desc,
					 const struct cpumask *mask)
/*
 * Either sets desc->affinity to a valid value, and returns cpu_mask_to_apicid
 * of that, or returns BAD_APICID and leaves desc->affinity untouched.
 */
static unsigned int
set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return BAD_APICID;

	irq = desc->irq;
	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, mask))
		return BAD_APICID;

	cpumask_and(&desc->affinity, cfg->domain, mask);
	set_extra_move_desc(desc, mask);
	return cpu_mask_to_apicid_and(&desc->affinity, cpu_online_mask);
}

static void
set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	unsigned int irq;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return;

	irq = desc->irq;
	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg);
	cpumask_copy(&desc->affinity, mask);
	dest = set_desc_affinity(desc, mask);
	if (dest != BAD_APICID) {
		/* Only the high 8 bits are valid. */
		dest = SET_APIC_LOGICAL_ID(dest);
		__target_IO_APIC_irq(irq, dest, cfg);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void set_ioapic_affinity_irq(unsigned int irq,
				    const struct cpumask *mask)
static void
set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc;

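set_desc_affinity() becomes the single choke point for every ->set_affinity implementation in this file: it validates the mask against the online map, reassigns the vector, updates desc->affinity, and hands back either a destination APIC ID or BAD_APICID. Callers then only touch hardware on success. A sketch of the resulting caller shape:

/* Sketch of the common caller pattern around set_desc_affinity(). */
static void sketch_chip_set_affinity(struct irq_desc *desc,
				     const struct cpumask *mask)
{
	unsigned int dest = set_desc_affinity(desc, mask);

	if (dest == BAD_APICID)
		return;		/* offline-only mask or no free vector */

	/* device-specific step: reprogram the destination to 'dest' */
}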
@ -1099,7 +1151,8 @@ void unlock_vector_lock(void)
	spin_unlock(&vector_lock);
}

static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
@ -1114,36 +1167,39 @@ static int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);
	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);
		vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
@ -1156,7 +1212,7 @@ next:
		if (vector == SYSCALL_VECTOR)
			goto next;
#endif
		for_each_cpu_mask_nr(new_cpu, new_mask)
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
@ -1164,18 +1220,21 @@ next:
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	return -ENOSPC;
	free_cpumask_var(tmp_mask);
	return err;
}

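__assign_irq_vector() is restructured around a single exit: the scratch mask is allocated once, err defaults to -ENOSPC, success breaks out of the loop instead of returning, and free_cpumask_var() sits on the one path out so no branch can leak the allocation. The skeleton, with the vector search abstracted into a hypothetical try_cpu():

/* Skeleton of the single-exit allocation pattern; try_cpu() is a
 * stand-in for the vector-search body, not a real kernel function.
 */
static int sketch_assign(const struct cpumask *mask)
{
	cpumask_var_t tmp_mask;
	int cpu, err = -ENOSPC;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (try_cpu(cpu, tmp_mask)) {	/* hypothetical */
			err = 0;
			break;
		}
	}
	free_cpumask_var(tmp_mask);
	return err;
}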
static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
static int
assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;
@ -1188,23 +1247,20 @@ static int assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)

static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	cpumask_t mask;
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	cpus_and(mask, cfg->old_domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask) {
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
@ -1229,7 +1285,7 @@ void __setup_vector_irq(int cpu)
		if (!desc)
			continue;
		cfg = desc->chip_data;
		if (!cpu_isset(cpu, cfg->domain))
		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
@ -1241,7 +1297,7 @@ void __setup_vector_irq(int cpu)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
@ -1377,18 +1433,17 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = desc->chip_data;

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, cfg, mask))
	if (assign_irq_vector(irq, cfg, TARGET_CPUS))
		return;

	cpus_and(mask, cfg->domain, mask);
	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

	apic_printk(APIC_VERBOSE,KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
@ -1398,8 +1453,7 @@ static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq, struct irq_de

	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
			       dest, trigger, polarity, cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq, cfg);
@ -2121,7 +2175,7 @@ static int ioapic_retrigger_irq(unsigned int irq)
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
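The retrigger line above shows the other half of the API swap: cpumask_of(cpu) returns a const struct cpumask * into a prebuilt table of single-bit masks, so pointing send_IPI_mask() at one CPU no longer copies a bitmap the way cpumask_of_cpu() did. A minimal sketch:

/* Sketch: a one-CPU IPI with no bitmap copy. */
static void sketch_retrigger(struct irq_cfg *cfg, int vector)
{
	const struct cpumask *one = cpumask_of(cpumask_first(cfg->domain));

	send_IPI_mask(one, vector);	/* 'one' is shared, read-only */
}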
@ -2170,18 +2224,17 @@ static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to IO-APIC RTE.
 */
static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
static void
migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
{
	struct irq_cfg *cfg;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;
	unsigned int irq;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
	if (!cpumask_intersects(mask, cpu_online_mask))
		return;

	irq = desc->irq;
@ -2194,8 +2247,7 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)

	set_extra_move_desc(desc, mask);

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	dest = cpu_mask_to_apicid_and(cfg->domain, mask);

	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
@ -2212,14 +2264,10 @@ static void migrate_ioapic_irq_desc(struct irq_desc *desc, cpumask_t mask)
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);

	desc->affinity = mask;
	cpumask_copy(&desc->affinity, mask);
}

static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
@ -2241,11 +2289,11 @@ static int migrate_irq_remapped_level_desc(struct irq_desc *desc)
	}

	/* everthing is clear. we have right of way */
	migrate_ioapic_irq_desc(desc, desc->pending_mask);
	migrate_ioapic_irq_desc(desc, &desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpus_clear(desc->pending_mask);
	cpumask_clear(&desc->pending_mask);

unmask:
	unmask_IO_APIC_irq_desc(desc);
@ -2292,7 +2340,7 @@ static void set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
		return;
	}

	migrate_ioapic_irq_desc(desc, *mask);
	migrate_ioapic_irq_desc(desc, mask);
}
static void set_ir_ioapic_affinity_irq(unsigned int irq,
				       const struct cpumask *mask)
@ -2331,7 +2379,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
@ -2354,14 +2402,8 @@ static void irq_complete_move(struct irq_desc **descp)

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}
#else
static inline void irq_complete_move(struct irq_desc **descp) {}
@ -3086,16 +3128,13 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	cfg = irq_cfg(irq);
	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, cfg, tmp);
	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
	if (err)
		return err;

	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);
	dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
@ -3155,19 +3194,12 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	if (!cpumask_intersects(mask, cpu_online_mask))
	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg_desc(desc, &msg);

@ -3177,37 +3209,27 @@ static void set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg_desc(desc, &msg);
	cpumask_copy(&desc->affinity, mask);
}
#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq,
				    const struct cpumask *mask)
static void
ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return;

	if (get_irte(irq, &irte))
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

@ -3221,14 +3243,8 @@ static void ir_set_msi_irq_affinity(unsigned int irq,
	 * at the new destination. So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	cpumask_copy(&desc->affinity, mask);
	if (cfg->move_in_progress)
		send_cleanup_vector(cfg);
}

#endif
@ -3425,19 +3441,12 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	if (!cpumask_intersects(mask, cpu_online_mask))
	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

@ -3447,7 +3456,6 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	cpumask_copy(&desc->affinity, mask);
}

#endif /* CONFIG_SMP */
@ -3487,19 +3495,12 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	if (!cpumask_intersects(mask, cpu_online_mask))
	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	hpet_msi_read(irq, &msg);

@ -3509,7 +3510,6 @@ static void hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);
	cpumask_copy(&desc->affinity, mask);
}

#endif /* CONFIG_SMP */
@ -3569,22 +3569,14 @@ static void set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;

	if (!cpumask_intersects(mask, cpu_online_mask))
	dest = set_desc_affinity(desc, mask);
	if (dest == BAD_APICID)
		return;

	cfg = desc->chip_data;
	if (assign_irq_vector(irq, cfg, *mask))
		return;

	set_extra_move_desc(desc, *mask);

	cpumask_and(&tmp, &cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	cpumask_copy(&desc->affinity, mask);
}

#endif
@ -3604,17 +3596,14 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	cfg = irq_cfg(irq);
	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, cfg, tmp);
	err = assign_irq_vector(irq, cfg, TARGET_CPUS);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);
		dest = cpu_mask_to_apicid_and(cfg->domain, TARGET_CPUS);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

@ -3650,7 +3639,7 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset)
{
	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
	const struct cpumask *eligible_cpu = cpumask_of(cpu);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
@ -3660,7 +3649,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,

	cfg = irq_cfg(irq);

	err = assign_irq_vector(irq, cfg, *eligible_cpu);
	err = assign_irq_vector(irq, cfg, eligible_cpu);
	if (err != 0)
		return err;

@ -3679,7 +3668,7 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = cpu_mask_to_apicid(*eligible_cpu);
	entry->dest = cpu_mask_to_apicid(eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
@ -3890,7 +3879,7 @@ void __init setup_ioapic_dest(void)
	int pin, ioapic, irq, irq_entry;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	cpumask_t mask;
	const struct cpumask *mask;

	if (skip_ioapic_setup == 1)
		return;
@ -3921,16 +3910,16 @@ void __init setup_ioapic_dest(void)
			 */
			if (desc->status &
			    (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
				mask = desc->affinity;
				mask = &desc->affinity;
			else
				mask = TARGET_CPUS;

#ifdef CONFIG_INTR_REMAP
			if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq_desc(desc, &mask);
				set_ir_ioapic_affinity_irq_desc(desc, mask);
			else
#endif
				set_ioapic_affinity_irq_desc(desc, &mask);
				set_ioapic_affinity_irq_desc(desc, mask);
		}

}

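After this file, every MSI-style ->set_affinity (MSI, DMAR, HPET, HT) collapses to the same two steps: set_desc_affinity(), then a device-specific read/modify/write of the destination field. A hedged sketch of the MSI variant; the mask macro is assumed to follow the msidef.h naming and should be treated as illustrative:

/* Sketch of the shared shape; MSI_ADDR_DEST_ID_MASK is assumed here. */
static void sketch_msi_set_affinity(struct irq_desc *desc,
				    const struct cpumask *mask)
{
	struct msi_msg msg;
	unsigned int dest = set_desc_affinity(desc, mask);

	if (dest == BAD_APICID)
		return;

	read_msi_msg_desc(desc, &msg);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;	/* assumed macro */
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
	write_msi_msg_desc(desc, &msg);
}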
@ -116,18 +116,18 @@ static inline void __send_IPI_dest_field(unsigned long mask, int vector)
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
void send_IPI_mask_bitmask(const struct cpumask *cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long mask = cpumask_bits(cpumask)[0];
	unsigned long flags;

	local_irq_save(flags);
	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
	__send_IPI_dest_field(mask, vector);
	local_irq_restore(flags);
}

void send_IPI_mask_sequence(cpumask_t mask, int vector)
void send_IPI_mask_sequence(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
@ -139,12 +139,24 @@ void send_IPI_mask_sequence(cpumask_t mask, int vector)
	 */

	local_irq_save(flags);
	for_each_possible_cpu(query_cpu) {
		if (cpu_isset(query_cpu, mask)) {
	for_each_cpu(query_cpu, mask)
		__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu), vector);
	local_irq_restore(flags);
}

void send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	unsigned int query_cpu;
	unsigned int this_cpu = smp_processor_id();

	/* See Hack comment above */

	local_irq_save(flags);
	for_each_cpu(query_cpu, mask)
		if (query_cpu != this_cpu)
			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
					      vector);
		}
	}
	local_irq_restore(flags);
}

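One detail in send_IPI_mask_bitmask() worth noting: cpumask_bits() exposes the raw unsigned long array behind a mask, and on the machines this path serves (at most BITS_PER_LONG CPUs) word 0 is the entire mask, so the online-CPU sanity check stays a single AND. A sketch:

/* Sketch: word 0 of a cpumask covers every CPU when
 * nr_cpu_ids <= BITS_PER_LONG, which is the precondition here.
 */
static void sketch_check_online(const struct cpumask *mask)
{
	unsigned long bits = cpumask_bits(mask)[0];

	WARN_ON(bits & ~cpumask_bits(cpu_online_mask)[0]);
}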
@ -233,27 +233,28 @@ unsigned int do_IRQ(struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

void fixup_irqs(cpumask_t map)
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		cpus_and(mask, desc->affinity, map);
		if (any_online_cpu(mask) == NR_CPUS) {
		affinity = &desc->affinity;
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			mask = map;
			affinity = cpu_all_mask;
		}
		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, &mask);
			desc->chip->set_affinity(irq, affinity);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

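fixup_irqs() loses its cpumask_t parameter because the interesting question is simply "does this IRQ's affinity still intersect the online map?". The emptiness test is the same cpumask_any_and() >= nr_cpu_ids idiom used elsewhere in the series:

/* Sketch of the test fixup_irqs() now builds on. */
static bool sketch_affinity_broken(const struct cpumask *affinity)
{
	return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
}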
@ -83,16 +83,17 @@ asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq;
	static int warned;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		cpumask_t mask;
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
@ -102,23 +103,23 @@ void fixup_irqs(cpumask_t map)
		/* interrupt's are disabled at this point */
		spin_lock(&desc->lock);

		affinity = &desc->affinity;
		if (!irq_has_action(irq) ||
		    cpus_equal(desc->affinity, map)) {
		    cpumask_equal(affinity, cpu_online_mask)) {
			spin_unlock(&desc->lock);
			continue;
		}

		cpus_and(mask, desc->affinity, map);
		if (cpus_empty(mask)) {
		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			mask = map;
			affinity = cpu_all_mask;
		}

		if (desc->chip->mask)
			desc->chip->mask(irq);

		if (desc->chip->set_affinity)
			desc->chip->set_affinity(irq, &mask);
			desc->chip->set_affinity(irq, affinity);
		else if (!(warned++))
			set_affinity = 0;

@ -152,6 +152,11 @@ void __init setup_per_cpu_areas(void)
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	printk(KERN_INFO
		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
			  size);

@ -168,24 +173,24 @@ void __init setup_per_cpu_areas(void)
			       "cpu %d has no node %d or node-local memory\n",
				cpu, node);
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
				printk(KERN_DEBUG
					"per cpu data for cpu%d at %016lx\n",
					 cpu, __pa(ptr));
		}
		else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			if (ptr)
				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
					cpu, node, __pa(ptr));
				printk(KERN_DEBUG
					"per cpu data for cpu%d on node%d "
					"at %016lx\n",
					cpu, node, __pa(ptr));
		}
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}

	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
		NR_CPUS, nr_cpu_ids, nr_node_ids);

	/* Setup percpu data maps */
	setup_per_cpu_maps();

@ -118,22 +118,22 @@ static void native_smp_send_reschedule(int cpu)
		WARN_ON(1);
		return;
	}
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
	send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
}

void native_send_call_func_single_ipi(int cpu)
{
	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR);
	send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(cpumask_t mask)
void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_t allbutself;

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	if (cpus_equal(mask, allbutself) &&
	if (cpus_equal(*mask, allbutself) &&
	    cpus_equal(cpu_online_map, cpu_callout_map))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else

@ -1344,7 +1344,7 @@ void cpu_disable_common(void)
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs(cpu_online_map);
	fixup_irqs();
}

int native_cpu_disable(void)

@ -164,7 +164,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */

@ -191,7 +191,7 @@ void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
	send_IPI_mask(&cpumask, INVALIDATE_TLB_VECTOR_START + sender);

	while (!cpus_empty(f->flush_cpumask))
		cpu_relax();

@ -42,9 +42,10 @@ static const struct dmi_system_id bigsmp_dmi_table[] = {
	 { }
};

static cpumask_t vector_allocation_domain(int cpu)
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	return cpumask_of_cpu(cpu);
	cpus_clear(*retmask);
	cpu_set(cpu, *retmask);
}

static int probe_bigsmp(void)

@ -87,7 +87,7 @@ static int __init acpi_madt_oem_check(char *oem_id, char *oem_table_id)
}
#endif

static cpumask_t vector_allocation_domain(int cpu)
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@ -97,8 +97,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
	return domain;
	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
}

struct genapic __initdata_refok apic_es7000 = APIC_INIT("es7000", probe_es7000);

@ -38,7 +38,7 @@ static int acpi_madt_oem_check(char *oem_id, char *oem_table_id)
	return 0;
}

static cpumask_t vector_allocation_domain(int cpu)
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@ -48,8 +48,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
	return domain;
	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
}

struct genapic apic_numaq = APIC_INIT("NUMAQ", probe_numaq);

@ -24,7 +24,7 @@ static int probe_summit(void)
	return 0;
}

static cpumask_t vector_allocation_domain(int cpu)
static void vector_allocation_domain(int cpu, cpumask_t *retmask)
{
	/* Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
@ -34,8 +34,7 @@ static cpumask_t vector_allocation_domain(int cpu)
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt desitination.
	 */
	cpumask_t domain = { { [0] = APIC_ALL_CPUS, } };
	return domain;
	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
}

struct genapic apic_summit = APIC_INIT("summit", probe_summit);

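The three flat-APIC subarches above (es7000, numaq, summit) keep cpumask_t in their vector_allocation_domain() because only the low byte can ever matter for lowest-priority delivery; the compound literal zeroes the rest of the mask and sets word 0 to APIC_ALL_CPUS (0xFF). The assignment they all converge on:

/* The idiom all three converge on: zero-init via compound literal,
 * with only the low 8 bits (APIC_ALL_CPUS) populated in word 0.
 */
static void sketch_flat_domain(int cpu, cpumask_t *retmask)
{
	*retmask = (cpumask_t){ { [0] = APIC_ALL_CPUS, } };
}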
@ -672,7 +672,7 @@ void __init smp_boot_cpus(void)

	/* loop over all the extended VIC CPUs and boot them.  The
	 * Quad CPUs must be bootstrapped by their extended VIC cpu */
	for (i = 0; i < NR_CPUS; i++) {
	for (i = 0; i < nr_cpu_ids; i++) {
		if (i == boot_cpu_id || !cpu_isset(i, phys_cpu_present_map))
			continue;
		do_boot_cpu(i);

@ -278,7 +278,7 @@ void __init numa_init_array(void)
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
@ -549,7 +549,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
	memnodemap[0] = 0;
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
	for (i = 0; i < nr_cpu_ids; i++)
		numa_set_node(i, 0);
	e820_register_active_regions(0, start_pfn, last_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);

@ -382,7 +382,7 @@ int __init acpi_scan_nodes(unsigned long start, unsigned long end)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	for (i = 0; i < NR_CPUS; i++) {
	for (i = 0; i < nr_cpu_ids; i++) {
		int node = early_cpu_to_node(i);

		if (node == NUMA_NO_NODE)

@ -1079,7 +1079,7 @@ static void drop_other_mm_ref(void *info)

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_t mask;
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
@ -1091,7 +1091,16 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	mask = mm->cpu_vm_mask;
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, &mm->cpu_vm_mask)
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, &mm->cpu_vm_mask);

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because its in lazy mode, and it hasn't yet flushed
@ -1100,11 +1109,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
	   if needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpu_set(cpu, mask);
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpus_empty(mask))
		smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)

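xen_drop_mm_ref() shows the GFP_ATOMIC failure policy for call paths that cannot sleep: if no scratch mask is available, fall back to one smp_call_function_single() per candidate CPU instead of batching through smp_call_function_many(). Slower, but allocation-free. The fallback skeleton, with the membership test abstracted into a hypothetical predicate:

/* Skeleton of the allocation-free fallback; wants_drop() is a
 * hypothetical stand-in for the cpu_vm_mask/cr3 checks.
 */
static void sketch_drop_fallback(struct mm_struct *mm)
{
	unsigned int cpu;

	for_each_online_cpu(cpu)
		if (wants_drop(cpu, mm))	/* hypothetical */
			smp_call_function_single(cpu, drop_other_mm_ref,
						 mm, 1);
}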
@ -33,7 +33,7 @@
#include "xen-ops.h"
#include "mmu.h"

cpumask_t xen_cpu_initialized_map;
cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
@ -158,7 +158,7 @@ static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
@ -192,11 +192,14 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
	if (xen_smp_intr_init(0))
		BUG();

	xen_cpu_initialized_map = cpumask_of_cpu(0);
	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		cpu_clear(cpu, cpu_possible_map);
	}
@ -221,7 +224,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
@ -408,24 +411,23 @@ static void xen_smp_send_reschedule(int cpu)
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector)
static void xen_send_IPI_mask(const struct cpumask *mask,
			      enum ipi_vector vector)
{
	unsigned cpu;

	cpus_and(mask, mask, cpu_online_map);

	for_each_cpu_mask_nr(cpu, mask)
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(cpumask_t mask)
static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu_mask_nr(cpu, mask) {
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, 0);
			break;
@ -435,7 +437,8 @@ static void xen_smp_send_call_function_ipi(cpumask_t mask)

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR);
	xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)

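Converting the global xen_cpu_initialized_map to cpumask_var_t means the mask no longer exists until somebody allocates it, which is why xen_smp_prepare_cpus() panics on allocation failure and why the suspend path below BUG()s on a NULL map. The minimal shape of such a conversion, as a sketch with a hypothetical name:

/* Sketch: a global cpumask_var_t must be allocated before first use. */
static cpumask_var_t sketch_map;

static void sketch_prepare(void)
{
	if (!alloc_cpumask_var(&sketch_map, GFP_KERNEL))
		panic("could not allocate sketch_map\n");

	cpumask_copy(sketch_map, cpumask_of(0));	/* boot CPU only */
}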
@ -35,7 +35,8 @@ void xen_post_suspend(int suspend_cancelled)
			pfn_to_mfn(xen_start_info->console.domU.mfn);
	} else {
#ifdef CONFIG_SMP
		xen_cpu_initialized_map = cpu_online_map;
		BUG_ON(xen_cpu_initialized_map == NULL);
		cpumask_copy(xen_cpu_initialized_map, cpu_online_mask);
#endif
		xen_vcpu_restore();
	}

@ -58,7 +58,7 @@ void __init xen_init_spinlocks(void);
__cpuinit void xen_init_lock_cpu(int cpu);
void xen_uninit_lock_cpu(int cpu);

extern cpumask_t xen_cpu_initialized_map;
extern cpumask_var_t xen_cpu_initialized_map;
#else
static inline void xen_smp_init(void) {}
#endif