linux/drivers/acpi/processor_core.c
Hanjun Guo af8f3f514d ACPI / processor: Convert apic_id to phys_id to make it arch agnostic
On x86 and ia64, the apic_id in the MADT is the hardware ID by which
a CPU identifies itself in the system. OSPM uses it during early-boot
SMP init to map APIC IDs to logical CPU numbers. When the DSDT/SSDT
(ACPI namespace) is scanned later, the ACPI processor driver is probed
and uses the acpi_id from the DSDT to look up the apic_id, which is
then mapped to the logical CPU number needed by the processor driver.

Before ACPI 5.0, only x86 and ia64 were covered by the ACPI spec, so
using apic_id both in arch code and in the ACPI core was fine. Since
ACPI 5.0, ARM is also supported by ACPI, and there is no APIC on ARM,
so having apic_id used for both x86 and ARM in the same functions is
confusing.

So rename apic_id to phys_id (its original meaning) in the ACPI
processor driver to make it arch-agnostic, while leaving the
arch-dependent code unchanged. No functional change.

Signed-off-by: Hanjun Guo <hanjun.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
2015-01-05 23:32:42 +01:00
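For illustration, a minimal sketch of how a caller might use the interface
exported below to resolve a logical CPU number; the helper name is
hypothetical and the type argument of 1 assumes the processor is declared
as a Device() object (0 would mean a Processor() declaration):

static int example_resolve_cpu(acpi_handle handle, u32 acpi_id)
{
        int cpu;

        /* acpi_id (namespace) -> phys_id (MADT/_MAT) -> logical CPU */
        cpu = acpi_get_cpuid(handle, 1, acpi_id);
        if (cpu < 0)
                return -ENODEV;

        return cpu;
}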


/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *      Alex Chiang <achiang@hp.com>
 *      - Unified x86/ia64 implementations
 */
#include <linux/export.h>
#include <linux/acpi.h>
#include <acpi/processor.h>

#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

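/*
 * Match a Local APIC MADT subtable against acpi_id (the Processor
 * declaration ID) and, on success, return the APIC ID via *apic_id.
 */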
static int map_lapic_id(struct acpi_subtable_header *entry,
                u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                container_of(entry, struct acpi_madt_local_apic, header);

        if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;

        if (lapic->processor_id != acpi_id)
                return -EINVAL;

        *apic_id = lapic->id;
        return 0;
}

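/*
 * Match a Local x2APIC MADT subtable. x2APIC entries are matched by
 * UID, so only Device() declarations (device_declaration != 0) apply.
 */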
static int map_x2apic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                container_of(entry, struct acpi_madt_local_x2apic, header);

        if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;

        if (device_declaration && (apic->uid == acpi_id)) {
                *apic_id = apic->local_apic_id;
                return 0;
        }

        return -EINVAL;
}

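/*
 * Match a Local SAPIC MADT subtable (ia64). Device() declarations are
 * matched by UID, Processor() declarations by processor_id; the
 * physical ID is built from the SAPIC id/eid pair.
 */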
static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                container_of(entry, struct acpi_madt_local_sapic, header);

        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return -ENODEV;

        if (device_declaration) {
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return -EINVAL;
        } else if (lsapic->processor_id != acpi_id)
                return -EINVAL;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 0;
}

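/*
 * Walk the MADT (fetched once and cached) and return the CPU hardware
 * ID of the LAPIC/x2APIC/SAPIC entry matching acpi_id, or -1 if no
 * match is found.
 */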
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        static struct acpi_table_madt *madt;
        static int read_madt;
        int phys_id = -1;       /* CPU hardware ID */

        if (!read_madt) {
                if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                        (struct acpi_table_header **)&madt)))
                        madt = NULL;
                read_madt++;
        }

        if (!madt)
                return phys_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */
        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;

                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (!map_lapic_id(header, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (!map_x2apic_id(header, type, acpi_id, &phys_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (!map_lsapic_id(header, type, acpi_id, &phys_id))
                                break;
                }
                entry += header->length;
        }
        return phys_id;
}

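/*
 * Evaluate the processor's _MAT method, which returns a buffer holding
 * a single MADT-style subtable, and map it the same way as an entry
 * found in the static MADT.
 */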
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int phys_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
                map_lapic_id(header, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
                map_lsapic_id(header, type, acpi_id, &phys_id);
        else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
                map_x2apic_id(header, type, acpi_id, &phys_id);

exit:
        kfree(buffer.pointer);
        return phys_id;
}

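/* Resolve a CPU hardware ID, preferring _MAT and falling back to the MADT. */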
int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
        int phys_id;

        phys_id = map_mat_entry(handle, type, acpi_id);
        if (phys_id == -1)
                phys_id = map_madt_entry(type, acpi_id);

        return phys_id;
}

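/* Translate a CPU hardware ID into a logical CPU number, or -1 if none. */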
int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
        int i;
#endif

        if (phys_id == -1) {
                /*
                 * On UP processor, there is no _MAT or MADT table.
                 * So above phys_id is always set to -1.
                 *
                 * BIOS may define multiple CPU handles even for UP processor.
                 * For example,
                 *
                 * Scope (_PR)
                 * {
                 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
                 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
                 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
                 *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
                 * }
                 *
                 * Ignores phys_id and always returns 0 for the processor
                 * handle with acpi id 0 if nr_cpu_ids is 1.
                 * This should be the case if SMP tables are not found.
                 * Return -1 for other CPU's handle.
                 */
                if (nr_cpu_ids <= 1 && acpi_id == 0)
                        return acpi_id;
                else
                        return phys_id;
        }

#ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == phys_id)
                        return i;
        }
#else
        /* In UP kernel, only processor 0 is valid */
        if (phys_id == 0)
                return phys_id;
#endif
        return -1;
}

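/* Map an ACPI processor object straight to its logical CPU number. */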
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
        int phys_id;

        phys_id = acpi_get_phys_id(handle, type, acpi_id);

        return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);