Commit f18c5a08 authored by Alexey Starikovskiy, committed by Len Brown

ACPICA: Allow ACPI id to be u32 instead of u8.

Allow ACPI id to be u32 instead of u8.
Requires dropping the conversion tables that used the ACPI id as an index (a minimal sketch of the idea follows below).
Signed-off-by: Len Brown <len.brown@intel.com>
parent 5008740e
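For context, a minimal user-space sketch of the idea behind the change; this is not the kernel code, and every name in it (lapic_entry, map_acpi_id) is hypothetical. A conversion table indexed by the ACPI processor id silently caps that id at the table size, whereas matching against the entries themselves lets the id be a full u32:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical record modelling one MADT local-APIC entry. */
struct lapic_entry {
	uint32_t acpi_id;	/* processor id from firmware, now up to 32 bits */
	int apic_id;		/* hardware (S)APIC id */
	int enabled;
};

/* Old scheme (sketch): a table indexed by the ACPI id.
 * The index type limits acpi_id to 0..255; this commit drops such tables. */
static uint8_t acpiid_to_apicid[256];

/* New scheme (sketch): scan the entries and match on acpi_id,
 * so the id no longer has to fit into a table index. */
static int map_acpi_id(const struct lapic_entry *e, int count, uint32_t acpi_id)
{
	for (int i = 0; i < count; i++)
		if (e[i].enabled && e[i].acpi_id == acpi_id)
			return e[i].apic_id;
	return -1;	/* not found */
}

int main(void)
{
	struct lapic_entry madt[] = {
		{ .acpi_id = 1, .apic_id = 0, .enabled = 1 },
		{ .acpi_id = 0x1000, .apic_id = 2, .enabled = 1 },	/* would not fit a u8 index */
	};

	(void)acpiid_to_apicid;
	printf("acpi_id 0x1000 -> apic_id %d\n", map_acpi_id(madt, 2, 0x1000));
	return 0;
}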
@@ -92,11 +92,6 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
#warning ACPI uses CMPXCHG, i486 and later hardware
#endif
#define MAX_MADT_ENTRIES 256
u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
{[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
EXPORT_SYMBOL(x86_acpiid_to_apicid);
/* --------------------------------------------------------------------------
Boot-time Configuration
-------------------------------------------------------------------------- */
@@ -253,10 +248,6 @@ acpi_parse_lapic(struct acpi_subtable_header * header, const unsigned long end)
acpi_table_print_madt_entry(header);
/* Record local apic id only when enabled */
if (processor->lapic_flags & ACPI_MADT_ENABLED)
x86_acpiid_to_apicid[processor->processor_id] = processor->id;
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
@@ -563,14 +554,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
int i;
for_each_possible_cpu(i) {
if (x86_acpiid_to_apicid[i] == x86_cpu_to_apicid[cpu]) {
x86_acpiid_to_apicid[i] = -1;
break;
}
}
x86_cpu_to_apicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
num_processors--;
@@ -67,11 +67,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
#define MAX_SAPICS 256
u16 ia64_acpiid_to_sapicid[MAX_SAPICS] = {[0 ... MAX_SAPICS - 1] = -1 };
EXPORT_SYMBOL(ia64_acpiid_to_sapicid);
const char *acpi_get_sysname(void)
{
#ifdef CONFIG_IA64_GENERIC
@@ -200,8 +195,6 @@ acpi_parse_lsapic(struct acpi_subtable_header * header, const unsigned long end)
smp_boot_data.cpu_phys_id[available_cpus] =
(lsapic->id << 8) | lsapic->eid;
#endif
ia64_acpiid_to_sapicid[lsapic->processor_id] =
(lsapic->id << 8) | lsapic->eid;
++available_cpus;
}
@@ -880,7 +873,6 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
cpu_set(cpu, cpu_present_map);
ia64_cpu_to_sapicid[cpu] = physid;
ia64_acpiid_to_sapicid[lsapic->processor_id] = ia64_cpu_to_sapicid[cpu];
*pcpu = cpu;
return (0);
@@ -890,14 +882,6 @@ EXPORT_SYMBOL(acpi_map_lsapic);
int acpi_unmap_lsapic(int cpu)
{
int i;
for (i = 0; i < MAX_SAPICS; i++) {
if (ia64_acpiid_to_sapicid[i] == ia64_cpu_to_sapicid[cpu]) {
ia64_acpiid_to_sapicid[i] = -1;
break;
}
}
ia64_cpu_to_sapicid[cpu] = -1;
cpu_clear(cpu, cpu_present_map);
@@ -375,30 +375,126 @@ static int acpi_processor_remove_fs(struct acpi_device *device)
}
/* Use the acpiid in MADT to map cpus in case of SMP */
#ifndef CONFIG_SMP
#define convert_acpiid_to_cpu(acpi_id) (-1)
#else
static struct acpi_table_madt *madt;
static int map_lapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
struct acpi_madt_local_apic *lapic =
(struct acpi_madt_local_apic *)entry;
if ((lapic->lapic_flags & ACPI_MADT_ENABLED) &&
lapic->processor_id == acpi_id) {
*apic_id = lapic->id;
return 1;
}
return 0;
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
u32 acpi_id, int *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
(struct acpi_madt_local_sapic *)entry;
/* Only check enabled APICs*/
if (lsapic->lapic_flags & ACPI_MADT_ENABLED) {
/* First check against id */
if (lsapic->processor_id == acpi_id) {
*apic_id = lsapic->id;
return 1;
/* Check against optional uid */
} else if (entry->length >= 16 &&
lsapic->uid == acpi_id) {
*apic_id = lsapic->uid;
return 1;
}
}
return 0;
}
#ifdef CONFIG_IA64
#define arch_acpiid_to_apicid ia64_acpiid_to_sapicid
#define arch_cpu_to_apicid ia64_cpu_to_sapicid
#define ARCH_BAD_APICID (0xffff)
#else
#define arch_acpiid_to_apicid x86_acpiid_to_apicid
#define arch_cpu_to_apicid x86_cpu_to_apicid
#define ARCH_BAD_APICID (0xff)
#endif
static int convert_acpiid_to_cpu(u8 acpi_id)
static int map_madt_entry(u32 acpi_id)
{
unsigned long madt_end, entry;
int apic_id = -1;
if (!madt)
return apic_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
/* Parse all entries looking for a match. */
entry += sizeof(struct acpi_table_madt);
while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
if (map_lapic_id(header, acpi_id, &apic_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
if (map_lsapic_id(header, acpi_id, &apic_id))
break;
}
entry += header->length;
}
return apic_id;
}
static int map_mat_entry(acpi_handle handle, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
int apic_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
if (!buffer.length || !buffer.pointer)
goto exit;
obj = buffer.pointer;
if (obj->type != ACPI_TYPE_BUFFER ||
obj->buffer.length < sizeof(struct acpi_subtable_header)) {
goto exit;
}
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
map_lapic_id(header, acpi_id, &apic_id);
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
map_lsapic_id(header, acpi_id, &apic_id);
}
exit:
if (buffer.pointer)
kfree(buffer.pointer);
return apic_id;
}
static int get_apic_id(acpi_handle handle, u32 acpi_id)
{
u16 apic_id;
int i;
int apic_id = -1;
apic_id = arch_acpiid_to_apicid[acpi_id];
if (apic_id == ARCH_BAD_APICID)
return -1;
apic_id = map_mat_entry(handle, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(acpi_id);
if (apic_id == -1)
return apic_id;
for (i = 0; i < NR_CPUS; i++) {
for (i = 0; i < NR_CPUS; ++i) {
if (arch_cpu_to_apicid[i] == apic_id)
return i;
}
@@ -456,7 +552,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
*/
pr->acpi_id = object.processor.proc_id;
cpu_index = convert_acpiid_to_cpu(pr->acpi_id);
cpu_index = get_apic_id(pr->handle, pr->acpi_id);
/* Handle UP system running SMP kernel, with no LAPIC in MADT */
if (!cpu0_initialized && (cpu_index == -1) &&
@@ -473,7 +569,7 @@ static int acpi_processor_get_info(struct acpi_processor *pr)
* less than the max # of CPUs. They should be ignored _iff
* they are physically not present.
*/
if (cpu_index == -1) {
if (pr->id == -1) {
if (ACPI_FAILURE
(acpi_processor_hotadd_init(pr->handle, &pr->id))) {
return -ENODEV;
@@ -895,6 +991,12 @@ static int __init acpi_processor_init(void)
memset(&processors, 0, sizeof(processors));
memset(&errata, 0, sizeof(errata));
#ifdef CONFIG_SMP
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
(struct acpi_table_header **)&madt)))
madt = 0;
#endif
acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir);
if (!acpi_processor_dir)
return -ENOMEM;
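The map_madt_entry() added above treats the MADT as a sequence of variable-length subtables and advances by each header's length field, matching the requested acpi_id as it goes, while map_mat_entry() first tries the per-processor _MAT buffer and get_apic_id() falls back from one to the other. A standalone sketch of that subtable-walking pattern follows; the struct and function names are hypothetical, not the kernel's ACPI types:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical subtable header: every entry starts with a type and a length. */
struct subtable_header {
	uint8_t type;
	uint8_t length;
};

#define TYPE_LOCAL_APIC 0

struct local_apic_entry {
	struct subtable_header header;
	uint8_t processor_id;
	uint8_t apic_id;
};

/* Walk a packed buffer of subtables, advancing by each entry's own length
 * and matching on type and acpi_id. */
static int find_apic_id(const uint8_t *buf, size_t len, uint32_t acpi_id)
{
	size_t off = 0;

	while (off + sizeof(struct subtable_header) <= len) {
		const struct subtable_header *h =
			(const struct subtable_header *)(buf + off);

		if (h->length == 0)
			break;	/* malformed table, avoid an infinite loop */

		if (h->type == TYPE_LOCAL_APIC) {
			const struct local_apic_entry *e =
				(const struct local_apic_entry *)h;
			if (e->processor_id == acpi_id)
				return e->apic_id;
		}
		off += h->length;
	}
	return -1;
}

int main(void)
{
	struct local_apic_entry entries[2] = {
		{ { TYPE_LOCAL_APIC, sizeof(struct local_apic_entry) }, 1, 0 },
		{ { TYPE_LOCAL_APIC, sizeof(struct local_apic_entry) }, 2, 4 },
	};
	uint8_t buf[sizeof(entries)];

	memcpy(buf, entries, sizeof(entries));
	printf("acpi_id 2 -> apic_id %d\n", find_apic_id(buf, sizeof(buf), 2));
	return 0;
}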
@@ -144,8 +144,6 @@ extern void acpi_reserve_bootmem(void);
#endif /*CONFIG_ACPI_SLEEP*/
extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1
#endif /*__KERNEL__*/
@@ -119,8 +119,6 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
extern u16 ia64_acpiid_to_sapicid[];
/*
* Refer Intel ACPI _PDC support document for bit definitions
*/
@@ -136,8 +136,6 @@ extern void acpi_reserve_bootmem(void);
extern int acpi_disabled;
extern int acpi_pci_disabled;
extern u8 x86_acpiid_to_apicid[];
#define ARCH_HAS_POWER_INIT 1
extern int acpi_skip_timer_override;