Commit e8bdc5a9 authored by Len Brown

Pull acpica into test branch

parents c6f4bc21 b0b7eaaf
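The recurring pattern in this merge is the ACPICA table-manager rework: acpi_table_parse() now takes a 4-character signature string (ACPI_SIG_HPET, ACPI_SIG_MCFG, ...) and passes the handler a mapped struct acpi_table_header * instead of a physical address and size, and the private struct fadt_descriptor acpi_fadt copy is replaced by the ACPICA global acpi_gbl_FADT. A minimal sketch of the new-style usage follows; the handler name, init function, and message text are illustrative only and are not taken from this diff.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/acpi.h>

/* New-style table handler: receives the already-mapped table header. */
static int __init example_hpet_handler(struct acpi_table_header *table)
{
	/* Illustrative only: report where the table came from. */
	printk(KERN_INFO "HPET table: OEM %.6s, revision %u\n",
	       table->oem_id, table->revision);
	return 0;
}

static int __init example_acpi_setup(void)
{
	/* Tables are selected by signature string under the new API. */
	acpi_table_parse(ACPI_SIG_HPET, example_hpet_handler);

	/* The FADT is now the parsed ACPICA global, not a raw copy. */
	printk(KERN_INFO "SCI interrupt is GSI %u\n",
	       acpi_gbl_FADT.sci_interrupt);
	return 0;
}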
@@ -16,7 +16,7 @@
 static int nvidia_hpet_detected __initdata;
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 nvidia_hpet_detected = 1;
 return 0;
@@ -30,7 +30,7 @@ static int __init check_bridge(int vendor, int device)
 is enabled. */
 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
 nvidia_hpet_detected = 0;
-acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
 if (nvidia_hpet_detected == 0) {
 acpi_skip_timer_override = 1;
 printk(KERN_INFO "Nvidia board "
...
@@ -190,7 +190,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 /* Invoke C3 */
 inb(cx_address);
 /* Dummy op - must do something useless after P_LVL3 read */
-t = inl(acpi_fadt.xpm_tmr_blk.address);
+t = inl(acpi_gbl_FADT.xpm_timer_block.address);
 }
 /* Disable bus ratio bit */
 local_irq_disable();
@@ -250,8 +250,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 outb(3, 0x22);
 } else if ((pr != NULL) && pr->flags.bm_control) {
 /* Disable bus master arbitration */
-acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
-ACPI_MTX_DO_NOT_LOCK);
+acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
 }
 switch (longhaul_version) {
@@ -281,8 +280,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 case TYPE_POWERSAVER:
 if (longhaul_flags & USE_ACPI_C3) {
 /* Don't allow wakeup */
-acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
-ACPI_MTX_DO_NOT_LOCK);
+acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
 do_powersaver(cx->address, clock_ratio_index);
 } else {
 do_powersaver(0, clock_ratio_index);
@@ -295,8 +293,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 outb(0, 0x22);
 } else if ((pr != NULL) && pr->flags.bm_control) {
 /* Enable bus master arbitration */
-acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
-ACPI_MTX_DO_NOT_LOCK);
+acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
 }
 outb(pic2_mask,0xA1); /* restore mask */
 outb(pic1_mask,0x21);
...
@@ -1057,7 +1057,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 static int gsi_to_irq[MAX_GSI_NUM];
 /* Don't set up the ACPI SCI because it's already set up */
-if (acpi_fadt.sci_int == gsi)
+if (acpi_gbl_FADT.sci_interrupt == gsi)
 return gsi;
 ioapic = mp_find_ioapic(gsi);
@@ -1114,7 +1114,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 /*
 * Don't assign IRQ used by ACPI SCI
 */
-if (gsi == acpi_fadt.sci_int)
+if (gsi == acpi_gbl_FADT.sci_interrupt)
 gsi = pci_irq++;
 gsi_to_irq[irq] = gsi;
 } else {
...
@@ -62,19 +62,19 @@ extern void * boot_ioremap(unsigned long, unsigned long);
 /* Identify CPU proximity domains */
 static void __init parse_cpu_affinity_structure(char *p)
 {
-struct acpi_table_processor_affinity *cpu_affinity =
-(struct acpi_table_processor_affinity *) p;
+struct acpi_srat_cpu_affinity *cpu_affinity =
+(struct acpi_srat_cpu_affinity *) p;
-if (!cpu_affinity->flags.enabled)
+if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 return; /* empty entry */
 /* mark this node as "seen" in node bitmap */
-BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain);
-apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain;
+BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);
+apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;
 printk("CPU 0x%02X in proximity domain 0x%02X\n",
-cpu_affinity->apic_id, cpu_affinity->proximity_domain);
+cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
 }
 /*
@@ -87,25 +87,24 @@ static void __init parse_memory_affinity_structure (char *sratp)
 unsigned long start_pfn, end_pfn;
 u8 pxm;
 struct node_memory_chunk_s *p, *q, *pend;
-struct acpi_table_memory_affinity *memory_affinity =
-(struct acpi_table_memory_affinity *) sratp;
+struct acpi_srat_mem_affinity *memory_affinity =
+(struct acpi_srat_mem_affinity *) sratp;
-if (!memory_affinity->flags.enabled)
+if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 return; /* empty entry */
+pxm = memory_affinity->proximity_domain & 0xff;
 /* mark this node as "seen" in node bitmap */
-BMAP_SET(pxm_bitmap, memory_affinity->proximity_domain);
+BMAP_SET(pxm_bitmap, pxm);
 /* calculate info for memory chunk structure */
-paddr = memory_affinity->base_addr_hi;
-paddr = (paddr << 32) | memory_affinity->base_addr_lo;
-size = memory_affinity->length_hi;
-size = (size << 32) | memory_affinity->length_lo;
+paddr = memory_affinity->base_address;
+size = memory_affinity->length;
 start_pfn = paddr >> PAGE_SHIFT;
 end_pfn = (paddr + size) >> PAGE_SHIFT;
-pxm = memory_affinity->proximity_domain;
 if (num_memory_chunks >= MAXCHUNKS) {
 printk("Too many mem chunks in SRAT. Ignoring %lld MBytes at %llx\n",
@@ -132,8 +131,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
 printk("Memory range 0x%lX to 0x%lX (type 0x%X) in proximity domain 0x%02X %s\n",
 start_pfn, end_pfn,
 memory_affinity->memory_type,
-memory_affinity->proximity_domain,
-(memory_affinity->flags.hot_pluggable ?
+pxm,
+((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
 "enabled and removable" : "enabled" ) );
 }
@@ -185,10 +184,10 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
 num_memory_chunks = 0;
 while (p < end) {
 switch (*p) {
-case ACPI_SRAT_PROCESSOR_AFFINITY:
+case ACPI_SRAT_TYPE_CPU_AFFINITY:
 parse_cpu_affinity_structure(p);
 break;
-case ACPI_SRAT_MEMORY_AFFINITY:
+case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
 parse_memory_affinity_structure(p);
 break;
 default:
@@ -262,31 +261,30 @@ static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
 return 0;
 }
+struct acpi_static_rsdt {
+struct acpi_table_rsdt table;
+u32 padding[7]; /* Allow for 7 more table entries */
+};
 int __init get_memcfg_from_srat(void)
 {
 struct acpi_table_header *header = NULL;
 struct acpi_table_rsdp *rsdp = NULL;
 struct acpi_table_rsdt *rsdt = NULL;
-struct acpi_pointer *rsdp_address = NULL;
-struct acpi_table_rsdt saved_rsdt;
+acpi_native_uint rsdp_address = 0;
+struct acpi_static_rsdt saved_rsdt;
 int tables = 0;
 int i = 0;
-if (ACPI_FAILURE(acpi_find_root_pointer(ACPI_PHYSICAL_ADDRESSING,
-rsdp_address))) {
+rsdp_address = acpi_find_rsdp();
+if (!rsdp_address) {
 printk("%s: System description tables not found\n",
 __FUNCTION__);
 goto out_err;
 }
-if (rsdp_address->pointer_type == ACPI_PHYSICAL_POINTER) {
 printk("%s: assigning address to rsdp\n", __FUNCTION__);
-rsdp = (struct acpi_table_rsdp *)
-(u32)rsdp_address->pointer.physical;
-} else {
-printk("%s: rsdp_address is not a physical pointer\n", __FUNCTION__);
-goto out_err;
-}
+rsdp = (struct acpi_table_rsdp *)(u32)rsdp_address;
 if (!rsdp) {
 printk("%s: Didn't find ACPI root!\n", __FUNCTION__);
 goto out_err;
@@ -295,13 +293,13 @@ int __init get_memcfg_from_srat(void)
 printk(KERN_INFO "%.8s v%d [%.6s]\n", rsdp->signature, rsdp->revision,
 rsdp->oem_id);
-if (strncmp(rsdp->signature, RSDP_SIG,strlen(RSDP_SIG))) {
+if (strncmp(rsdp->signature, ACPI_SIG_RSDP,strlen(ACPI_SIG_RSDP))) {
 printk(KERN_WARNING "%s: RSDP table signature incorrect\n", __FUNCTION__);
 goto out_err;
 }
 rsdt = (struct acpi_table_rsdt *)
-boot_ioremap(rsdp->rsdt_address, sizeof(struct acpi_table_rsdt));
+boot_ioremap(rsdp->rsdt_physical_address, sizeof(struct acpi_table_rsdt));
 if (!rsdt) {
 printk(KERN_WARNING
@@ -310,9 +308,9 @@ int __init get_memcfg_from_srat(void)
 goto out_err;
 }
-header = & rsdt->header;
+header = &rsdt->header;
-if (strncmp(header->signature, RSDT_SIG, strlen(RSDT_SIG))) {
+if (strncmp(header->signature, ACPI_SIG_RSDT, strlen(ACPI_SIG_RSDT))) {
 printk(KERN_WARNING "ACPI: RSDT signature incorrect\n");
 goto out_err;
 }
@@ -330,9 +328,9 @@ int __init get_memcfg_from_srat(void)
 memcpy(&saved_rsdt, rsdt, sizeof(saved_rsdt));
-if (saved_rsdt.header.length > sizeof(saved_rsdt)) {
+if (saved_rsdt.table.header.length > sizeof(saved_rsdt)) {
 printk(KERN_WARNING "ACPI: Too big length in RSDT: %d\n",
-saved_rsdt.header.length);
+saved_rsdt.table.header.length);
 goto out_err;
 }
@@ -341,15 +339,15 @@ int __init get_memcfg_from_srat(void)
 for (i = 0; i < tables; i++) {
 /* Map in header, then map in full table length. */
 header = (struct acpi_table_header *)
-boot_ioremap(saved_rsdt.entry[i], sizeof(struct acpi_table_header));
+boot_ioremap(saved_rsdt.table.table_offset_entry[i], sizeof(struct acpi_table_header));
 if (!header)
 break;
 header = (struct acpi_table_header *)
-boot_ioremap(saved_rsdt.entry[i], header->length);
+boot_ioremap(saved_rsdt.table.table_offset_entry[i], header->length);
 if (!header)
 break;
-if (strncmp((char *) &header->signature, "SRAT", 4))
+if (strncmp((char *) &header->signature, ACPI_SIG_SRAT, 4))
 continue;
 /* we've found the srat table. don't need to look at any more tables */
...
@@ -84,15 +84,6 @@ struct es7000_oem_table {
 };
 #ifdef CONFIG_ACPI
-struct acpi_table_sdt {
-unsigned long pa;
-unsigned long count;
-struct {
-unsigned long pa;
-enum acpi_table_id id;
-unsigned long size;
-} entry[50];
-};
 struct oem_table {
 struct acpi_table_header Header;
...
@@ -160,53 +160,16 @@ parse_unisys_oem (char *oemptr)
 int __init
 find_unisys_acpi_oem_table(unsigned long *oem_addr)
 {
-struct acpi_table_rsdp *rsdp = NULL;
-unsigned long rsdp_phys = 0;
 struct acpi_table_header *header = NULL;
-int i;
-struct acpi_table_sdt sdt;
-rsdp_phys = acpi_find_rsdp();
-rsdp = __va(rsdp_phys);
-if (rsdp->rsdt_address) {
-struct acpi_table_rsdt *mapped_rsdt = NULL;
-sdt.pa = rsdp->rsdt_address;
-header = (struct acpi_table_header *)
-__acpi_map_table(sdt.pa, sizeof(struct acpi_table_header));
-if (!header)
-return -ENODEV;
-sdt.count = (header->length - sizeof(struct acpi_table_header)) >> 3;
-mapped_rsdt = (struct acpi_table_rsdt *)
-__acpi_map_table(sdt.pa, header->length);
-if (!mapped_rsdt)
-return -ENODEV;
-header = &mapped_rsdt->header;
-for (i = 0; i < sdt.count; i++)
-sdt.entry[i].pa = (unsigned long) mapped_rsdt->entry[i];
-};
-for (i = 0; i < sdt.count; i++) {
-header = (struct acpi_table_header *)
-__acpi_map_table(sdt.entry[i].pa,
-sizeof(struct acpi_table_header));
-if (!header)
-continue;
-if (!strncmp((char *) &header->signature, "OEM1", 4)) {
-if (!strncmp((char *) &header->oem_id, "UNISYS", 6)) {
-void *addr;
-struct oem_table *t;
-acpi_table_print(header, sdt.entry[i].pa);
-t = (struct oem_table *) __acpi_map_table(sdt.entry[i].pa, header->length);
-addr = (void *) __acpi_map_table(t->OEMTableAddr, t->OEMTableSize);
-*oem_addr = (unsigned long) addr;
+int i = 0;
+while (ACPI_SUCCESS(acpi_get_table("OEM1", i++, &header))) {
+if (!memcmp((char *) &header->oem_id, "UNISYS", 6)) {
+struct oem_table *t = (struct oem_table *)header;
+*oem_addr = (unsigned long)__acpi_map_table(t->OEMTableAddr,
+t->OEMTableSize);
 return 0;
 }
 }
-}
 return -1;
 }
 #endif
...
@@ -36,7 +36,7 @@ static DECLARE_BITMAP(fallback_slots, MAX_CHECK_BUS*32);
 static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 {
 int cfg_num = -1;
-struct acpi_table_mcfg_config *cfg;
+struct acpi_mcfg_allocation *cfg;
 if (seg == 0 && bus < MAX_CHECK_BUS &&
 test_bit(PCI_SLOT(devfn) + 32*bus, fallback_slots))
@@ -48,11 +48,11 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 break;
 }
 cfg = &pci_mmcfg_config[cfg_num];
-if (cfg->pci_segment_group_number != seg)
+if (cfg->pci_segment != seg)
 continue;
 if ((cfg->start_bus_number <= bus) &&
 (cfg->end_bus_number >= bus))
-return cfg->base_address;
+return cfg->address;
 }
 /* Handle more broken MCFG tables on Asus etc.
@@ -60,9 +60,9 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 this applies to all busses. */
 cfg = &pci_mmcfg_config[0];
 if (pci_mmcfg_config_num == 1 &&
-cfg->pci_segment_group_number == 0 &&
+cfg->pci_segment == 0 &&
 (cfg->start_bus_number | cfg->end_bus_number) == 0)
-return cfg->base_address;
+return cfg->address;
 /* Fall back to type 0 */
 return 0;
@@ -199,19 +199,19 @@ void __init pci_mmcfg_init(int type)
 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
 return;
-acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
 if ((pci_mmcfg_config_num == 0) ||
 (pci_mmcfg_config == NULL) ||
-(pci_mmcfg_config[0].base_address == 0))
+(pci_mmcfg_config[0].address == 0))
 return;
 /* Only do this check when type 1 works. If it doesn't work
 assume we run on a Mac and always use MCFG */
-if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
 E820_RESERVED)) {
-printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-pci_mmcfg_config[0].base_address);
+printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+(unsigned long)pci_mmcfg_config[0].address);
 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
 return;
 }
...
@@ -26,14 +26,10 @@
 #include <linux/acpi.h>
 #include <asm/sn/sn2/sn_hwperf.h>
 #include <asm/sn/acpi.h>
+#include "acpi/acglobal.h"
 extern void sn_init_cpei_timer(void);
 extern void register_sn_procfs(void);
-extern void sn_acpi_bus_fixup(struct pci_bus *);
-extern void sn_bus_fixup(struct pci_bus *);
-extern void sn_acpi_slot_fixup(struct pci_dev *, struct pcidev_info *);
-extern void sn_more_slot_fixup(struct pci_dev *, struct pcidev_info *);
-extern void sn_legacy_pci_window_fixup(struct pci_controller *, u64, u64);
 extern void sn_io_acpi_init(void);
 extern void sn_io_init(void);
@@ -48,6 +44,9 @@ struct sysdata_el {
 int sn_ioif_inited; /* SN I/O infrastructure initialized? */
+int sn_acpi_rev; /* SN ACPI revision */
+EXPORT_SYMBOL_GPL(sn_acpi_rev);
 struct sn_pcibus_provider *sn_pci_provider[PCIIO_ASIC_MAX_TYPES]; /* indexed by asic type */
 /*
@@ -98,25 +97,6 @@ sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
 return ret_stuff.status;
 }
-/*
- * Retrieve the pci device information given the bus and device|function number.
- */
-static inline u64
-sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
-u64 sn_irq_info)
-{
-struct ia64_sal_retval ret_stuff;
-ret_stuff.status = 0;
-ret_stuff.v0 = 0;
-SAL_CALL_NOLOCK(ret_stuff,
-(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
-(u64) segment, (u64) bus_number, (u64) devfn,
-(u64) pci_dev,
-sn_irq_info, 0, 0);
-return ret_stuff.v0;
-}
 /*
 * sn_pcidev_info_get() - Retrieve the pcidev_info struct for the specified
 * device.
@@ -249,47 +229,22 @@ void sn_pci_unfixup_slot(struct pci_dev *dev)
 }
 /*
-* sn_pci_fixup_slot() - This routine sets up a slot's resources consistent
-* with the Linux PCI abstraction layer. Resources
-* acquired from our PCI provider include PIO maps
-* to BAR space and interrupt objects.
+* sn_pci_fixup_slot()
 */
-void sn_pci_fixup_slot(struct pci_dev *dev)
+void sn_pci_fixup_slot(struct pci_dev *dev, struct pcidev_info *pcidev_info,
+struct sn_irq_info *sn_irq_info)
 {
 int segment = pci_domain_nr(dev->bus);
-int status = 0;
 struct pcibus_bussoft *bs;
 struct pci_bus *host_pci_bus;
 struct pci_dev *host_pci_dev;
-struct pcidev_info *pcidev_info;
-struct sn_irq_info *sn_irq_info;
 unsigned int bus_no, devfn;
 pci_dev_get(dev); /* for the sysdata pointer */
-pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
-if (!pcidev_info)
-BUG(); /* Cannot afford to run out of memory */
-sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
-if (!sn_irq_info)
-BUG(); /* Cannot afford to run out of memory */
-/* Call to retrieve pci device information needed by kernel. */
-status = sal_get_pcidev_info((u64) segment, (u64) dev->bus->number,
-dev->devfn,
-(u64) __pa(pcidev_info),
-(u64) __pa(sn_irq_info));
-if (status)
-BUG(); /* Cannot get platform pci device information */
 /* Add pcidev_info to list in pci_controller.platform_data */
 list_add_tail(&pcidev_info->pdi_list,
 &(SN_PLATFORM_DATA(dev->bus)->pcidev_info));
-if (SN_ACPI_BASE_SUPPORT())
-sn_acpi_slot_fixup(dev, pcidev_info);
-else
-sn_more_slot_fixup(dev, pcidev_info);
 /*
 * Using the PROMs values for the PCI host bus, get the Linux
 * PCI host_pci_dev struct and set up host bus linkages
@@ -489,11 +444,6 @@ void sn_generate_path(struct pci_bus *pci_bus, char *address)
 sprintf(address, "%s^%d", address, geo_slot(geoid));
 }
-/*
- * sn_pci_fixup_bus() - Perform SN specific setup of software structs
- * (pcibus_bussoft, pcidev_info) and hardware
- * registers, for the specified bus and devices under it.
- */
 void __devinit
 sn_pci_fixup_bus(struct pci_bus *bus)
 {
@@ -519,6 +469,15 @@ sn_io_early_init(void)
 if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
 return 0;
+/* we set the acpi revision to that of the DSDT table OEM rev. */
+{
+struct acpi_table_header *header = NULL;
+acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
+BUG_ON(header == NULL);
+sn_acpi_rev = header->oem_revision;
+}
 /*
 * prime sn_pci_provider[]. Individial provider init routines will
 * override their respective default entries.
@@ -544,8 +503,12 @@ sn_io_early_init(void)
 register_sn_procfs();
 #endif
+{
+struct acpi_table_header *header;
+(void)acpi_get_table_by_index(ACPI_TABLE_INDEX_DSDT, &header);
 printk(KERN_INFO "ACPI DSDT OEM Rev 0x%x\n",
-acpi_gbl_DSDT->oem_revision);
+header->oem_revision);
+}
 if (SN_ACPI_BASE_SUPPORT())
 sn_io_acpi_init();
 else
@@ -605,7 +568,6 @@ sn_io_late_init(void)
 fs_initcall(sn_io_late_init);
-EXPORT_SYMBOL(sn_pci_fixup_slot);
 EXPORT_SYMBOL(sn_pci_unfixup_slot);
 EXPORT_SYMBOL(sn_bus_store_sysdata);
 EXPORT_SYMBOL(sn_bus_free_sysdata);
...
@@ -56,6 +56,25 @@ static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 return ret_stuff.v0;
 }
+/*
+ * Retrieve the pci device information given the bus and device|function number.
+ */
+static inline u64
+sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev,
+u64 sn_irq_info)
+{
+struct ia64_sal_retval ret_stuff;
+ret_stuff.status = 0;
+ret_stuff.v0 = 0;
+SAL_CALL_NOLOCK(ret_stuff,
+(u64) SN_SAL_IOIF_GET_PCIDEV_INFO,
+(u64) segment, (u64) bus_number, (u64) devfn,
+(u64) pci_dev,
+sn_irq_info, 0, 0);
+return ret_stuff.v0;
+}
 /*
 * sn_fixup_ionodes() - This routine initializes the HUB data structure for
@@ -172,18 +191,40 @@ sn_pci_window_fixup(struct pci_dev *dev, unsigned int count,
 }
 /*
-* sn_more_slot_fixup() - We are not running with an ACPI capable PROM,
+* sn_io_slot_fixup() - We are not running with an ACPI capable PROM,
 * and need to convert the pci_dev->resource
 * 'start' and 'end' addresses to mapped addresses,
 * and setup the pci_controller->window array entries.
 */
 void
-sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
+sn_io_slot_fixup(struct pci_dev *dev)
 {
 unsigned int count = 0;
 int idx;
 s64 pci_addrs[PCI_ROM_RESOURCE + 1];
 unsigned long addr, end, size, start;
+struct pcidev_info *pcidev_info;
+struct sn_irq_info *sn_irq_info;
+int status;
+pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL);
+if (!pcidev_info)
+panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__);
+sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL);
+if (!sn_irq_info)
+panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__);
+/* Call to retrieve pci device information needed by kernel. */
+status = sal_get_pcidev_info((u64) pci_domain_nr(dev),
+(u64) dev->bus->number,
+dev->devfn,
+(u64) __pa(pcidev_info),
+(u64) __pa(sn_irq_info));
+if (status)
+BUG(); /* Cannot get platform pci device information */
 /* Copy over PIO Mapped Addresses */
 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
@@ -219,8 +260,12 @@ sn_more_slot_fixup(struct pci_dev *dev, struct pcidev_info *pcidev_info)
 */
 if (count > 0)
 sn_pci_window_fixup(dev, count, pci_addrs);
+sn_pci_fixup_slot(dev, pcidev_info, sn_irq_info);
 }
+EXPORT_SYMBOL(sn_io_slot_fixup);
 /*
 * sn_pci_controller_fixup() - This routine sets up a bus's resources
 * consistent with the Linux PCI abstraction layer.
@@ -272,9 +317,6 @@ sn_bus_fixup(struct pci_bus *bus)
 {
 struct pci_dev *pci_dev = NULL;
 struct pcibus_bussoft *prom_bussoft_ptr;
-extern void sn_common_bus_fixup(struct pci_bus *,
-struct pcibus_bussoft *);
 if (!bus->parent) { /* If root bus */
 prom_bussoft_ptr = PCI_CONTROLLER(bus)->platform_data;
@@ -291,7 +333,7 @@ sn_bus_fixup(struct pci_bus *bus)
 prom_bussoft_ptr->bs_legacy_mem);
 }
 list_for_each_entry(pci_dev, &bus->devices, bus_list) {
-sn_pci_fixup_slot(pci_dev);
+sn_io_slot_fixup(pci_dev);
 }
 }
...
@@ -29,6 +29,7 @@
 * on IA64. This routine will convert a port number into a valid
 * SN i/o address. Used by sn_in*() and sn_out*().
 */
 void *sn_io_addr(unsigned long port)
 {
 if (!IS_RUNNING_ON_SIMULATOR()) {
...
@@ -20,7 +20,8 @@
 #include "xtalk/hubdev.h"
 int
-sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
+sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
+char **ssdt)
 {
 struct ia64_sal_retval ret_stuff;
 u64 busnum;
@@ -32,7 +33,8 @@ sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp)
 segment = soft->pbi_buscommon.bs_persist_segment;
 busnum = soft->pbi_buscommon.bs_persist_busnum;
 SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
-busnum, (u64) device, (u64) resp, 0, 0, 0);
+busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
+0, 0);
 return (int)ret_stuff.v0;
 }
...
@@ -32,7 +32,7 @@ static void via_bugs(void)
 static int nvidia_hpet_detected __initdata;
-static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+static int __init nvidia_hpet_check(struct acpi_table_header *header)
 {
 nvidia_hpet_detected = 1;
 return 0;
@@ -53,7 +53,7 @@ static void nvidia_bugs(void)
 return;
 nvidia_hpet_detected = 0;
-acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+acpi_table_parse(ACPI_SIG_HPET, nvidia_hpet_check);
 if (nvidia_hpet_detected == 0) {
 acpi_skip_timer_override = 1;
 printk(KERN_INFO "Nvidia board "
...
@@ -58,8 +58,8 @@ void __init clustered_apic_check(void)
 * Some x86_64 machines use physical APIC mode regardless of how many
 * procs/clusters are present (x86_64 ES7000 is an example).
 */
-if (acpi_fadt.revision > FADT2_REVISION_ID)
-if (acpi_fadt.force_apic_physical_destination_mode) {
+if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID)
+if (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) {
 genapic = &apic_cluster;
 goto print;
 }
...
@@ -798,7 +798,7 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
 return gsi;
 /* Don't set up the ACPI SCI because it's already set up */
-if (acpi_fadt.sci_int == gsi)
+if (acpi_gbl_FADT.sci_interrupt == gsi)
 return gsi;
 ioapic = mp_find_ioapic(gsi);
...
@@ -498,7 +498,7 @@ static unsigned long get_cmos_time(void)
 {
 unsigned int year, mon, day, hour, min, sec;
 unsigned long flags;
-unsigned extyear = 0;
+unsigned century = 0;
 spin_lock_irqsave(&rtc_lock, flags);
@@ -510,9 +510,9 @@ static unsigned long get_cmos_time(void)
 mon = CMOS_READ(RTC_MONTH);
 year = CMOS_READ(RTC_YEAR);
 #ifdef CONFIG_ACPI
-if (acpi_fadt.revision >= FADT2_REVISION_ID &&
-acpi_fadt.century)
-extyear = CMOS_READ(acpi_fadt.century);
+if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
+acpi_gbl_FADT.century)
+century = CMOS_READ(acpi_gbl_FADT.century);
 #endif
 } while (sec != CMOS_READ(RTC_SECONDS));
@@ -530,10 +530,10 @@ static unsigned long get_cmos_time(void)
 BCD_TO_BIN(mon);
 BCD_TO_BIN(year);
-if (extyear) {
-BCD_TO_BIN(extyear);
-year += extyear;
-printk(KERN_INFO "Extended CMOS year: %d\n", extyear);
+if (century) {
+BCD_TO_BIN(century);
+year += century * 100;
+printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
 } else {
 /*
 * x86-64 systems only exists since 2002.
@@ -954,7 +954,7 @@ __cpuinit int unsynchronized_tsc(void)
 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
 #ifdef CONFIG_ACPI
 /* But TSC doesn't tick in C3 so don't use it there */
-if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 1000)
+if (acpi_gbl_FADT.header.length > 0 && acpi_gbl_FADT.C3latency < 1000)
 return 1;
 #endif
 return 0;
...
@@ -101,7 +101,7 @@ static __init inline int srat_disabled(void)
 static __init int slit_valid(struct acpi_table_slit *slit)
 {
 int i, j;
-int d = slit->localities;
+int d = slit->locality_count;
 for (i = 0; i < d; i++) {
 for (j = 0; j < d; j++) {
 u8 val = slit->entry[d*i + j];
@@ -127,18 +127,18 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 /* Callback for Proximity Domain -> LAPIC mapping */
 void __init
-acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 {
 int pxm, node;
 if (srat_disabled())
 return;
-if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
+if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
 bad_srat();
 return;
 }
-if (pa->flags.enabled == 0)
+if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
 return;
-pxm = pa->proximity_domain;
+pxm = pa->proximity_domain_lo;
 node = setup_node(pxm);
 if (node < 0) {
 printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -279,7 +279,7 @@ static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
 void __init
-acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
+acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 {
 struct bootnode *nd, oldnode;
 unsigned long start, end;
@@ -288,16 +288,17 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 if (srat_disabled())
 return;
-if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
+if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) {
 bad_srat();
 return;
 }
-if (ma->flags.enabled == 0)
+if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
 return;
-if (ma->flags.hot_pluggable && !save_add_info())
+if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
 return;
-start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
-end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
+start = ma->base_address;
+end = start + ma->length;
 pxm = ma->proximity_domain;
 node = setup_node(pxm);
 if (node < 0) {
@@ -337,7 +338,8 @@ acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
 push_node_boundaries(node, nd->start >> PAGE_SHIFT,
 nd->end >> PAGE_SHIFT);
-if (ma->flags.hot_pluggable && (reserve_hotadd(node, start, end) < 0)) {
+if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) &&
+(reserve_hotadd(node, start, end) < 0)) {
 /* Ignore hotadd region. Undo damage */
 printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
 *nd = oldnode;
@@ -461,7 +463,7 @@ int __node_distance(int a, int b)
 if (!acpi_slit)
 return a == b ? 10 : 20;
-index = acpi_slit->localities * node_to_pxm(a);
+index = acpi_slit->locality_count * node_to_pxm(a);
 return acpi_slit->entry[index + node_to_pxm(b)];
 }
...
@@ -25,7 +25,7 @@ static DECLARE_BITMAP(fallback_slots, 32*MAX_CHECK_BUS);
 /* Static virtual mapping of the MMCONFIG aperture */
 struct mmcfg_virt {
-struct acpi_table_mcfg_config *cfg;
+struct acpi_mcfg_allocation *cfg;
 char __iomem *virt;
 };
 static struct mmcfg_virt *pci_mmcfg_virt;
@@ -33,14 +33,14 @@ static struct mmcfg_virt *pci_mmcfg_virt;
 static char __iomem *get_virt(unsigned int seg, unsigned bus)
 {
 int cfg_num = -1;
-struct acpi_table_mcfg_config *cfg;
+struct acpi_mcfg_allocation *cfg;
 while (1) {
 ++cfg_num;
 if (cfg_num >= pci_mmcfg_config_num)
 break;
 cfg = pci_mmcfg_virt[cfg_num].cfg;
-if (cfg->pci_segment_group_number != seg)
+if (cfg->pci_segment != seg)
 continue;
 if ((cfg->start_bus_number <= bus) &&
 (cfg->end_bus_number >= bus))
@@ -52,7 +52,7 @@ static char __iomem *get_virt(unsigned int seg, unsigned bus)
 this applies to all busses. */
 cfg = &pci_mmcfg_config[0];
 if (pci_mmcfg_config_num == 1 &&
-cfg->pci_segment_group_number == 0 &&
+cfg->pci_segment == 0 &&
 (cfg->start_bus_number | cfg->end_bus_number) == 0)
 return pci_mmcfg_virt[0].virt;
@@ -170,19 +170,19 @@ void __init pci_mmcfg_init(int type)
 if ((pci_probe & PCI_PROBE_MMCONF) == 0)
 return;
-acpi_table_parse(ACPI_MCFG, acpi_parse_mcfg);
+acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
 if ((pci_mmcfg_config_num == 0) ||
 (pci_mmcfg_config == NULL) ||
-(pci_mmcfg_config[0].base_address == 0))
+(pci_mmcfg_config[0].address == 0))
 return;
 /* Only do this check when type 1 works. If it doesn't work
 assume we run on a Mac and always use MCFG */
-if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].base_address,
-pci_mmcfg_config[0].base_address + MMCONFIG_APER_MIN,
+if (type == 1 && !e820_all_mapped(pci_mmcfg_config[0].address,
+pci_mmcfg_config[0].address + MMCONFIG_APER_MIN,
 E820_RESERVED)) {
-printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %x is not E820-reserved\n",
-pci_mmcfg_config[0].base_address);
+printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %lx is not E820-reserved\n",
+(unsigned long)pci_mmcfg_config[0].address);
 printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
 return;
 }
@@ -194,15 +194,16 @@ void __init pci_mmcfg_init(int type)
 }
 for (i = 0; i < pci_mmcfg_config_num; ++i) {
 pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].base_address,
+pci_mmcfg_virt[i].virt = ioremap_nocache(pci_mmcfg_config[i].address,
 MMCONFIG_APER_MAX);
 if (!pci_mmcfg_virt[i].virt) {
 printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
 "segment %d\n",
-pci_mmcfg_config[i].pci_segment_group_number);
+pci_mmcfg_config[i].pci_segment);
 return;
 }
-printk(KERN_INFO "PCI: Using MMCONFIG at %x\n", pci_mmcfg_config[i].base_address);
+printk(KERN_INFO "PCI: Using MMCONFIG at %lx\n",
+(unsigned long)pci_mmcfg_config[i].address);
 }
 unreachable_devices();
...
@@ -26,7 +26,7 @@
 * Pontus Fuchs - Helper functions, cleanup
 * Johann Wiesner - Small compile fixes
 * John Belmonte - ACPI code for Toshiba laptop was a good starting point.
-* ric Burghard - LED display support for W1N
+* ic Burghard - LED display support for W1N
 *
 */
@@ -1128,7 +1128,6 @@ static int asus_model_match(char *model)
 static int asus_hotk_get_info(void)
 {
 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-struct acpi_buffer dsdt = { ACPI_ALLOCATE_BUFFER, NULL };
 union acpi_object *model = NULL;
 int bsts_result;
 char *string = NULL;
@@ -1142,11 +1141,9 @@ static int asus_hotk_get_info(void)
 * HID), this bit will be moved. A global variable asus_info contains
 * the DSDT header.
 */
-status = acpi_get_table(ACPI_TABLE_ID_DSDT, 1, &dsdt);
+status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
 if (ACPI_FAILURE(status))
 printk(KERN_WARNING " Couldn't get the DSDT table header\n");
-else
-asus_info = dsdt.pointer;
 /* We have to write 0 on init this far for all ASUS models */
 if (!write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
@@ -1358,8 +1355,6 @@ static void __exit asus_acpi_exit(void)
 acpi_bus_unregister_driver(&asus_hotk_driver);
 remove_proc_entry(PROC_ASUS, acpi_root_dir);
-kfree(asus_info);
 return;
 }
...
@@ -44,7 +44,7 @@ struct acpi_blacklist_item {
 char oem_id[7];
 char oem_table_id[9];
 u32 oem_revision;
-acpi_table_type table;
+char *table;
 enum acpi_blacklist_predicates oem_revision_predicate;
 char *reason;
 u32 is_critical_error;
@@ -56,18 +56,18 @@ struct acpi_blacklist_item {
 */
 static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
 /* Compaq Presario 1700 */
-{"PTLTD ", " DSDT ", 0x06040000, ACPI_DSDT, less_than_or_equal,
+{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
 "Multiple problems", 1},
 /* Sony FX120, FX140, FX150? */
-{"SONY ", "U0 ", 0x20010313, ACPI_DSDT, less_than_or_equal,
+{"SONY ", "U0 ", 0x20010313, ACPI_SIG_DSDT, less_than_or_equal,
 "ACPI driver problem", 1},
 /* Compaq Presario 800, Insyde BIOS */
-{"INT440", "SYSFexxx", 0x00001001, ACPI_DSDT, less_than_or_equal,
+{"INT440", "SYSFexxx", 0x00001001, ACPI_SIG_DSDT, less_than_or_equal,
 "Does not use _REG to protect EC OpRegions", 1},
 /* IBM 600E - _ADR should return 7, but it returns 1 */
-{"IBM ", "TP600E ", 0x00000105, ACPI_DSDT, less_than_or_equal,
+{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
 "Incorrect _ADR", 1},
-{"ASUS\0\0", "P2B-S ", 0, ACPI_DSDT, all_versions,
+{"ASUS\0\0", "P2B-S ", 0, ACPI_SIG_DSDT, all_versions,
 "Bogus PCI routing", 1},
 {""}
@@ -103,22 +103,21 @@ int __init acpi_blacklisted(void)
 {
 int i = 0;
 int blacklisted = 0;
-struct acpi_table_header *table_header;
+struct acpi_table_header table_header;
 while (acpi_blacklist[i].oem_id[0] != '\0') {
-if (acpi_get_table_header_early
-(acpi_blacklist[i].table, &table_header)) {
+if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
 i++;
 continue;
 }
-if (strncmp(acpi_blacklist[i].oem_id, table_header->oem_id, 6)) {
+if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
 i++;
 continue;
 }
 if (strncmp
-(acpi_blacklist[i].oem_table_id, table_header->oem_table_id,
+(acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
 8)) {
 i++;
 continue;
@@ -127,14 +126,14 @@ int __init acpi_blacklisted(void)
 if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
 || (acpi_blacklist[i].oem_revision_predicate ==
 less_than_or_equal
-&& table_header->oem_revision <=
+&& table_header.oem_revision <=
 acpi_blacklist[i].oem_revision)
 || (acpi_blacklist[i].oem_revision_predicate ==
 greater_than_or_equal
-&& table_header->oem_revision >=
+&& table_header.oem_revision >=
 acpi_blacklist[i].oem_revision)
 || (acpi_blacklist[i].oem_revision_predicate == equal
-&& table_header->oem_revision ==
+&& table_header.oem_revision ==
 acpi_blacklist[i].oem_revision)) {
 printk(KERN_ERR PREFIX
...
@@ -44,9 +44,6 @@ ACPI_MODULE_NAME("acpi_bus")
 extern void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger);
 #endif
-struct fadt_descriptor acpi_fadt;
-EXPORT_SYMBOL(acpi_fadt);
 struct acpi_device *acpi_root;
 struct proc_dir_entry *acpi_root_dir;
 EXPORT_SYMBOL(acpi_root_dir);
@@ -582,11 +579,12 @@ static int __init acpi_bus_init_irq(void)
 return 0;
 }
+acpi_native_uint acpi_gbl_permanent_mmap;
 void __init acpi_early_init(void)
 {
 acpi_status status = AE_OK;
-struct acpi_buffer buffer = { sizeof(acpi_fadt), &acpi_fadt };
 if (acpi_disabled)
 return;
@@ -597,6 +595,15 @@ void __init acpi_early_init(void)
 if (!acpi_strict)
 acpi_gbl_enable_interpreter_slack = TRUE;
+acpi_gbl_permanent_mmap = 1;
+status = acpi_reallocate_root_table();
+if (ACPI_FAILURE(status)) {
+printk(KERN_ERR PREFIX
+"Unable to reallocate ACPI tables\n");
+goto error0;
+}
 status = acpi_initialize_subsystem();
 if (ACPI_FAILURE(status)) {
 printk(KERN_ERR PREFIX
@@ -611,32 +618,25 @@ void __init acpi_early_init(void)
 goto error0;
 }
-/*
- * Get a separate copy of the FADT for use by other drivers.
- */
-status = acpi_get_table(ACPI_TABLE_ID_FADT, 1, &buffer);
-if (ACPI_FAILURE(status)) {
-printk(KERN_ERR PREFIX "Unable to get the FADT\n");
-goto error0;
-}
 #ifdef CONFIG_X86
 if (!acpi_ioapic) {
-extern acpi_interrupt_flags acpi_sci_flags;
+extern u8 acpi_sci_flags;
 /* compatible (0) means level (3) */
-if (acpi_sci_flags.trigger == 0)
-acpi_sci_flags.trigger = 3;
+if (!(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK)) {
+acpi_sci_flags &= ~ACPI_MADT_TRIGGER_MASK;
+acpi_sci_flags |= ACPI_MADT_TRIGGER_LEVEL;
+}
 /* Set PIC-mode SCI trigger type */
-acpi_pic_sci_set_trigger(acpi_fadt.sci_int,
-acpi_sci_flags.trigger);
+acpi_pic_sci_set_trigger(acpi_gbl_FADT.sci_interrupt,
+(acpi_sci_flags & ACPI_MADT_TRIGGER_MASK) >> 2);
 } else {
 extern int acpi_sci_override_gsi;
 /*
-* now that acpi_fadt is initialized,
+* now that acpi_gbl_FADT is initialized,
 * update it with result from INT_SRC_OVR parsing
 */
-acpi_fadt.sci_int = acpi_sci_override_gsi;
+acpi_gbl_FADT.sci_interrupt = acpi_sci_override_gsi;
 }
 #endif
...
@@ -5,7 +5,7 @@
 *****************************************************************************/
 /*
-* Copyright (C) 2000 - 2006, R. Byron Moore
+* Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -133,7 +133,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 }
 }
-/* We could put the returned object (Node) on the object stack for later,
+/*
+* We could put the returned object (Node) on the object stack for later,
 * but for now, we will put it in the "op" object that the parser uses,
 * so we can get it again at the end of this scope
 */
@@ -514,8 +515,33 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
 /* Third arg is the bank_value */
+/* TBD: This arg is a term_arg, not a constant, and must be evaluated */
 arg = arg->common.next;
+/* Currently, only the following constants are supported */
+switch (arg->common.aml_opcode) {
+case AML_ZERO_OP:
+info.bank_value = 0;
+break;
+case AML_ONE_OP:
+info.bank_value = 1;
+break;
+case AML_BYTE_OP:
+case AML_WORD_OP:
+case AML_DWORD_OP:
+case AML_QWORD_OP:
 info.bank_value = (u32) arg->common.value.integer;
+break;
+default:
+info.bank_value = 0;
+ACPI_ERROR((AE_INFO,
+"Non-constant BankValue for BankField is not implemented"));
+}
 /* Fourth arg is the field flags */
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
 #include <acpi/acpi.h>
 #include <acpi/acdispat.h>
 #include <acpi/acnamesp.h>
+#include <acpi/actables.h>

 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsinit")
@@ -90,7 +91,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
	 * We are only interested in NS nodes owned by the table that
	 * was just loaded
	 */
-	if (node->owner_id != info->table_desc->owner_id) {
+	if (node->owner_id != info->owner_id) {
		return (AE_OK);
	}
@@ -150,14 +151,21 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
 ******************************************************************************/

 acpi_status
-acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
+acpi_ds_initialize_objects(acpi_native_uint table_index,
			   struct acpi_namespace_node * start_node)
 {
	acpi_status status;
	struct acpi_init_walk_info info;
+	struct acpi_table_header *table;
+	acpi_owner_id owner_id;

	ACPI_FUNCTION_TRACE(ds_initialize_objects);

+	status = acpi_tb_get_owner_id(table_index, &owner_id);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Starting initialization of namespace objects ****\n"));
	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
@@ -166,7 +174,8 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
	info.op_region_count = 0;
	info.object_count = 0;
	info.device_count = 0;
-	info.table_desc = table_desc;
+	info.table_index = table_index;
+	info.owner_id = owner_id;

	/* Walk entire namespace from the supplied root */
@@ -176,10 +185,14 @@ acpi_ds_initialize_objects(struct acpi_table_desc * table_desc,
		ACPI_EXCEPTION((AE_INFO, status, "During WalkNamespace"));
	}

+	status = acpi_get_table_by_index(table_index, &table);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
			      "\nTable [%4.4s](id %4.4X) - %hd Objects with %hd Devices %hd Methods %hd Regions\n",
-			      table_desc->pointer->signature,
-			      table_desc->owner_id, info.object_count,
+			      table->signature, owner_id, info.object_count,
			      info.device_count, info.method_count,
			      info.op_region_count));
...
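Illustrative sketch (not part of the patch): the dsinit hunks above replace the struct acpi_table_desc pointer with a table index plus an owner ID fetched from the table manager. A minimal sketch of that calling pattern, using only the interfaces visible in the hunks (acpi_tb_get_owner_id, acpi_get_table_by_index); the helper name is hypothetical.

#include <acpi/acpi.h>
#include <acpi/actables.h>

/* Hypothetical helper: resolve a loaded table's header and owner ID from
 * its index, mirroring what the reworked acpi_ds_initialize_objects() does. */
static acpi_status example_get_table_info(acpi_native_uint table_index)
{
	struct acpi_table_header *table;
	acpi_owner_id owner_id;
	acpi_status status;

	/* Owner ID tags namespace nodes created while loading this table */
	status = acpi_tb_get_owner_id(table_index, &owner_id);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* The header (signature, length, ...) is fetched separately by index */
	status = acpi_get_table_by_index(table_index, &table);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	ACPI_INFO((AE_INFO, "Table [%4.4s] owned by ID %u",
		   table->signature, owner_id));
	return (AE_OK);
}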
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -327,7 +327,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Execute method %p, currentstate=%p\n",
+			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
@@ -351,49 +351,7 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
		return_ACPI_STATUS(status);
	}

-	/*
-	 * 1) Parse the method. All "normal" methods are parsed for each execution.
-	 * Internal methods (_OSI, etc.) do not require parsing.
-	 */
-	if (!(obj_desc->method.method_flags & AML_METHOD_INTERNAL_ONLY)) {
-
-		/* Create a new walk state for the parse */
-
-		next_walk_state =
-		    acpi_ds_create_walk_state(obj_desc->method.owner_id, op,
-					      obj_desc, NULL);
-		if (!next_walk_state) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		/* Create and init a parse tree root */
-
-		op = acpi_ps_create_scope_op();
-		if (!op) {
-			status = AE_NO_MEMORY;
-			goto cleanup;
-		}
-
-		status = acpi_ds_init_aml_walk(next_walk_state, op, method_node,
-					       obj_desc->method.aml_start,
-					       obj_desc->method.aml_length,
-					       NULL, 1);
-		if (ACPI_FAILURE(status)) {
-			acpi_ps_delete_parse_tree(op);
-			goto cleanup;
-		}
-
-		/* Begin AML parse (deletes next_walk_state) */
-
-		status = acpi_ps_parse_aml(next_walk_state);
-		acpi_ps_delete_parse_tree(op);
-		if (ACPI_FAILURE(status)) {
-			goto cleanup;
-		}
-	}
-
-	/* 2) Begin method execution. Create a new walk state */
+	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
@@ -424,7 +382,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
-				       obj_desc->method.aml_length, info, 3);
+				       obj_desc->method.aml_length, info,
+				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
@@ -445,8 +404,8 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread,
	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "Starting nested execution, newstate=%p\n",
-			  next_walk_state));
+			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
+			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */
...
@@ -5,7 +5,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -260,7 +260,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
	}

	obj_desc->buffer.flags |= AOPOBJ_DATA_VALID;
-	op->common.node = (struct acpi_namespace_node *)obj_desc;
+	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
	return_ACPI_STATUS(AE_OK);
 }

@@ -270,7 +270,8 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
 *
 * PARAMETERS:  walk_state      - Current walk state
 *              Op              - Parser object to be translated
- *              package_length  - Number of elements in the package
+ *              element_count   - Number of elements in the package - this is
+ *                                the num_elements argument to Package()
 *              obj_desc_ptr    - Where the ACPI internal object is returned
 *
 * RETURN:      Status
@@ -278,18 +279,29 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
 * DESCRIPTION: Translate a parser Op package object to the equivalent
 *              namespace object
 *
+ * NOTE: The number of elements in the package will be always be the num_elements
+ *       count, regardless of the number of elements in the package list. If
+ *       num_elements is smaller, only that many package list elements are used.
+ *       if num_elements is larger, the Package object is padded out with
+ *       objects of type Uninitialized (as per ACPI spec.)
+ *
+ *       Even though the ASL compilers do not allow num_elements to be smaller
+ *       than the Package list length (for the fixed length package opcode), some
+ *       BIOS code modifies the AML on the fly to adjust the num_elements, and
+ *       this code compensates for that. This also provides compatibility with
+ *       other AML interpreters.
+ *
 ******************************************************************************/

 acpi_status
 acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
				    union acpi_parse_object *op,
-				    u32 package_length,
+				    u32 element_count,
				    union acpi_operand_object **obj_desc_ptr)
 {
	union acpi_parse_object *arg;
	union acpi_parse_object *parent;
	union acpi_operand_object *obj_desc = NULL;
-	u32 package_list_length;
	acpi_status status = AE_OK;
	acpi_native_uint i;
@@ -318,32 +330,13 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
		obj_desc->package.node = parent->common.node;
	}

-	obj_desc->package.count = package_length;
-
-	/* Count the number of items in the package list */
-
-	arg = op->common.value.arg;
-	arg = arg->common.next;
-	for (package_list_length = 0; arg; package_list_length++) {
-		arg = arg->common.next;
-	}
-
-	/*
-	 * The package length (number of elements) will be the greater
-	 * of the specified length and the length of the initializer list
-	 */
-	if (package_list_length > package_length) {
-		obj_desc->package.count = package_list_length;
-	}
-
	/*
-	 * Allocate the pointer array (array of pointers to the
-	 * individual objects). Add an extra pointer slot so
-	 * that the list is always null terminated.
+	 * Allocate the element array (array of pointers to the individual
+	 * objects) based on the num_elements parameter. Add an extra pointer slot
+	 * so that the list is always null terminated.
	 */
	obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
-							   obj_desc->package.
-							   count +
+							   element_count +
							   1) * sizeof(void *));

	if (!obj_desc->package.elements) {
@@ -351,15 +344,20 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

+	obj_desc->package.count = element_count;
+
	/*
-	 * Initialize all elements of the package
+	 * Initialize the elements of the package, up to the num_elements count.
+	 * Package is automatically padded with uninitialized (NULL) elements
+	 * if num_elements is greater than the package list length. Likewise,
+	 * Package is truncated if num_elements is less than the list length.
	 */
	arg = op->common.value.arg;
	arg = arg->common.next;
-	for (i = 0; arg; i++) {
+	for (i = 0; arg && (i < element_count); i++) {
		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {

-			/* Object (package or buffer) is already built */
+			/* This package element is already built, just get it */

			obj_desc->package.elements[i] =
			    ACPI_CAST_PTR(union acpi_operand_object,
@@ -373,8 +371,14 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
		arg = arg->common.next;
	}

+	if (!arg) {
+		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+				  "Package List length larger than NumElements count (%X), truncated\n",
+				  element_count));
+	}
+
	obj_desc->package.flags |= AOPOBJ_DATA_VALID;
-	op->common.node = (struct acpi_namespace_node *)obj_desc;
+	op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
	return_ACPI_STATUS(status);
 }
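Illustrative sketch (not part of the patch): the NOTE above describes the pad/truncate rule for Package(), where NumElements always wins over the length of the initializer list. A standalone toy model of that rule, with hypothetical names, independent of ACPICA:

#include <stdlib.h>

/* Toy model: the package always has num_elements slots; extra initializers
 * are dropped, missing ones stay "uninitialized" (NULL here). */
static void **build_package(size_t num_elements,
			    void **init_list, size_t list_len)
{
	/* one extra slot keeps the array NULL-terminated */
	void **elements = calloc(num_elements + 1, sizeof(*elements));
	size_t i;

	if (!elements)
		return NULL;

	for (i = 0; i < num_elements && i < list_len; i++)
		elements[i] = init_list[i];	/* truncate if the list is longer */

	/* slots [list_len .. num_elements-1] remain NULL = Uninitialized */
	return elements;
}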
@@ -488,8 +492,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
		/*
		 * Defer evaluation of Buffer term_arg operand
		 */
-		obj_desc->buffer.node = (struct acpi_namespace_node *)
-		    walk_state->operands[0];
+		obj_desc->buffer.node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  walk_state->operands[0]);
		obj_desc->buffer.aml_start = op->named.data;
		obj_desc->buffer.aml_length = op->named.length;
		break;
@@ -499,8 +504,9 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
		/*
		 * Defer evaluation of Package term_arg operand
		 */
-		obj_desc->package.node = (struct acpi_namespace_node *)
-		    walk_state->operands[0];
+		obj_desc->package.node =
+		    ACPI_CAST_PTR(struct acpi_namespace_node,
+				  walk_state->operands[0]);
		obj_desc->package.aml_start = op->named.data;
		obj_desc->package.aml_length = op->named.length;
		break;
...
@@ -6,7 +6,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -114,7 +114,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
	}

	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, 1);
+				       aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
@@ -157,7 +157,7 @@ acpi_ds_execute_arguments(struct acpi_namespace_node *node,
	/* Execute the opcode and arguments */

	status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
-				       aml_length, NULL, 3);
+				       aml_length, NULL, ACPI_IMODE_EXECUTE);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		goto cleanup;
...
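Illustrative sketch (not part of the patch): the hunks above and in dsmethod.c replace the bare pass numbers 1 and 3 with the named interpreter modes ACPI_IMODE_LOAD_PASS1 and ACPI_IMODE_EXECUTE. A minimal, hypothetical wrapper showing the same call with the symbolic constant; the header choice and wrapper name are assumptions.

#include <acpi/acpi.h>
#include <acpi/acdispat.h>

/* Hypothetical wrapper: same acpi_ds_init_aml_walk() call as in the hunk
 * above, but the final argument is the named mode instead of a bare 3. */
static acpi_status example_start_execute_walk(struct acpi_walk_state *walk_state,
					      union acpi_parse_object *op,
					      u8 *aml_start, u32 aml_length)
{
	return acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
				     aml_length, NULL,
				     ACPI_IMODE_EXECUTE /* was: 3 */);
}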
@@ -5,7 +5,7 @@
 ******************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...
@@ -6,7 +6,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -219,7 +219,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
	if (!op) {
		status = acpi_ds_load2_begin_op(walk_state, out_op);
		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
+			goto error_exit;
		}

		op = *out_op;
@@ -238,7 +238,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
			status = acpi_ds_scope_stack_pop(walk_state);
			if (ACPI_FAILURE(status)) {
-				return_ACPI_STATUS(status);
+				goto error_exit;
			}
		}
	}
@@ -287,7 +287,7 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
		status = acpi_ds_result_stack_push(walk_state);
		if (ACPI_FAILURE(status)) {
-			return_ACPI_STATUS(status);
+			goto error_exit;
		}

		status = acpi_ds_exec_begin_control_op(walk_state, op);
@@ -328,6 +328,10 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
	/* Nothing to do here during method execution */

	return_ACPI_STATUS(status);
+
+      error_exit:
+	status = acpi_ds_method_error(status, walk_state);
+	return_ACPI_STATUS(status);
 }

 /*****************************************************************************
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -196,6 +196,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state,
		 * one of the opcodes that actually opens a scope
		 */
		switch (node->type) {
+		case ACPI_TYPE_ANY:
		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
		case ACPI_TYPE_DEVICE:
		case ACPI_TYPE_POWER:
@@ -546,6 +547,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
	acpi_status status;
	acpi_object_type object_type;
	char *buffer_ptr;
+	u32 flags;

	ACPI_FUNCTION_TRACE(ds_load2_begin_op);
@@ -669,6 +671,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
		 * one of the opcodes that actually opens a scope
		 */
		switch (node->type) {
+		case ACPI_TYPE_ANY:
		case ACPI_TYPE_LOCAL_SCOPE:	/* Scope */
		case ACPI_TYPE_DEVICE:
		case ACPI_TYPE_POWER:
@@ -750,12 +753,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
			break;
		}

-		/* Add new entry into namespace */
+		flags = ACPI_NS_NO_UPSEARCH;
+		if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
+
+			/* Execution mode, node cannot already exist, node is temporary */
+
+			flags |= (ACPI_NS_ERROR_IF_FOUND | ACPI_NS_TEMPORARY);
+		}
+
+		/* Add new entry or lookup existing entry */

		status =
		    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
-				   object_type, ACPI_IMODE_LOAD_PASS2,
-				   ACPI_NS_NO_UPSEARCH, walk_state, &(node));
+				   object_type, ACPI_IMODE_LOAD_PASS2, flags,
+				   walk_state, &node);
		break;
	}
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
...
@@ -872,9 +872,8 @@ static int __init acpi_ec_get_real_ecdt(void)
	acpi_status status;
	struct acpi_table_ecdt *ecdt_ptr;

-	status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING,
-					 (struct acpi_table_header **)
-					 &ecdt_ptr);
+	status = acpi_get_table(ACPI_SIG_ECDT, 1,
+				(struct acpi_table_header **)&ecdt_ptr);
	if (ACPI_FAILURE(status))
		return -ENODEV;
@@ -891,14 +890,14 @@ static int __init acpi_ec_get_real_ecdt(void)
	if (acpi_ec_mode == EC_INTR) {
		init_waitqueue_head(&ec_ecdt->wait);
	}
-	ec_ecdt->command_addr = ecdt_ptr->ec_control.address;
-	ec_ecdt->data_addr = ecdt_ptr->ec_data.address;
-	ec_ecdt->gpe = ecdt_ptr->gpe_bit;
+	ec_ecdt->command_addr = ecdt_ptr->control.address;
+	ec_ecdt->data_addr = ecdt_ptr->data.address;
+	ec_ecdt->gpe = ecdt_ptr->gpe;
	/* use the GL just to be safe */
	ec_ecdt->global_lock = TRUE;
	ec_ecdt->uid = ecdt_ptr->uid;

-	status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle);
+	status = acpi_get_handle(NULL, ecdt_ptr->id, &ec_ecdt->handle);
	if (ACPI_FAILURE(status)) {
		goto error;
	}
...
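Illustrative sketch (not part of the patch): the ec.c hunk above switches from acpi_get_firmware_table() to acpi_get_table() and to the renamed struct acpi_table_ecdt fields (control/data/gpe instead of ec_control/ec_data/gpe_bit). A minimal sketch of the new access pattern; the function name is hypothetical.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <acpi/acpi.h>

/* Hypothetical helper: fetch the ECDT and print its register addresses
 * using the renamed fields. */
static int example_read_ecdt(void)
{
	struct acpi_table_ecdt *ecdt;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_ECDT, 1,
				(struct acpi_table_header **)&ecdt);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	printk(KERN_INFO "EC: cmd=0x%llx data=0x%llx gpe=%u\n",
	       (unsigned long long)ecdt->control.address,
	       (unsigned long long)ecdt->data.address, ecdt->gpe);
	return 0;
}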
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -70,13 +70,6 @@ acpi_status acpi_ev_initialize_events(void)
	ACPI_FUNCTION_TRACE(ev_initialize_events);

-	/* Make sure we have ACPI tables */
-
-	if (!acpi_gbl_DSDT) {
-		ACPI_WARNING((AE_INFO, "No ACPI tables present!"));
-		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
-	}
-
	/*
	 * Initialize the Fixed and General Purpose Events. This is done prior to
	 * enabling SCIs to prevent interrupts from occurring before the handlers are
@@ -211,8 +204,7 @@ static acpi_status acpi_ev_fixed_event_initialize(void)
		if (acpi_gbl_fixed_event_info[i].enable_register_id != 0xFF) {
			status =
			    acpi_set_register(acpi_gbl_fixed_event_info[i].
-					      enable_register_id, 0,
-					      ACPI_MTX_LOCK);
+					      enable_register_id, 0);
			if (ACPI_FAILURE(status)) {
				return (status);
			}
@@ -298,7 +290,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
	/* Clear the status bit */

	(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
-				status_register_id, 1, ACPI_MTX_DO_NOT_LOCK);
+				status_register_id, 1);

	/*
	 * Make sure we've got a handler. If not, report an error.
@@ -306,8 +298,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
	 */
	if (NULL == acpi_gbl_fixed_event_handlers[event].handler) {
		(void)acpi_set_register(acpi_gbl_fixed_event_info[event].
-					enable_register_id, 0,
-					ACPI_MTX_DO_NOT_LOCK);
+					enable_register_id, 0);

		ACPI_ERROR((AE_INFO,
			    "No installed handler for fixed event [%08X]",
...
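Illustrative sketch (not part of the patch): across these event files the ACPI_MTX_LOCK / ACPI_MTX_DO_NOT_LOCK argument disappears from acpi_set_register() and acpi_get_register(); locking is handled inside the interface. A minimal sketch of the two-argument calls; the helper name is hypothetical, and the bit-register ID used here is just an example.

#include <acpi/acpi.h>

/* Hypothetical helper: clear and re-read a fixed-event status bit with the
 * new two-argument register interface. */
static acpi_status example_clear_power_button_status(void)
{
	u32 value;
	acpi_status status;

	/* Writing 1 to a status bit clears the pending event */
	status = acpi_set_register(ACPI_BITREG_POWER_BUTTON_STATUS, 1);
	if (ACPI_FAILURE(status))
		return status;

	/* Read it back; again only two arguments */
	return acpi_get_register(ACPI_BITREG_POWER_BUTTON_STATUS, &value);
}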
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -121,7 +121,9 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}
-	register_bit = gpe_event_info->register_bit;
+	register_bit = (u8)
+	    (1 <<
+	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));

	/* 1) Disable case. Simply clear all enable bits */
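Illustrative sketch (not part of the patch): the hunk above computes the per-GPE bit from the GPE number and the register's base GPE number instead of looking it up in acpi_gbl_decode_to8bit. For example, GPE 0x13 in a register whose base is 0x10 maps to bit (0x13 - 0x10) = 3, i.e. mask 0x08. A standalone sketch with hypothetical names:

/* Toy version of the new bit computation used by the GPE code above. */
static unsigned char example_gpe_register_bit(unsigned char gpe_number,
					      unsigned char base_gpe_number)
{
	return (unsigned char)(1 << (gpe_number - base_gpe_number));
}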
@@ -458,8 +460,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
			/* Examine one GPE bit */

-			if (enabled_status_byte &
-			    acpi_gbl_decode_to8bit[j]) {
+			if (enabled_status_byte & (1 << j)) {
				/*
				 * Found an active GPE. Dispatch the event to a handler
				 * or method.
@@ -570,7 +571,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
-					"While evaluating GPE method [%4.4s]",
+					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
@@ -618,6 +619,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

+	acpi_gpe_count++;
+
	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
@@ -633,20 +636,23 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
		}
	}

-	/* Save current system state */
-
-	if (acpi_gbl_system_awake_and_running) {
-		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
-	} else {
-		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_SYSTEM_RUNNING);
+	if (!acpi_gbl_system_awake_and_running) {
+		/*
+		 * We just woke up because of a wake GPE. Disable any further GPEs
+		 * until we are fully up and running (Only wake GPEs should be enabled
+		 * at this time, but we just brute-force disable them all.)
+		 * 1) We must disable this particular wake GPE so it won't fire again
+		 * 2) We want to disable all wake GPEs, since we are now awake
+		 */
+		(void)acpi_hw_disable_all_gpes();
	}

	/*
-	 * Dispatch the GPE to either an installed handler, or the control
-	 * method associated with this GPE (_Lxx or _Exx).
-	 * If a handler exists, we invoke it and do not attempt to run the method.
-	 * If there is neither a handler nor a method, we disable the level to
-	 * prevent further events from coming in here.
+	 * Dispatch the GPE to either an installed handler, or the control method
+	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
+	 * it and do not attempt to run the method. If there is neither a handler
+	 * nor a method, we disable this GPE to prevent further such pointless
+	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:
@@ -677,8 +683,8 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
	case ACPI_GPE_DISPATCH_METHOD:

		/*
-		 * Disable GPE, so it doesn't keep firing before the method has a
-		 * chance to run.
+		 * Disable the GPE, so it doesn't keep firing before the method has a
+		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
@@ -726,50 +732,3 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
	return_UINT32(ACPI_INTERRUPT_HANDLED);
 }
#ifdef ACPI_GPE_NOTIFY_CHECK
/*******************************************************************************
* TBD: NOT USED, PROTOTYPE ONLY AND WILL PROBABLY BE REMOVED
*
* FUNCTION: acpi_ev_check_for_wake_only_gpe
*
* PARAMETERS: gpe_event_info - info for this GPE
*
* RETURN: Status
*
* DESCRIPTION: Determine if a a GPE is "wake-only".
*
* Called from Notify() code in interpreter when a "DeviceWake"
* Notify comes in.
*
******************************************************************************/
acpi_status
acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
acpi_status status;
ACPI_FUNCTION_TRACE(ev_check_for_wake_only_gpe);
if ((gpe_event_info) && /* Only >0 for _Lxx/_Exx */
((gpe_event_info->flags & ACPI_GPE_SYSTEM_MASK) == ACPI_GPE_SYSTEM_RUNNING)) { /* System state at GPE time */
/* This must be a wake-only GPE, disable it */
status = acpi_ev_disable_gpe(gpe_event_info);
/* Set GPE to wake-only. Do not change wake disabled/enabled status */
acpi_ev_set_gpe_type(gpe_event_info, ACPI_GPE_TYPE_WAKE);
ACPI_INFO((AE_INFO,
"GPE %p was updated from wake/run to wake-only",
gpe_event_info));
/* This was a wake-only GPE */
return_ACPI_STATUS(AE_WAKE_ONLY_GPE);
}
return_ACPI_STATUS(AE_OK);
}
#endif
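Illustrative sketch (not part of the patch): the dispatch comment above describes the policy that an installed handler takes precedence over an _Lxx/_Exx method, and a GPE with neither is simply disabled. A minimal sketch of that decision, using only symbols visible in the hunks; the function name is hypothetical and the handler/method bodies are elided.

#include <acpi/acpi.h>
#include <acpi/acevents.h>

/* Hypothetical summary of the dispatch policy in acpi_ev_gpe_dispatch(). */
static void example_dispatch_policy(struct acpi_gpe_event_info *gpe_event_info)
{
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:
		/* an installed handler is invoked directly */
		break;

	case ACPI_GPE_DISPATCH_METHOD:
		/* disable first, then queue the _Lxx/_Exx method asynchronously */
		(void)acpi_ev_disable_gpe(gpe_event_info);
		break;

	default:
		/* no handler and no method: disable to stop pointless events */
		(void)acpi_ev_disable_gpe(gpe_event_info);
		break;
	}
}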
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 ...@@ -529,7 +529,7 @@ static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
/* Install new interrupt handler if not SCI_INT */ /* Install new interrupt handler if not SCI_INT */
if (interrupt_number != acpi_gbl_FADT->sci_int) { if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
status = acpi_os_install_interrupt_handler(interrupt_number, status = acpi_os_install_interrupt_handler(interrupt_number,
acpi_ev_gpe_xrupt_handler, acpi_ev_gpe_xrupt_handler,
gpe_xrupt); gpe_xrupt);
...@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt) ...@@ -567,7 +567,7 @@ acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
/* We never want to remove the SCI interrupt handler */ /* We never want to remove the SCI interrupt handler */
if (gpe_xrupt->interrupt_number == acpi_gbl_FADT->sci_int) { if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
gpe_xrupt->gpe_block_list_head = NULL; gpe_xrupt->gpe_block_list_head = NULL;
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
...@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block) ...@@ -796,30 +796,31 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
(u8) (gpe_block->block_base_number + (u8) (gpe_block->block_base_number +
(i * ACPI_GPE_REGISTER_WIDTH)); (i * ACPI_GPE_REGISTER_WIDTH));
ACPI_STORE_ADDRESS(this_register->status_address.address, this_register->status_address.address =
(gpe_block->block_address.address + i)); gpe_block->block_address.address + i;
ACPI_STORE_ADDRESS(this_register->enable_address.address, this_register->enable_address.address =
(gpe_block->block_address.address gpe_block->block_address.address + i +
+ i + gpe_block->register_count)); gpe_block->register_count;
this_register->status_address.address_space_id = this_register->status_address.space_id =
gpe_block->block_address.address_space_id; gpe_block->block_address.space_id;
this_register->enable_address.address_space_id = this_register->enable_address.space_id =
gpe_block->block_address.address_space_id; gpe_block->block_address.space_id;
this_register->status_address.register_bit_width = this_register->status_address.bit_width =
ACPI_GPE_REGISTER_WIDTH; ACPI_GPE_REGISTER_WIDTH;
this_register->enable_address.register_bit_width = this_register->enable_address.bit_width =
ACPI_GPE_REGISTER_WIDTH; ACPI_GPE_REGISTER_WIDTH;
this_register->status_address.register_bit_offset = this_register->status_address.bit_offset =
ACPI_GPE_REGISTER_WIDTH; ACPI_GPE_REGISTER_WIDTH;
this_register->enable_address.register_bit_offset = this_register->enable_address.bit_offset =
ACPI_GPE_REGISTER_WIDTH; ACPI_GPE_REGISTER_WIDTH;
/* Init the event_info for each GPE within this register */ /* Init the event_info for each GPE within this register */
for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
this_event->register_bit = acpi_gbl_decode_to8bit[j]; this_event->gpe_number =
(u8) (this_register->base_gpe_number + j);
this_event->register_info = this_register; this_event->register_info = this_register;
this_event++; this_event++;
} }
...@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1109,11 +1110,12 @@ acpi_status acpi_ev_gpe_initialize(void)
* If EITHER the register length OR the block address are zero, then that * If EITHER the register length OR the block address are zero, then that
* particular block is not supported. * particular block is not supported.
*/ */
if (acpi_gbl_FADT->gpe0_blk_len && acpi_gbl_FADT->xgpe0_blk.address) { if (acpi_gbl_FADT.gpe0_block_length &&
acpi_gbl_FADT.xgpe0_block.address) {
/* GPE block 0 exists (has both length and address > 0) */ /* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16) (acpi_gbl_FADT->gpe0_blk_len / 2); register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
gpe_number_max = gpe_number_max =
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1; (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
...@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1121,9 +1123,9 @@ acpi_status acpi_ev_gpe_initialize(void)
/* Install GPE Block 0 */ /* Install GPE Block 0 */
status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT->xgpe0_blk, &acpi_gbl_FADT.xgpe0_block,
register_count0, 0, register_count0, 0,
acpi_gbl_FADT->sci_int, acpi_gbl_FADT.sci_interrupt,
&acpi_gbl_gpe_fadt_blocks[0]); &acpi_gbl_gpe_fadt_blocks[0]);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
...@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1132,20 +1134,21 @@ acpi_status acpi_ev_gpe_initialize(void)
} }
} }
if (acpi_gbl_FADT->gpe1_blk_len && acpi_gbl_FADT->xgpe1_blk.address) { if (acpi_gbl_FADT.gpe1_block_length &&
acpi_gbl_FADT.xgpe1_block.address) {
/* GPE block 1 exists (has both length and address > 0) */ /* GPE block 1 exists (has both length and address > 0) */
register_count1 = (u16) (acpi_gbl_FADT->gpe1_blk_len / 2); register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
/* Check for GPE0/GPE1 overlap (if both banks exist) */ /* Check for GPE0/GPE1 overlap (if both banks exist) */
if ((register_count0) && if ((register_count0) &&
(gpe_number_max >= acpi_gbl_FADT->gpe1_base)) { (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1", "GPE0 block (GPE 0 to %d) overlaps the GPE1 block (GPE %d to %d) - Ignoring GPE1",
gpe_number_max, acpi_gbl_FADT->gpe1_base, gpe_number_max, acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT->gpe1_base + acpi_gbl_FADT.gpe1_base +
((register_count1 * ((register_count1 *
ACPI_GPE_REGISTER_WIDTH) - 1))); ACPI_GPE_REGISTER_WIDTH) - 1)));
...@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1157,10 +1160,11 @@ acpi_status acpi_ev_gpe_initialize(void)
status = status =
acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device, acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
&acpi_gbl_FADT->xgpe1_blk, &acpi_gbl_FADT.xgpe1_block,
register_count1, register_count1,
acpi_gbl_FADT->gpe1_base, acpi_gbl_FADT.gpe1_base,
acpi_gbl_FADT->sci_int, acpi_gbl_FADT.
sci_interrupt,
&acpi_gbl_gpe_fadt_blocks &acpi_gbl_gpe_fadt_blocks
[1]); [1]);
...@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void) ...@@ -1173,7 +1177,7 @@ acpi_status acpi_ev_gpe_initialize(void)
* GPE0 and GPE1 do not have to be contiguous in the GPE number * GPE0 and GPE1 do not have to be contiguous in the GPE number
* space. However, GPE0 always starts at GPE number zero. * space. However, GPE0 always starts at GPE number zero.
*/ */
gpe_number_max = acpi_gbl_FADT->gpe1_base + gpe_number_max = acpi_gbl_FADT.gpe1_base +
((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1); ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
} }
} }
......
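Illustrative sketch (not part of the patch): throughout the GPE block code above, acpi_gbl_FADT changes from a pointer (acpi_gbl_FADT->sci_int, ->gpe0_blk_len, ->xgpe0_blk) to a statically allocated struct with renamed fields (acpi_gbl_FADT.sci_interrupt, .gpe0_block_length, .xgpe0_block). A minimal sketch of the new access pattern; the helper name is hypothetical.

#include <acpi/acpi.h>

/* Hypothetical helper: report the SCI and GPE block 0 geometry with the
 * new field names; note the '.' accesses instead of '->'. */
static void example_show_fadt_gpe0(void)
{
	u16 register_count0;

	if (!acpi_gbl_FADT.gpe0_block_length ||
	    !acpi_gbl_FADT.xgpe0_block.address) {
		return;		/* GPE block 0 not supported on this platform */
	}

	/* Half of the block is status registers, half is enable registers */
	register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);

	ACPI_INFO((AE_INFO, "SCI %u, GPE0 registers: %u",
		   acpi_gbl_FADT.sci_interrupt, register_count0));
}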
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
			       u32 bit_width, acpi_integer * value)
 {
	acpi_status status;
-	acpi_status status2;
	acpi_adr_space_handler handler;
	acpi_adr_space_setup region_setup;
	union acpi_operand_object *handler_desc;
@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
		 * setup will potentially execute control methods
		 * (e.g., _REG method for this region)
		 */
-		acpi_ex_exit_interpreter();
+		acpi_ex_relinquish_interpreter();

		status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
				      handler_desc->address_space.context,
@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
		/* Re-enter the interpreter */

-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();

		/* Check for failure of the Region Setup */
@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
		 * exit the interpreter because the handler *might* block -- we don't
		 * know what it will do, so we can't hold the lock on the intepreter.
		 */
-		acpi_ex_exit_interpreter();
+		acpi_ex_relinquish_interpreter();
	}

	/* Call the handler */
@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
		 * We just returned from a non-default handler, we must re-enter the
		 * interpreter
		 */
-		status2 = acpi_ex_enter_interpreter();
-		if (ACPI_FAILURE(status2)) {
-			return_ACPI_STATUS(status2);
-		}
+		acpi_ex_reacquire_interpreter();
	}

	return_ACPI_STATUS(status);
...
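Illustrative sketch (not part of the patch): the evregion.c hunks above replace the exit/enter pairs around external callouts with acpi_ex_relinquish_interpreter() and acpi_ex_reacquire_interpreter(), which no longer return a status. A minimal sketch of that pattern; the wrapper name and callout are hypothetical placeholders, and the header choice is an assumption.

#include <acpi/acpi.h>
#include <acpi/acinterp.h>

/* Hypothetical wrapper: drop the interpreter lock around a callout that may
 * block or run control methods (e.g. _REG), then take it back. */
static acpi_status example_callout_outside_interpreter(acpi_status (*callout)(void))
{
	acpi_status status;

	acpi_ex_relinquish_interpreter();	/* callout may block */
	status = callout();
	acpi_ex_reacquire_interpreter();	/* returns void in the new API */

	return status;
}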
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -48,6 +48,11 @@ ...@@ -48,6 +48,11 @@
#define _COMPONENT ACPI_EVENTS #define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evrgnini") ACPI_MODULE_NAME("evrgnini")
/* Local prototypes */
static u8 acpi_ev_match_pci_root_bridge(char *id);
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node);
/******************************************************************************* /*******************************************************************************
* *
* FUNCTION: acpi_ev_system_memory_region_setup * FUNCTION: acpi_ev_system_memory_region_setup
...@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini") ...@@ -62,6 +67,7 @@ ACPI_MODULE_NAME("evrgnini")
* DESCRIPTION: Setup a system_memory operation region * DESCRIPTION: Setup a system_memory operation region
* *
******************************************************************************/ ******************************************************************************/
acpi_status acpi_status
acpi_ev_system_memory_region_setup(acpi_handle handle, acpi_ev_system_memory_region_setup(acpi_handle handle,
u32 function, u32 function,
...@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -168,9 +174,9 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
union acpi_operand_object *handler_obj; union acpi_operand_object *handler_obj;
struct acpi_namespace_node *parent_node; struct acpi_namespace_node *parent_node;
struct acpi_namespace_node *pci_root_node; struct acpi_namespace_node *pci_root_node;
struct acpi_namespace_node *pci_device_node;
union acpi_operand_object *region_obj = union acpi_operand_object *region_obj =
(union acpi_operand_object *)handle; (union acpi_operand_object *)handle;
struct acpi_device_id object_hID;
ACPI_FUNCTION_TRACE(ev_pci_config_region_setup); ACPI_FUNCTION_TRACE(ev_pci_config_region_setup);
...@@ -215,27 +221,14 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -215,27 +221,14 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
pci_root_node = parent_node; pci_root_node = parent_node;
while (pci_root_node != acpi_gbl_root_node) { while (pci_root_node != acpi_gbl_root_node) {
status =
acpi_ut_execute_HID(pci_root_node, &object_hID); /* Get the _HID/_CID in order to detect a root_bridge */
if (ACPI_SUCCESS(status)) {
/* if (acpi_ev_is_pci_root_bridge(pci_root_node)) {
* Got a valid _HID string, check if this is a PCI root.
* New for ACPI 3.0: check for a PCI Express root also.
*/
if (!
(ACPI_STRNCMP
(object_hID.value, PCI_ROOT_HID_STRING,
sizeof(PCI_ROOT_HID_STRING)))
||
!(ACPI_STRNCMP
(object_hID.value,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
/* Install a handler for this PCI root bridge */ /* Install a handler for this PCI root bridge */
status = status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) { if (status == AE_SAME_HANDLER) {
/* /*
...@@ -245,8 +238,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -245,8 +238,7 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
*/ */
status = AE_OK; status = AE_OK;
} else { } else {
ACPI_EXCEPTION((AE_INFO, ACPI_EXCEPTION((AE_INFO, status,
status,
"Could not install PciConfig handler for Root Bridge %4.4s", "Could not install PciConfig handler for Root Bridge %4.4s",
acpi_ut_get_node_name acpi_ut_get_node_name
(pci_root_node))); (pci_root_node)));
...@@ -254,7 +246,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -254,7 +246,6 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
} }
break; break;
} }
}
pci_root_node = acpi_ns_get_parent_node(pci_root_node); pci_root_node = acpi_ns_get_parent_node(pci_root_node);
} }
...@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -282,14 +273,25 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* /*
* For PCI_Config space access, we need the segment, bus, * For PCI_Config space access, we need the segment, bus,
* device and function numbers. Acquire them here. * device and function numbers. Acquire them here.
*
* Find the parent device object. (This allows the operation region to be
* within a subscope under the device, such as a control method.)
*/ */
pci_device_node = region_obj->region.node;
while (pci_device_node && (pci_device_node->type != ACPI_TYPE_DEVICE)) {
pci_device_node = acpi_ns_get_parent_node(pci_device_node);
}
if (!pci_device_node) {
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
/* /*
* Get the PCI device and function numbers from the _ADR object * Get the PCI device and function numbers from the _ADR object
* contained in the parent's scope. * contained in the parent's scope.
*/ */
status = status =
acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, parent_node, acpi_ut_evaluate_numeric_object(METHOD_NAME__ADR, pci_device_node,
&pci_value); &pci_value);
/* /*
...@@ -327,6 +329,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle, ...@@ -327,6 +329,91 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
/*******************************************************************************
*
* FUNCTION: acpi_ev_match_pci_root_bridge
*
* PARAMETERS: Id - The HID/CID in string format
*
* RETURN: TRUE if the Id is a match for a PCI/PCI-Express Root Bridge
*
* DESCRIPTION: Determine if the input ID is a PCI Root Bridge ID.
*
******************************************************************************/
static u8 acpi_ev_match_pci_root_bridge(char *id)
{
/*
* Check if this is a PCI root.
* ACPI 3.0+: check for a PCI Express root also.
*/
if (!(ACPI_STRNCMP(id,
PCI_ROOT_HID_STRING,
sizeof(PCI_ROOT_HID_STRING))) ||
!(ACPI_STRNCMP(id,
PCI_EXPRESS_ROOT_HID_STRING,
sizeof(PCI_EXPRESS_ROOT_HID_STRING)))) {
return (TRUE);
}
return (FALSE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_is_pci_root_bridge
*
* PARAMETERS: Node - Device node being examined
*
* RETURN: TRUE if device is a PCI/PCI-Express Root Bridge
*
* DESCRIPTION: Determine if the input device represents a PCI Root Bridge by
* examining the _HID and _CID for the device.
*
******************************************************************************/
static u8 acpi_ev_is_pci_root_bridge(struct acpi_namespace_node *node)
{
acpi_status status;
struct acpi_device_id hid;
struct acpi_compatible_id_list *cid;
acpi_native_uint i;
/*
* Get the _HID and check for a PCI Root Bridge
*/
status = acpi_ut_execute_HID(node, &hid);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
if (acpi_ev_match_pci_root_bridge(hid.value)) {
return (TRUE);
}
/*
* The _HID did not match.
* Get the _CID and check for a PCI Root Bridge
*/
status = acpi_ut_execute_CID(node, &cid);
if (ACPI_FAILURE(status)) {
return (FALSE);
}
/* Check all _CIDs in the returned list */
for (i = 0; i < cid->count; i++) {
if (acpi_ev_match_pci_root_bridge(cid->id[i].value)) {
ACPI_FREE(cid);
return (TRUE);
}
}
ACPI_FREE(cid);
return (FALSE);
}
/******************************************************************************* /*******************************************************************************
* *
* FUNCTION: acpi_ev_pci_bar_region_setup * FUNCTION: acpi_ev_pci_bar_region_setup
...@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle, ...@@ -432,6 +519,9 @@ acpi_ev_default_region_setup(acpi_handle handle,
* a PCI address in the scope of the definition. This address is * a PCI address in the scope of the definition. This address is
* required to perform an access to PCI config space. * required to perform an access to PCI config space.
* *
* MUTEX: Interpreter should be unlocked, because we may run the _REG
* method for this region.
*
******************************************************************************/ ******************************************************************************/
acpi_status acpi_status
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
******************************************************************************/ ******************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -142,7 +142,8 @@ u32 acpi_ev_install_sci_handler(void) ...@@ -142,7 +142,8 @@ u32 acpi_ev_install_sci_handler(void)
ACPI_FUNCTION_TRACE(ev_install_sci_handler); ACPI_FUNCTION_TRACE(ev_install_sci_handler);
status = acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT->sci_int, status =
acpi_os_install_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler, acpi_ev_sci_xrupt_handler,
acpi_gbl_gpe_xrupt_list_head); acpi_gbl_gpe_xrupt_list_head);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
...@@ -175,7 +176,8 @@ acpi_status acpi_ev_remove_sci_handler(void) ...@@ -175,7 +176,8 @@ acpi_status acpi_ev_remove_sci_handler(void)
/* Just let the OS remove the handler and disable the level */ /* Just let the OS remove the handler and disable the level */
status = acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT->sci_int, status =
acpi_os_remove_interrupt_handler((u32) acpi_gbl_FADT.sci_interrupt,
acpi_ev_sci_xrupt_handler); acpi_ev_sci_xrupt_handler);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
......
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -768,11 +768,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
		return (AE_BAD_PARAMETER);
	}

-	status = acpi_ex_enter_interpreter();
-	if (ACPI_FAILURE(status)) {
-		return (status);
-	}
+	/* Must lock interpreter to prevent race conditions */

+	acpi_ex_enter_interpreter();
	status = acpi_ev_acquire_global_lock(timeout);
	acpi_ex_exit_interpreter();
...
@@ -5,7 +5,7 @@
 *****************************************************************************/

 /*
- * Copyright (C) 2000 - 2006, R. Byron Moore
+ * Copyright (C) 2000 - 2007, R. Byron Moore
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,7 @@
 #include <acpi/acpi.h>
 #include <acpi/acevents.h>
 #include <acpi/acnamesp.h>
+#include <acpi/actables.h>

 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evxfevnt")
@@ -65,13 +66,14 @@ acpi_status acpi_enable(void)
	ACPI_FUNCTION_TRACE(acpi_enable);

-	/* Make sure we have the FADT */
+	/* ACPI tables must be present */

-	if (!acpi_gbl_FADT) {
-		ACPI_WARNING((AE_INFO, "No FADT information present!"));
+	if (!acpi_tb_tables_loaded()) {
		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
	}

+	/* Check current mode */
+
	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "System is already in ACPI mode\n"));
@@ -111,11 +113,6 @@ acpi_status acpi_disable(void)
	ACPI_FUNCTION_TRACE(acpi_disable);

-	if (!acpi_gbl_FADT) {
-		ACPI_WARNING((AE_INFO, "No FADT information present!"));
-		return_ACPI_STATUS(AE_NO_ACPI_TABLES);
-	}
-
	if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
				  "System is already in legacy (non-ACPI) mode\n"));
...@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ...@@ -169,7 +166,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
*/ */
status = status =
acpi_set_register(acpi_gbl_fixed_event_info[event]. acpi_set_register(acpi_gbl_fixed_event_info[event].
enable_register_id, 1, ACPI_MTX_LOCK); enable_register_id, 1);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ...@@ -178,7 +175,7 @@ acpi_status acpi_enable_event(u32 event, u32 flags)
status = status =
acpi_get_register(acpi_gbl_fixed_event_info[event]. acpi_get_register(acpi_gbl_fixed_event_info[event].
enable_register_id, &value, ACPI_MTX_LOCK); enable_register_id, &value);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags) ...@@ -368,14 +365,14 @@ acpi_status acpi_disable_event(u32 event, u32 flags)
*/ */
status = status =
acpi_set_register(acpi_gbl_fixed_event_info[event]. acpi_set_register(acpi_gbl_fixed_event_info[event].
enable_register_id, 0, ACPI_MTX_LOCK); enable_register_id, 0);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
status = status =
acpi_get_register(acpi_gbl_fixed_event_info[event]. acpi_get_register(acpi_gbl_fixed_event_info[event].
enable_register_id, &value, ACPI_MTX_LOCK); enable_register_id, &value);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event) ...@@ -421,7 +418,7 @@ acpi_status acpi_clear_event(u32 event)
*/ */
status = status =
acpi_set_register(acpi_gbl_fixed_event_info[event]. acpi_set_register(acpi_gbl_fixed_event_info[event].
status_register_id, 1, ACPI_MTX_LOCK); status_register_id, 1);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) ...@@ -510,7 +507,7 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
status = status =
acpi_get_register(acpi_gbl_fixed_event_info[event]. acpi_get_register(acpi_gbl_fixed_event_info[event].
status_register_id, event_status, ACPI_MTX_LOCK); status_register_id, event_status);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
......
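A minimal sketch of the new accessor signatures, not from this commit: as the hunks above show, acpi_set_register() and acpi_get_register() no longer take the ACPI_MTX_LOCK flag. The helper below mirrors the enable/read-back pattern of acpi_enable_event(); it assumes the internal acpi_gbl_fixed_event_info[] table and ACPI_EVENT_MAX are visible through <acpi/acpi.h> in this tree, and its name is invented.

#include <acpi/acpi.h>

/* Illustrative only: set a fixed event's enable bit and verify it */
static acpi_status example_set_fixed_event_enable(u32 event, u32 enable)
{
	acpi_status status;
	u32 value;

	if (event > ACPI_EVENT_MAX) {
		return (AE_BAD_PARAMETER);
	}

	/* Normalize to a single bit value */
	enable = (enable ? 1 : 0);

	/* Two-argument form: register ID and value, no mutex flag */
	status = acpi_set_register(acpi_gbl_fixed_event_info[event].
				   enable_register_id, enable);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	/* Read the bit back to confirm the hardware took the write */
	status = acpi_get_register(acpi_gbl_fixed_event_info[event].
				   enable_register_id, &value);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	return ((value == enable) ? AE_OK : AE_ERROR);
}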
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
This diff is collapsed.
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state) ...@@ -359,8 +359,9 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
union acpi_operand_object **operand = &walk_state->operands[0]; union acpi_operand_object **operand = &walk_state->operands[0];
union acpi_operand_object *obj_desc; union acpi_operand_object *obj_desc;
struct acpi_namespace_node *node; struct acpi_namespace_node *node;
struct acpi_table_header *table;
union acpi_operand_object *region_obj2; union acpi_operand_object *region_obj2;
acpi_native_uint table_index;
struct acpi_table_header *table;
ACPI_FUNCTION_TRACE(ex_create_table_region); ACPI_FUNCTION_TRACE(ex_create_table_region);
...@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state) ...@@ -380,7 +381,7 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
status = acpi_tb_find_table(operand[1]->string.pointer, status = acpi_tb_find_table(operand[1]->string.pointer,
operand[2]->string.pointer, operand[2]->string.pointer,
operand[3]->string.pointer, &table); operand[3]->string.pointer, &table_index);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state) ...@@ -395,6 +396,11 @@ acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
region_obj2 = obj_desc->common.next_object; region_obj2 = obj_desc->common.next_object;
region_obj2->extra.region_context = NULL; region_obj2->extra.region_context = NULL;
status = acpi_get_table_by_index(table_index, &table);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Init the region from the operands */ /* Init the region from the operands */
obj_desc->region.space_id = REGION_DATA_TABLE; obj_desc->region.space_id = REGION_DATA_TABLE;
...@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start, ...@@ -553,7 +559,8 @@ acpi_ex_create_method(u8 * aml_start,
obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD); obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
if (!obj_desc) { if (!obj_desc) {
return_ACPI_STATUS(AE_NO_MEMORY); status = AE_NO_MEMORY;
goto exit;
} }
/* Save the method's AML pointer and length */ /* Save the method's AML pointer and length */
...@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start, ...@@ -576,10 +583,7 @@ acpi_ex_create_method(u8 * aml_start,
* Get the sync_level. If method is serialized, a mutex will be * Get the sync_level. If method is serialized, a mutex will be
* created for this method when it is parsed. * created for this method when it is parsed.
*/ */
if (acpi_gbl_all_methods_serialized) { if (method_flags & AML_METHOD_SERIALIZED) {
obj_desc->method.sync_level = 0;
obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
} else if (method_flags & AML_METHOD_SERIALIZED) {
/* /*
* ACPI 1.0: sync_level = 0 * ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration * ACPI 2.0: sync_level = sync_level in method declaration
...@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start, ...@@ -597,6 +601,7 @@ acpi_ex_create_method(u8 * aml_start,
acpi_ut_remove_reference(obj_desc); acpi_ut_remove_reference(obj_desc);
exit:
/* Remove a reference to the operand */ /* Remove a reference to the operand */
acpi_ut_remove_reference(operand[1]); acpi_ut_remove_reference(operand[1]);
......
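A minimal sketch of the table-index flow, not part of this commit: the DataTableRegion hunk above shows acpi_tb_find_table() now yielding an index, with acpi_get_table_by_index() mapping that index back to a header. The helper below relies only on those two facts plus acpi_os_printf(); its name is invented.

#include <acpi/acpi.h>

/* Illustrative only: resolve a table index to its header and report it */
static acpi_status example_report_table(acpi_native_uint table_index)
{
	struct acpi_table_header *table;
	acpi_status status;

	/* The table manager owns the header; we only borrow the pointer */
	status = acpi_get_table_by_index(table_index, &table);
	if (ACPI_FAILURE(status)) {
		return (status);
	}

	acpi_os_printf("Table [%4.4s], length %u\n",
		       table->signature, table->length);
	return (AE_OK);
}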
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value); ...@@ -59,8 +59,6 @@ static void acpi_ex_out_string(char *title, char *value);
static void acpi_ex_out_pointer(char *title, void *value); static void acpi_ex_out_pointer(char *title, void *value);
static void acpi_ex_out_address(char *title, acpi_physical_address value);
static void static void
acpi_ex_dump_object(union acpi_operand_object *obj_desc, acpi_ex_dump_object(union acpi_operand_object *obj_desc,
struct acpi_exdump_info *info); struct acpi_exdump_info *info);
...@@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = { ...@@ -92,10 +90,11 @@ static struct acpi_exdump_info acpi_ex_dump_string[4] = {
{ACPI_EXD_STRING, 0, NULL} {ACPI_EXD_STRING, 0, NULL}
}; };
static struct acpi_exdump_info acpi_ex_dump_buffer[4] = { static struct acpi_exdump_info acpi_ex_dump_buffer[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL}, {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_buffer), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"}, {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(buffer.length), "Length"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"}, {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.pointer), "Pointer"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(buffer.node), "Parent Node"},
{ACPI_EXD_BUFFER, 0, NULL} {ACPI_EXD_BUFFER, 0, NULL}
}; };
...@@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = { ...@@ -165,8 +164,8 @@ static struct acpi_exdump_info acpi_ex_dump_power[5] = {
static struct acpi_exdump_info acpi_ex_dump_processor[7] = { static struct acpi_exdump_info acpi_ex_dump_processor[7] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL}, {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_processor), NULL},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.proc_id), "Processor ID"},
{ACPI_EXD_UINT32, ACPI_EXD_OFFSET(processor.length), "Length"}, {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(processor.length), "Length"},
{ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"}, {ACPI_EXD_ADDRESS, ACPI_EXD_OFFSET(processor.address), "Address"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify), {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(processor.system_notify),
"System Notify"}, "System Notify"},
...@@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc, ...@@ -379,18 +378,12 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
break; break;
case ACPI_EXD_POINTER: case ACPI_EXD_POINTER:
case ACPI_EXD_ADDRESS:
acpi_ex_out_pointer(name, acpi_ex_out_pointer(name,
*ACPI_CAST_PTR(void *, target)); *ACPI_CAST_PTR(void *, target));
break; break;
case ACPI_EXD_ADDRESS:
acpi_ex_out_address(name,
*ACPI_CAST_PTR
(acpi_physical_address, target));
break;
case ACPI_EXD_STRING: case ACPI_EXD_STRING:
acpi_ut_print_string(obj_desc->string.pointer, acpi_ut_print_string(obj_desc->string.pointer,
...@@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value) ...@@ -834,16 +827,6 @@ static void acpi_ex_out_pointer(char *title, void *value)
acpi_os_printf("%20s : %p\n", title, value); acpi_os_printf("%20s : %p\n", title, value);
} }
static void acpi_ex_out_address(char *title, acpi_physical_address value)
{
#if ACPI_MACHINE_WIDTH == 16
acpi_os_printf("%20s : %p\n", title, value);
#else
acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
#endif
}
/******************************************************************************* /*******************************************************************************
* *
* FUNCTION: acpi_ex_dump_namespace_node * FUNCTION: acpi_ex_dump_namespace_node
......
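A hedged aside on the exdump hunk above: the dedicated acpi_ex_out_address() helper is removed and ACPI_EXD_ADDRESS entries now go through the pointer printer, so dumped addresses are shown pointer-width. Code that still wants the full 64-bit form can keep using the ACPI_FORMAT_UINT64 idiom from the removed helper, as in this illustrative sketch (function name invented).

#include <acpi/acpi.h>

/* Illustrative only: print a full-width physical address */
static void example_print_physical_address(char *title,
					   acpi_physical_address value)
{
	/* ACPI_FORMAT_UINT64 splits the value into two 32-bit halves */
	acpi_os_printf("%20s : %8.8X%8.8X\n", title, ACPI_FORMAT_UINT64(value));
}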
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc, ...@@ -257,14 +257,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
} }
ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD, ACPI_DEBUG_PRINT_RAW((ACPI_DB_BFIELD,
" Region [%s:%X], Width %X, ByteBase %X, Offset %X at %8.8X%8.8X\n", " Region [%s:%X], Width %X, ByteBase %X, Offset %X at %p\n",
acpi_ut_get_region_name(rgn_desc->region. acpi_ut_get_region_name(rgn_desc->region.
space_id), space_id),
rgn_desc->region.space_id, rgn_desc->region.space_id,
obj_desc->common_field.access_byte_width, obj_desc->common_field.access_byte_width,
obj_desc->common_field.base_byte_offset, obj_desc->common_field.base_byte_offset,
field_datum_byte_offset, field_datum_byte_offset, (void *)address));
ACPI_FORMAT_UINT64(address)));
/* Invoke the appropriate address_space/op_region handler */ /* Invoke the appropriate address_space/op_region handler */
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include <acpi/acpi.h> #include <acpi/acpi.h>
#include <acpi/acinterp.h> #include <acpi/acinterp.h>
#include <acpi/acevents.h>
#define _COMPONENT ACPI_EXECUTER #define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exmutex") ACPI_MODULE_NAME("exmutex")
...@@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, ...@@ -150,7 +151,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
/* Sanity check -- we must have a valid thread ID */ /* Sanity check: we must have a valid thread ID */
if (!walk_state->thread) { if (!walk_state->thread) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
...@@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, ...@@ -174,24 +175,28 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */ /* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread) { if (obj_desc->mutex.owner_thread) {
if (obj_desc->mutex.owner_thread->thread_id ==
/* Special case for Global Lock, allow all threads */ walk_state->thread->thread_id) {
if ((obj_desc->mutex.owner_thread->thread_id ==
walk_state->thread->thread_id) ||
(obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) {
/* /*
* The mutex is already owned by this thread, * The mutex is already owned by this thread, just increment the
* just increment the acquisition depth * acquisition depth
*/ */
obj_desc->mutex.acquisition_depth++; obj_desc->mutex.acquisition_depth++;
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
} }
/* Acquire the mutex, wait if necessary */ /* Acquire the mutex, wait if necessary. Special case for Global Lock */
if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
status =
acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
} else {
status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
(u16) time_desc->integer.
value);
}
status = acpi_ex_system_acquire_mutex(time_desc, obj_desc);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
/* Includes failure from a timeout on time_desc */ /* Includes failure from a timeout on time_desc */
...@@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, ...@@ -211,7 +216,6 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Link the mutex to the current thread for force-unlock at method exit */ /* Link the mutex to the current thread for force-unlock at method exit */
acpi_ex_link_mutex(obj_desc, walk_state->thread); acpi_ex_link_mutex(obj_desc, walk_state->thread);
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
...@@ -232,7 +236,7 @@ acpi_status ...@@ -232,7 +236,7 @@ acpi_status
acpi_ex_release_mutex(union acpi_operand_object *obj_desc, acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
struct acpi_walk_state *walk_state) struct acpi_walk_state *walk_state)
{ {
acpi_status status; acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ex_release_mutex); ACPI_FUNCTION_TRACE(ex_release_mutex);
...@@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -249,7 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED); return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
} }
/* Sanity check -- we must have a valid thread ID */ /* Sanity check: we must have a valid thread ID */
if (!walk_state->thread) { if (!walk_state->thread) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
...@@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -264,7 +268,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
*/ */
if ((obj_desc->mutex.owner_thread->thread_id != if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id) walk_state->thread->thread_id)
&& (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX", "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
(unsigned long)walk_state->thread->thread_id, (unsigned long)walk_state->thread->thread_id,
...@@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -274,8 +278,8 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
} }
/* /*
* The sync level of the mutex must be less than or * The sync level of the mutex must be less than or equal to the current
* equal to the current sync level * sync level
*/ */
if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) { if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
...@@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -298,11 +302,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
acpi_ex_unlink_mutex(obj_desc); acpi_ex_unlink_mutex(obj_desc);
/* Release the mutex */ /* Release the mutex, special case for Global Lock */
status = acpi_ex_system_release_mutex(obj_desc); if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
status = acpi_ev_release_global_lock();
} else {
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
}
/* Update the mutex and walk state, restore sync_level before acquire */ /* Update the mutex and restore sync_level */
obj_desc->mutex.owner_thread = NULL; obj_desc->mutex.owner_thread = NULL;
walk_state->thread->current_sync_level = walk_state->thread->current_sync_level =
...@@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, ...@@ -321,39 +329,49 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* *
* DESCRIPTION: Release all mutexes held by this thread * DESCRIPTION: Release all mutexes held by this thread
* *
* NOTE: This function is called as the thread is exiting the interpreter.
* Mutexes are not released when an individual control method is exited, but
* only when the parent thread actually exits the interpreter. This allows one
* method to acquire a mutex, and a different method to release it, as long as
* this is performed underneath a single parent control method.
*
******************************************************************************/ ******************************************************************************/
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread) void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
{ {
union acpi_operand_object *next = thread->acquired_mutex_list; union acpi_operand_object *next = thread->acquired_mutex_list;
union acpi_operand_object *this; union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_ENTRY(); ACPI_FUNCTION_ENTRY();
/* Traverse the list of owned mutexes, releasing each one */ /* Traverse the list of owned mutexes, releasing each one */
while (next) { while (next) {
this = next; obj_desc = next;
next = this->mutex.next; next = obj_desc->mutex.next;
this->mutex.acquisition_depth = 1; obj_desc->mutex.prev = NULL;
this->mutex.prev = NULL; obj_desc->mutex.next = NULL;
this->mutex.next = NULL; obj_desc->mutex.acquisition_depth = 0;
/* Release the mutex */ /* Release the mutex, special case for Global Lock */
status = acpi_ex_system_release_mutex(this); if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
if (ACPI_FAILURE(status)) {
continue; /* Ignore errors */
(void)acpi_ev_release_global_lock();
} else {
acpi_os_release_mutex(obj_desc->mutex.os_mutex);
} }
/* Mark mutex unowned */ /* Mark mutex unowned */
this->mutex.owner_thread = NULL; obj_desc->mutex.owner_thread = NULL;
/* Update Thread sync_level (Last mutex is the important one) */ /* Update Thread sync_level (Last mutex is the important one) */
thread->current_sync_level = this->mutex.original_sync_level; thread->current_sync_level =
obj_desc->mutex.original_sync_level;
} }
} }
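A minimal sketch of the release pattern above, not from this commit: the Global Lock is no longer a special ACPI_GLOBAL_LOCK constant, so release paths compare against acpi_gbl_global_lock_mutex and route through the event code. The helper below assumes the internal union acpi_operand_object layout and acpi_gbl_global_lock_mutex from this tree; its name is invented.

#include <acpi/acpi.h>
#include <acpi/acevents.h>

/* Illustrative only: drop an interpreter mutex, special-casing the Global Lock */
static acpi_status example_drop_aml_mutex(union acpi_operand_object *obj_desc)
{
	/* The Global Lock needs the firmware handshake done by the event code */
	if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
		return (acpi_ev_release_global_lock());
	}

	acpi_os_release_mutex(obj_desc->mutex.os_mutex);
	return (AE_OK);
}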
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state) ...@@ -104,9 +104,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
status = AE_NO_MEMORY; status = AE_NO_MEMORY;
goto cleanup; goto cleanup;
} }
#if ACPI_MACHINE_WIDTH != 16
return_desc->integer.value = acpi_os_get_timer(); return_desc->integer.value = acpi_os_get_timer();
#endif
break; break;
default: /* Unknown opcode */ default: /* Unknown opcode */
......
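A hedged aside on the Timer hunk above: acpi_os_get_timer() is now used unconditionally (the 16-bit build guard is gone). Assuming it returns a monotonic count in 100 ns units, as the AML Timer opcode requires, an elapsed time can be derived as in this illustrative helper (the name and conversion constant are assumptions, not part of the commit).

#include <acpi/acpi.h>

/* Illustrative only: convert a Timer-opcode delta to milliseconds */
static u32 example_timer_delta_ms(u64 start_ticks, u64 end_ticks)
{
	/* 10,000 ticks of 100 ns each make one millisecond */
	return ((u32) ((end_ticks - start_ticks) / 10000));
}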
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function, ...@@ -155,16 +155,15 @@ acpi_ex_system_memory_space_handler(u32 function,
/* Create a new mapping starting at the address given */ /* Create a new mapping starting at the address given */
status = acpi_os_map_memory(address, window_size, mem_info->mapped_logical_address =
(void **)&mem_info-> acpi_os_map_memory((acpi_native_uint) address, window_size);
mapped_logical_address); if (!mem_info->mapped_logical_address) {
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Could not map memory at %8.8X%8.8X, size %X", "Could not map memory at %8.8X%8.8X, size %X",
ACPI_FORMAT_UINT64(address), ACPI_FORMAT_UINT64(address),
(u32) window_size)); (u32) window_size));
mem_info->mapped_length = 0; mem_info->mapped_length = 0;
return_ACPI_STATUS(status); return_ACPI_STATUS(AE_NO_MEMORY);
} }
/* Save the physical address and mapping size */ /* Save the physical address and mapping size */
...@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function, ...@@ -210,11 +209,10 @@ acpi_ex_system_memory_space_handler(u32 function,
*value = (acpi_integer) ACPI_GET32(logical_addr_ptr); *value = (acpi_integer) ACPI_GET32(logical_addr_ptr);
break; break;
#if ACPI_MACHINE_WIDTH != 16
case 64: case 64:
*value = (acpi_integer) ACPI_GET64(logical_addr_ptr); *value = (acpi_integer) ACPI_GET64(logical_addr_ptr);
break; break;
#endif
default: default:
/* bit_width was already validated */ /* bit_width was already validated */
break; break;
...@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function, ...@@ -236,11 +234,9 @@ acpi_ex_system_memory_space_handler(u32 function,
ACPI_SET32(logical_addr_ptr) = (u32) * value; ACPI_SET32(logical_addr_ptr) = (u32) * value;
break; break;
#if ACPI_MACHINE_WIDTH != 16
case 64: case 64:
ACPI_SET64(logical_addr_ptr) = (u64) * value; ACPI_SET64(logical_addr_ptr) = (u64) * value;
break; break;
#endif
default: default:
/* bit_width was already validated */ /* bit_width was already validated */
......
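A minimal sketch of the new mapping interface, not from this commit: the SystemMemory handler hunk above shows acpi_os_map_memory() returning the virtual address directly, with NULL on failure, rather than filling an out parameter and returning a status. The helper below also assumes acpi_os_unmap_memory(address, size) from the same OSL; its name is invented.

#include <acpi/acpi.h>

/* Illustrative only: map a physical address, read one u32, unmap */
static acpi_status example_read_phys_u32(acpi_physical_address address,
					 u32 *value)
{
	void *logical_addr_ptr;

	logical_addr_ptr = acpi_os_map_memory((acpi_native_uint) address,
					      sizeof(u32));
	if (!logical_addr_ptr) {
		return (AE_NO_MEMORY);
	}

	*value = ACPI_GET32(logical_addr_ptr);
	acpi_os_unmap_memory(logical_addr_ptr, sizeof(u32));
	return (AE_OK);
}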
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, ...@@ -141,7 +141,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
acpi_status status = AE_OK; acpi_status status = AE_OK;
union acpi_operand_object *stack_desc; union acpi_operand_object *stack_desc;
void *temp_node; void *temp_node;
union acpi_operand_object *obj_desc; union acpi_operand_object *obj_desc = NULL;
u16 opcode; u16 opcode;
ACPI_FUNCTION_TRACE(ex_resolve_object_to_value); ACPI_FUNCTION_TRACE(ex_resolve_object_to_value);
...@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, ...@@ -299,8 +299,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
status = acpi_ds_get_package_arguments(stack_desc); status = acpi_ds_get_package_arguments(stack_desc);
break; break;
/* These cases may never happen here, but just in case.. */
case ACPI_TYPE_BUFFER_FIELD: case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD: case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD: case ACPI_TYPE_LOCAL_BANK_FIELD:
...@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, ...@@ -314,6 +312,10 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
status = status =
acpi_ex_read_data_from_field(walk_state, stack_desc, acpi_ex_read_data_from_field(walk_state, stack_desc,
&obj_desc); &obj_desc);
/* Remove a reference to the original operand, then override */
acpi_ut_remove_reference(*stack_ptr);
*stack_ptr = (void *)obj_desc; *stack_ptr = (void *)obj_desc;
break; break;
......
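A minimal sketch of the reference-count fix above, not from this commit: when the resolved field value replaces the original operand on the stack, the original object must drop one reference first or it leaks. The helper below uses only acpi_ut_remove_reference() from the hunk; its name is invented.

#include <acpi/acpi.h>

/* Illustrative only: swap an operand object and release the old reference */
static void example_replace_operand(union acpi_operand_object **slot,
				    union acpi_operand_object *new_obj)
{
	/* Drop our hold on the old object, then install the new one */
	acpi_ut_remove_reference(*slot);
	*slot = new_obj;
}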
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode, ...@@ -611,22 +611,20 @@ acpi_ex_resolve_operands(u16 opcode,
} }
goto next_operand; goto next_operand;
case ARGI_REGION_OR_FIELD: case ARGI_REGION_OR_BUFFER: /* Used by Load() only */
/* Need an operand of type REGION or a FIELD in a region */ /* Need an operand of type REGION or a BUFFER (which could be a resolved region field) */
switch (ACPI_GET_OBJECT_TYPE(obj_desc)) { switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
case ACPI_TYPE_BUFFER:
case ACPI_TYPE_REGION: case ACPI_TYPE_REGION:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/* Valid operand */ /* Valid operand */
break; break;
default: default:
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Needed [Region/RegionField], found [%s] %p", "Needed [Region/Buffer], found [%s] %p",
acpi_ut_get_object_type_name acpi_ut_get_object_type_name
(obj_desc), obj_desc)); (obj_desc), obj_desc));
......
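A minimal sketch of the relaxed operand check above, not from this commit: Load() now accepts either an operation region or a buffer (for example, the result of reading a region field). The check below mirrors the switch in the hunk; the function name is invented.

#include <acpi/acpi.h>

/* Illustrative only: validate a Load() source operand */
static acpi_status example_check_load_source(union acpi_operand_object *obj_desc)
{
	switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
	case ACPI_TYPE_REGION:
	case ACPI_TYPE_BUFFER:

		/* Valid Load() source */
		return (AE_OK);

	default:
		return (AE_AML_OPERAND_TYPE);
	}
}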
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2006, R. Byron Moore * Copyright (C) 2000 - 2007, R. Byron Moore
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
......
This diff is collapsed.