Commit 9bd47bf9 authored by Linus Torvalds

Merge branch 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'acpica' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (27 commits)
  ACPICA: Update version to 20090521.
  ACPICA: Disable preservation of SCI enable bit (SCI_EN)
  ACPICA: Region deletion: Ensure region object is removed from handler list
  ACPICA: Eliminate extra call to NsGetParentNode
  ACPICA: Simplify internal operation region interface
  ACPICA: Update Load() to use operation region interfaces
  ACPICA: New: AcpiInstallMethod - install a single control method
  ACPICA: Invalidate DdbHandle after table unload
  ACPICA: Fix reference count issues for DdbHandle object
  ACPICA: Simplify and optimize NsGetNextNode function
  ACPICA: Additional validation of _PRT packages (resource mgr)
  ACPICA: Fix DebugObject output for DdbHandle objects
  ACPICA: Fix allowable release order for ASL mutex objects
  ACPICA: Mutex support: Fix release ordering issue and current sync level
  ACPICA: Update version to 20090422.
  ACPICA: Linux OSL: cleanup/update/merge
  ACPICA: Fix implementation of AML BreakPoint operator (break to debugger)
  ACPICA: Fix miscellaneous warnings under gcc 4+
  ACPICA: Miscellaneous lint changes
  ACPICA: Fix possible dereference of null pointer
  ...
parents 46a50661 d6a1cd49
@@ -139,7 +139,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                                u32 function,
-                               acpi_physical_address address,
+                               u32 region_offset,
                                u32 bit_width, acpi_integer * value);

 acpi_status
...
@@ -362,9 +362,6 @@ extern u8 acpi_gbl_method_executing;
 extern u8 acpi_gbl_abort_method;
 extern u8 acpi_gbl_db_terminate_threads;

-ACPI_EXTERN int optind;
-ACPI_EXTERN char *optarg;
-
 ACPI_EXTERN u8 acpi_gbl_db_opt_tables;
 ACPI_EXTERN u8 acpi_gbl_db_opt_stats;
 ACPI_EXTERN u8 acpi_gbl_db_opt_ini_methods;
...
@@ -205,6 +205,7 @@ struct acpi_namespace_node {
 #define ANOBJ_METHOD_LOCAL          0x08	/* Node is a method local */
 #define ANOBJ_SUBTREE_HAS_INI       0x10	/* Used to optimize device initialization */
 #define ANOBJ_EVALUATED             0x20	/* Set on first evaluation of node */
+#define ANOBJ_ALLOCATED_BUFFER      0x40	/* Method AML buffer is dynamic (install_method) */

 #define ANOBJ_IS_EXTERNAL           0x08	/* i_aSL only: This object created via External() */
 #define ANOBJ_METHOD_NO_RETVAL      0x10	/* i_aSL only: Method has no return value */
@@ -788,11 +789,14 @@ struct acpi_bit_register_info {

 /* For control registers, both ignored and reserved bits must be preserved */

 /*
- * The ACPI spec says to ignore PM1_CTL.SCI_EN (bit 0)
- * but we need to be able to write ACPI_BITREG_SCI_ENABLE directly
- * as a BIOS workaround on some machines.
+ * For PM1 control, the SCI enable bit (bit 0, SCI_EN) is defined by the
+ * ACPI specification to be a "preserved" bit - "OSPM always preserves this
+ * bit position", section 4.7.3.2.1. However, on some machines the OS must
+ * write a one to this bit after resume for the machine to work properly.
+ * To enable this, we no longer attempt to preserve this bit. No machines
+ * are known to fail if the bit is not preserved. (May 2009)
  */
-#define ACPI_PM1_CONTROL_IGNORED_BITS          0x0200	/* Bits 9 */
+#define ACPI_PM1_CONTROL_IGNORED_BITS          0x0200	/* Bit 9 */
 #define ACPI_PM1_CONTROL_RESERVED_BITS         0xC1F8	/* Bits 14-15, 3-8 */
 #define ACPI_PM1_CONTROL_PRESERVED_BITS \
         (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)
...
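For reference, here is a minimal standalone sketch (not part of this patch) of how a PM1 control write can apply the masks above: bits in the preserved set are taken from the current hardware value, everything else from the caller. The helper name and use of stdint types are illustrative only; note that SCI_EN (bit 0) is deliberately absent from both masks after this change, so it can be written directly.

#include <stdint.h>

#define ACPI_PM1_CONTROL_IGNORED_BITS   0x0200  /* Bit 9 */
#define ACPI_PM1_CONTROL_RESERVED_BITS  0xC1F8  /* Bits 14-15, 3-8 */
#define ACPI_PM1_CONTROL_PRESERVED_BITS \
        (ACPI_PM1_CONTROL_IGNORED_BITS | ACPI_PM1_CONTROL_RESERVED_BITS)

/* Merge the new PM1 control bits with the bits that must be preserved */
static uint16_t pm1_control_merge(uint16_t current_hw_value, uint16_t new_bits)
{
        return (uint16_t)((current_hw_value & ACPI_PM1_CONTROL_PRESERVED_BITS) |
                          (new_bits & (uint16_t)~ACPI_PM1_CONTROL_PRESERVED_BITS));
}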
@@ -99,8 +99,17 @@ acpi_ns_walk_namespace(acpi_object_type type,
                        acpi_walk_callback user_function,
                        void *context, void **return_value);

-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
-                                                  struct acpi_namespace_node
-                                                  *parent,
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+                                                  *parent,
                                                   struct acpi_namespace_node
                                                   *child);

+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+                                                         struct
+                                                         acpi_namespace_node
+                                                         *parent,
+                                                         struct
+                                                         acpi_namespace_node
+                                                         *child);
+
 /*
...
@@ -483,7 +483,7 @@ typedef enum {
 #define AML_METHOD_ARG_COUNT        0x07
 #define AML_METHOD_SERIALIZED       0x08
-#define AML_METHOD_SYNCH_LEVEL      0xF0
+#define AML_METHOD_SYNC_LEVEL       0xF0

 /* METHOD_FLAGS_ARG_COUNT is not used internally, define additional flags */
...
@@ -734,7 +734,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,

         /* Local ID (0-7) is (AML opcode - base AML_LOCAL_OP) */

-        obj_desc->reference.value = opcode - AML_LOCAL_OP;
+        obj_desc->reference.value =
+            ((u32)opcode) - AML_LOCAL_OP;
         obj_desc->reference.class = ACPI_REFCLASS_LOCAL;

 #ifndef ACPI_NO_METHOD_EXECUTION
@@ -754,7 +755,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,

         /* Arg ID (0-6) is (AML opcode - base AML_ARG_OP) */

-        obj_desc->reference.value = opcode - AML_ARG_OP;
+        obj_desc->reference.value = ((u32)opcode) - AML_ARG_OP;
         obj_desc->reference.class = ACPI_REFCLASS_ARG;

 #ifndef ACPI_NO_METHOD_EXECUTION
...
@@ -1386,14 +1386,19 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,

     case AML_BREAK_POINT_OP:

-        /* Call up to the OS service layer to handle this */
-
-        status =
-            acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
-                           "Executed AML Breakpoint opcode");
+        /*
+         * Set the single-step flag. This will cause the debugger (if present)
+         * to break to the console within the AML debugger at the start of the
+         * next AML instruction.
+         */
+        ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
+        ACPI_DEBUGGER_EXEC(acpi_os_printf
+                           ("**break** Executed AML BreakPoint opcode\n"));

-        /* If and when it returns, all done. */
+        /* Call to the OSL in case OS wants a piece of the action */

+        status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
+                                "Executed AML Breakpoint opcode");
         break;

     case AML_BREAK_OP:
...
@@ -102,7 +102,7 @@ acpi_ds_result_pop(union acpi_operand_object **object,
     /* Return object of the top element and clean that top element result stack */

     walk_state->result_count--;
-    index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+    index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;

     *object = state->results.obj_desc[index];
     if (!*object) {
@@ -186,7 +186,7 @@ acpi_ds_result_push(union acpi_operand_object * object,

     /* Assign the address of object to the top free element of result stack */

-    index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+    index = (u32)walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
     state->results.obj_desc[index] = object;
     walk_state->result_count++;
...
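The result stack is kept in fixed-size frames, so the running count is reduced modulo the frame size to find the slot within the current frame; the u32 cast only silustrates the integer promotion being silenced. A standalone sketch of the same indexing (the frame size constant and types here are illustrative, not taken from the patch):

#include <stdio.h>
#include <stdint.h>

#define ACPI_RESULTS_FRAME_OBJ_NUM 8    /* objects per result frame (example value) */

int main(void)
{
        uint8_t result_count = 11;      /* running count of pushed results */
        uint32_t index;

        /* Slot within the current frame, as dswstate.c computes it */
        index = (uint32_t)result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
        printf("count=%u -> frame slot=%u\n", result_count, index);
        return 0;
}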
@@ -275,7 +275,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 *
 * PARAMETERS:  region_obj          - Internal region object
 *              Function            - Read or Write operation
- *              Address             - Where in the space to read or write
+ *              region_offset       - Where in the region to read or write
 *              bit_width           - Field width in bits (8, 16, 32, or 64)
 *              Value               - Pointer to in or out value, must be
 *                                    full 64-bit acpi_integer
@@ -290,7 +290,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                                u32 function,
-                               acpi_physical_address address,
+                               u32 region_offset,
                                u32 bit_width, acpi_integer * value)
 {
     acpi_status status;
@@ -396,7 +396,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
     ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
                       "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
                       &region_obj->region.handler->address_space, handler,
-                      ACPI_FORMAT_NATIVE_UINT(address),
+                      ACPI_FORMAT_NATIVE_UINT(region_obj->region.address +
+                                              region_offset),
                       acpi_ut_get_region_name(region_obj->region.
                                               space_id)));

@@ -412,8 +413,9 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,

     /* Call the handler */

-    status = handler(function, address, bit_width, value,
-                     handler_desc->address_space.context,
+    status = handler(function,
+                     (region_obj->region.address + region_offset),
+                     bit_width, value, handler_desc->address_space.context,
                      region_obj2->extra.region_context);

     if (ACPI_FAILURE(status)) {
...
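After this change the interpreter hands the dispatcher a byte offset into the region, and the dispatcher adds the region's base address before calling the handler, so handlers still receive an absolute address. A minimal sketch of a handler with the shape of the call above (the function name is made up, and this assumes the ACPICA headers used elsewhere in this diff; it is not code from the patch):

/* Sketch of an address-space handler as invoked by the dispatcher above.
 * The dispatcher passes: address = region_obj->region.address + region_offset.
 */
static acpi_status
example_region_handler(u32 function,
                       acpi_physical_address address,  /* already absolute */
                       u32 bit_width,
                       acpi_integer *value,
                       void *handler_context, void *region_context)
{
        if (function == ACPI_READ) {
                *value = 0;     /* read bit_width bits at 'address' into *value */
        } else {
                /* write the low bit_width bits of *value to 'address' */
        }
        return AE_OK;
}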
@@ -51,7 +51,7 @@
 ACPI_MODULE_NAME("evxfevnt")

 /* Local prototypes */
-acpi_status
+static acpi_status
 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                        struct acpi_gpe_block_info *gpe_block, void *context);

@@ -785,7 +785,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
 *              block device. NULL if the GPE is one of the FADT-defined GPEs.
 *
 ******************************************************************************/
-acpi_status
+static acpi_status
 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
                        struct acpi_gpe_block_info *gpe_block, void *context)
 {
...
@@ -47,6 +47,7 @@
 #include "acnamesp.h"
 #include "actables.h"
 #include "acdispat.h"
+#include "acevents.h"

 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exconfig")
@@ -57,6 +58,10 @@ acpi_ex_add_table(u32 table_index,
                   struct acpi_namespace_node *parent_node,
                   union acpi_operand_object **ddb_handle);

+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc,
+                    u32 length, u8 *buffer);
+
 /*******************************************************************************
 *
 * FUNCTION:    acpi_ex_add_table
@@ -91,6 +96,7 @@ acpi_ex_add_table(u32 table_index,

     /* Init the table handle */

+    obj_desc->common.flags |= AOPOBJ_DATA_VALID;
     obj_desc->reference.class = ACPI_REFCLASS_TABLE;
     *ddb_handle = obj_desc;
@@ -229,6 +235,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
                                              walk_state);
         if (ACPI_FAILURE(status)) {
             (void)acpi_ex_unload_table(ddb_handle);
+
+            acpi_ut_remove_reference(ddb_handle);
             return_ACPI_STATUS(status);
         }
     }
@@ -252,6 +260,47 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
     return_ACPI_STATUS(status);
 }

+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_region_read
+ *
+ * PARAMETERS:  obj_desc        - Region descriptor
+ *              Length          - Number of bytes to read
+ *              Buffer          - Pointer to where to put the data
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Read data from an operation region. The read starts from the
+ *              beginning of the region.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
+{
+    acpi_status status;
+    acpi_integer value;
+    u32 region_offset = 0;
+    u32 i;
+
+    /* Bytewise reads */
+
+    for (i = 0; i < length; i++) {
+        status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
+                                                region_offset, 8,
+                                                &value);
+        if (ACPI_FAILURE(status)) {
+            return status;
+        }
+
+        *buffer = (u8)value;
+        buffer++;
+        region_offset++;
+    }
+
+    return AE_OK;
+}
+
 /*******************************************************************************
 *
 * FUNCTION:    acpi_ex_load_op
@@ -314,18 +363,23 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
             }
         }

-        /*
-         * Map the table header and get the actual table length. The region
-         * length is not guaranteed to be the same as the table length.
-         */
-        table = acpi_os_map_memory(obj_desc->region.address,
-                                   sizeof(struct acpi_table_header));
+        /* Get the table header first so we can get the table length */
+
+        table = ACPI_ALLOCATE(sizeof(struct acpi_table_header));
         if (!table) {
             return_ACPI_STATUS(AE_NO_MEMORY);
         }

+        status =
+            acpi_ex_region_read(obj_desc,
+                                sizeof(struct acpi_table_header),
+                                ACPI_CAST_PTR(u8, table));
         length = table->length;
-        acpi_os_unmap_memory(table, sizeof(struct acpi_table_header));
+        ACPI_FREE(table);
+
+        if (ACPI_FAILURE(status)) {
+            return_ACPI_STATUS(status);
+        }

         /* Must have at least an ACPI table header */

@@ -334,10 +388,19 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
         }

         /*
-         * The memory region is not guaranteed to remain stable and we must
-         * copy the table to a local buffer. For example, the memory region
-         * is corrupted after suspend on some machines. Dynamically loaded
-         * tables are usually small, so this overhead is minimal.
+         * The original implementation simply mapped the table, with no copy.
+         * However, the memory region is not guaranteed to remain stable and
+         * we must copy the table to a local buffer. For example, the memory
+         * region is corrupted after suspend on some machines. Dynamically
+         * loaded tables are usually small, so this overhead is minimal.
+         *
+         * The latest implementation (5/2009) does not use a mapping at all.
+         * We use the low-level operation region interface to read the table
+         * instead of the obvious optimization of using a direct mapping.
+         * This maintains a consistent use of operation regions across the
+         * entire subsystem. This is important if additional processing must
+         * be performed in the (possibly user-installed) operation region
+         * handler. For example, acpi_exec and ASLTS depend on this.
         */

         /* Allocate a buffer for the table */

@@ -347,17 +410,16 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
             return_ACPI_STATUS(AE_NO_MEMORY);
         }

-        /* Map the entire table and copy it */
+        /* Read the entire table */

-        table = acpi_os_map_memory(obj_desc->region.address, length);
-        if (!table) {
+        status = acpi_ex_region_read(obj_desc, length,
+                                     ACPI_CAST_PTR(u8,
+                                                   table_desc.pointer));
+        if (ACPI_FAILURE(status)) {
             ACPI_FREE(table_desc.pointer);
-            return_ACPI_STATUS(AE_NO_MEMORY);
+            return_ACPI_STATUS(status);
         }

-        ACPI_MEMCPY(table_desc.pointer, table, length);
-        acpi_os_unmap_memory(table, length);
-
         table_desc.address = obj_desc->region.address;
         break;

@@ -454,6 +516,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
         return_ACPI_STATUS(status);
     }

+    /* Remove the reference by added by acpi_ex_store above */
+
+    acpi_ut_remove_reference(ddb_handle);
+
     /* Invoke table handler if present */

     if (acpi_gbl_table_handler) {
@@ -495,13 +561,18 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)

     /*
      * Validate the handle
-     * Although the handle is partially validated in acpi_ex_reconfiguration(),
+     * Although the handle is partially validated in acpi_ex_reconfiguration()
      * when it calls acpi_ex_resolve_operands(), the handle is more completely
      * validated here.
+     *
+     * Handle must be a valid operand object of type reference. Also, the
+     * ddb_handle must still be marked valid (table has not been previously
+     * unloaded)
      */
     if ((!ddb_handle) ||
         (ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) != ACPI_DESC_TYPE_OPERAND) ||
-        (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE)) {
+        (ddb_handle->common.type != ACPI_TYPE_LOCAL_REFERENCE) ||
+        (!(ddb_handle->common.flags & AOPOBJ_DATA_VALID))) {
         return_ACPI_STATUS(AE_BAD_PARAMETER);
     }

@@ -509,6 +580,12 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)

     table_index = table_desc->reference.value;

+    /* Ensure the table is still loaded */
+
+    if (!acpi_tb_is_table_loaded(table_index)) {
+        return_ACPI_STATUS(AE_NOT_EXIST);
+    }
+
     /* Invoke table handler if present */

     if (acpi_gbl_table_handler) {
@@ -530,8 +607,10 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
     (void)acpi_tb_release_owner_id(table_index);
     acpi_tb_set_table_loaded_flag(table_index, FALSE);

-    /* Table unloaded, remove a reference to the ddb_handle object */
-
-    acpi_ut_remove_reference(ddb_handle);
+    /*
+     * Invalidate the handle. We do this because the handle may be stored
+     * in a named object and may not be actually deleted until much later.
+     */
+    ddb_handle->common.flags &= ~AOPOBJ_DATA_VALID;
     return_ACPI_STATUS(AE_OK);
 }
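The new DdbHandle checks above can be read as a single predicate. The helper below is a hypothetical restatement for clarity only (its name is invented, and it assumes the ACPICA internal headers used by this file); the point is that AOPOBJ_DATA_VALID is set in acpi_ex_add_table() and cleared by acpi_ex_unload_table(), so a second Unload of the same handle now fails with AE_BAD_PARAMETER instead of touching a stale table.

/* Hypothetical helper restating the Unload() handle validation above */
static u8 ddb_handle_is_usable(union acpi_operand_object *ddb_handle)
{
        return (ddb_handle != NULL &&
                ACPI_GET_DESCRIPTOR_TYPE(ddb_handle) == ACPI_DESC_TYPE_OPERAND &&
                ddb_handle->common.type == ACPI_TYPE_LOCAL_REFERENCE &&
                (ddb_handle->common.flags & AOPOBJ_DATA_VALID) != 0);
}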
@@ -502,7 +502,7 @@ acpi_ex_create_method(u8 * aml_start,
          * ACPI 2.0: sync_level = sync_level in method declaration
          */
         obj_desc->method.sync_level = (u8)
-            ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4);
+            ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
     }

     /* Attach the new object to the method Node */
...
@@ -120,9 +120,11 @@ static struct acpi_exdump_info acpi_ex_dump_event[2] = {
     {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"}
 };

-static struct acpi_exdump_info acpi_ex_dump_method[8] = {
+static struct acpi_exdump_info acpi_ex_dump_method[9] = {
     {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL},
-    {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"},
+    {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.method_flags), "Method Flags"},
+    {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count),
+     "Parameter Count"},
     {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"},
     {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"},
     {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"},
...
@@ -222,7 +222,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
 {
     acpi_status status;
     union acpi_operand_object *rgn_desc;
-    acpi_physical_address address;
+    u32 region_offset;

     ACPI_FUNCTION_TRACE(ex_access_region);

@@ -243,7 +243,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
      * 3) The current offset into the field
      */
     rgn_desc = obj_desc->common_field.region_obj;
-    address = rgn_desc->region.address +
+    region_offset =
         obj_desc->common_field.base_byte_offset + field_datum_byte_offset;

     if ((function & ACPI_IO_MASK) == ACPI_READ) {
@@ -260,14 +260,16 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
                       obj_desc->common_field.access_byte_width,
                       obj_desc->common_field.base_byte_offset,
                       field_datum_byte_offset, ACPI_CAST_PTR(void,
-                                                             address)));
+                                                             (rgn_desc->
+                                                              region.
+                                                              address +
+                                                              region_offset))));

     /* Invoke the appropriate address_space/op_region handler */

-    status = acpi_ev_address_space_dispatch(rgn_desc, function,
-                                            address,
-                                            ACPI_MUL_8(obj_desc->
-                                                       common_field.
-                                                       access_byte_width),
-                                            value);
+    status =
+        acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
+                                       ACPI_MUL_8(obj_desc->common_field.
+                                                  access_byte_width),
+                                       value);
...
@@ -83,6 +83,15 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)

     if (obj_desc->mutex.prev) {
         (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
+
+        /*
+         * Migrate the previous sync level associated with this mutex to the
+         * previous mutex on the list so that it may be preserved. This handles
+         * the case where several mutexes have been acquired at the same level,
+         * but are not released in opposite order.
+         */
+        (obj_desc->mutex.prev)->mutex.original_sync_level =
+            obj_desc->mutex.original_sync_level;
     } else {
         thread->acquired_mutex_list = obj_desc->mutex.next;
     }
@@ -349,6 +358,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
                       struct acpi_walk_state *walk_state)
 {
     acpi_status status = AE_OK;
+    u8 previous_sync_level;

     ACPI_FUNCTION_TRACE(ex_release_mutex);

@@ -373,11 +383,12 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
          walk_state->thread->thread_id)
         && (obj_desc != acpi_gbl_global_lock_mutex)) {
         ACPI_ERROR((AE_INFO,
-                    "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
-                    (unsigned long)walk_state->thread->thread_id,
+                    "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
+                    ACPI_CAST_PTR(void, walk_state->thread->thread_id),
                     acpi_ut_get_node_name(obj_desc->mutex.node),
-                    (unsigned long)obj_desc->mutex.owner_thread->
-                    thread_id));
+                    ACPI_CAST_PTR(void,
+                                  obj_desc->mutex.owner_thread->
+                                  thread_id)));
         return_ACPI_STATUS(AE_AML_NOT_OWNER);
     }

@@ -391,10 +402,14 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
     }

     /*
-     * The sync level of the mutex must be less than or equal to the current
-     * sync level
+     * The sync level of the mutex must be equal to the current sync level. In
+     * other words, the current level means that at least one mutex at that
+     * level is currently being held. Attempting to release a mutex of a
+     * different level can only mean that the mutex ordering rule is being
+     * violated. This behavior is clarified in ACPI 4.0 specification.
      */
-    if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
+    if (obj_desc->mutex.sync_level !=
+        walk_state->thread->current_sync_level) {
         ACPI_ERROR((AE_INFO,
                     "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
                     acpi_ut_get_node_name(obj_desc->mutex.node),
@@ -403,14 +418,24 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
         return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
     }

+    /*
+     * Get the previous sync_level from the head of the acquired mutex list.
+     * This handles the case where several mutexes at the same level have been
+     * acquired, but are not released in reverse order.
+     */
+    previous_sync_level =
+        walk_state->thread->acquired_mutex_list->mutex.original_sync_level;
+
     status = acpi_ex_release_mutex_object(obj_desc);
+    if (ACPI_FAILURE(status)) {
+        return_ACPI_STATUS(status);
+    }

     if (obj_desc->mutex.acquisition_depth == 0) {

-        /* Restore the original sync_level */
+        /* Restore the previous sync_level */

-        walk_state->thread->current_sync_level =
-            obj_desc->mutex.original_sync_level;
+        walk_state->thread->current_sync_level = previous_sync_level;
     }
     return_ACPI_STATUS(status);
 }
...
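A simplified, standalone model of the bookkeeping above may help: each acquired mutex remembers the sync level that was current before it was taken, and on release the level saved at the head of the acquired list is restored, with the saved level migrated along the list when a mutex is unlinked out of order. All names and types below are invented for illustration and are not ACPICA code.

#include <stdio.h>
#include <stdint.h>

struct model_mutex {
        const char *name;
        uint8_t sync_level;             /* level declared for the mutex */
        uint8_t original_sync_level;    /* thread level before acquisition */
        struct model_mutex *next, *prev;
};

struct model_thread {
        uint8_t current_sync_level;
        struct model_mutex *acquired_mutex_list;
};

static void acquire(struct model_thread *t, struct model_mutex *m)
{
        m->original_sync_level = t->current_sync_level;
        t->current_sync_level = m->sync_level;
        m->prev = NULL;
        m->next = t->acquired_mutex_list;
        if (t->acquired_mutex_list)
                t->acquired_mutex_list->prev = m;
        t->acquired_mutex_list = m;
}

static void release(struct model_thread *t, struct model_mutex *m)
{
        /* Restore from the list head, as acpi_ex_release_mutex() now does */
        uint8_t previous = t->acquired_mutex_list->original_sync_level;

        /* Unlink, migrating the saved level as acpi_ex_unlink_mutex() does */
        if (m->prev) {
                m->prev->next = m->next;
                m->prev->original_sync_level = m->original_sync_level;
        } else {
                t->acquired_mutex_list = m->next;
        }
        if (m->next)
                m->next->prev = m->prev;

        t->current_sync_level = previous;
}

int main(void)
{
        struct model_thread t = { 0, NULL };
        struct model_mutex a = { "A", 5, 0, NULL, NULL };
        struct model_mutex b = { "B", 5, 0, NULL, NULL };

        acquire(&t, &a);        /* level 0 -> 5 */
        acquire(&t, &b);        /* level 5 -> 5 */
        release(&t, &a);        /* released out of order: level stays 5 */
        release(&t, &b);        /* level returns to 0 */
        printf("final sync level = %u\n", t.current_sync_level);
        return 0;
}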
@@ -193,10 +193,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,

         case ACPI_REFCLASS_TABLE:

+            /* Case for ddb_handle */
+
             ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
                                   "Table Index 0x%X\n",
                                   source_desc->reference.value));
-            break;
+            return;

         default:
             break;
...
@@ -81,9 +81,9 @@ acpi_status acpi_hw_clear_acpi_status(void)

     ACPI_FUNCTION_TRACE(hw_clear_acpi_status);

-    ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %0llX\n",
+    ACPI_DEBUG_PRINT((ACPI_DB_IO, "About to write %04X to %8.8X%8.8X\n",
                       ACPI_BITMASK_ALL_FIXED_STATUS,
-                      acpi_gbl_xpm1a_status.address));
+                      ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));

     lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
...
@@ -334,9 +334,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)

         /* Get the next node in this scope (NULL if none) */

-        child_node =
-            acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-                                  child_node);
+        child_node = acpi_ns_get_next_node(parent_node, child_node);
         if (child_node) {

             /* Found a child node - detach any attached object */
@@ -345,8 +343,7 @@ void acpi_ns_delete_namespace_subtree(struct acpi_namespace_node *parent_node)

             /* Check if this node has any children */

-            if (acpi_ns_get_next_node
-                (ACPI_TYPE_ANY, child_node, NULL)) {
+            if (child_node->child) {
                 /*
                  * There is at least one child of this node,
                  * visit the node
@@ -432,9 +429,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)
          * Get the next child of this parent node. When child_node is NULL,
          * the first child of the parent is returned
          */
-        child_node =
-            acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-                                  child_node);
+        child_node = acpi_ns_get_next_node(parent_node, child_node);

         if (deletion_node) {
             acpi_ns_delete_children(deletion_node);
@@ -452,8 +447,7 @@ void acpi_ns_delete_namespace_by_owner(acpi_owner_id owner_id)

             /* Check if this node has any children */

-            if (acpi_ns_get_next_node
-                (ACPI_TYPE_ANY, child_node, NULL)) {
+            if (child_node->child) {
                 /*
                  * There is at least one child of this node,
                  * visit the node
...
@@ -149,7 +149,7 @@ char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node)

     name_buffer = ACPI_ALLOCATE_ZEROED(size);
     if (!name_buffer) {
-        ACPI_ERROR((AE_INFO, "Allocation failure"));
+        ACPI_ERROR((AE_INFO, "Could not allocate %u bytes", (u32)size));
         return_PTR(NULL);
     }
...
@@ -213,6 +213,15 @@ void acpi_ns_detach_object(struct acpi_namespace_node *node)
         return_VOID;
     }

+    if (node->flags & ANOBJ_ALLOCATED_BUFFER) {
+
+        /* Free the dynamic aml buffer */
+
+        if (obj_desc->common.type == ACPI_TYPE_METHOD) {
+            ACPI_FREE(obj_desc->method.aml_start);
+        }
+    }
+
     /* Clear the entry in all cases */

     node->object = NULL;
...
@@ -144,7 +144,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,

     pathname = acpi_ns_get_external_pathname(node);
     if (!pathname) {
-        pathname = ACPI_CAST_PTR(char, predefined->info.name);
+        return AE_OK;	/* Could not get pathname, ignore */
     }

     /*
@@ -230,10 +230,7 @@ acpi_ns_check_predefined_names(struct acpi_namespace_node *node,
     }

       exit:
-    if (pathname != predefined->info.name) {
-        ACPI_FREE(pathname);
-    }
-
+    ACPI_FREE(pathname);
     return (status);
 }
...
@@ -45,6 +45,10 @@
 #include "accommon.h"
 #include "acnamesp.h"

+#ifdef ACPI_ASL_COMPILER
+#include "amlcode.h"
+#endif
+
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nssearch")
...
@@ -52,8 +52,7 @@ ACPI_MODULE_NAME("nswalk")
 *
 * FUNCTION:    acpi_ns_get_next_node
 *
- * PARAMETERS:  Type                - Type of node to be searched for
- *              parent_node         - Parent node whose children we are
+ * PARAMETERS:  parent_node         - Parent node whose children we are
 *                                    getting
 *              child_node          - Previous child that was found.
 *                                    The NEXT child will be returned
@@ -66,27 +65,68 @@ ACPI_MODULE_NAME("nswalk")
 *              within Scope is returned.
 *
 ******************************************************************************/
-struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
-                                                  struct acpi_namespace_node
-                                                  *parent_node,
+struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
+                                                  *parent_node,
                                                   struct acpi_namespace_node
                                                   *child_node)
 {
-    struct acpi_namespace_node *next_node = NULL;
-
     ACPI_FUNCTION_ENTRY();

     if (!child_node) {

         /* It's really the parent's _scope_ that we want */

-        next_node = parent_node->child;
+        return parent_node->child;
     }

-    else {
-        /* Start search at the NEXT node */
-
-        next_node = acpi_ns_get_next_valid_node(child_node);
-    }
+    /*
+     * Get the next node.
+     *
+     * If we are at the end of this peer list, return NULL
+     */
+    if (child_node->flags & ANOBJ_END_OF_PEER_LIST) {
+        return NULL;
+    }

+    /* Otherwise just return the next peer */
+
+    return child_node->peer;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ns_get_next_node_typed
+ *
+ * PARAMETERS:  Type                - Type of node to be searched for
+ *              parent_node         - Parent node whose children we are
+ *                                    getting
+ *              child_node          - Previous child that was found.
+ *                                    The NEXT child will be returned
+ *
+ * RETURN:      struct acpi_namespace_node - Pointer to the NEXT child or NULL if
+ *                                    none is found.
+ *
+ * DESCRIPTION: Return the next peer node within the namespace. If Handle
+ *              is valid, Scope is ignored. Otherwise, the first node
+ *              within Scope is returned.
+ *
+ ******************************************************************************/
+
+struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
+                                                         struct
+                                                         acpi_namespace_node
+                                                         *parent_node,
+                                                         struct
+                                                         acpi_namespace_node
+                                                         *child_node)
+{
+    struct acpi_namespace_node *next_node = NULL;
+
+    ACPI_FUNCTION_ENTRY();
+
+    next_node = acpi_ns_get_next_node(parent_node, child_node);
+
     /* If any type is OK, we are done */

     if (type == ACPI_TYPE_ANY) {
@@ -186,9 +226,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
         /* Get the next node in this scope. Null if not found */

         status = AE_OK;
-        child_node =
-            acpi_ns_get_next_node(ACPI_TYPE_ANY, parent_node,
-                                  child_node);
+        child_node = acpi_ns_get_next_node(parent_node, child_node);
         if (child_node) {

             /* Found next child, get the type if we are not searching for ANY */
@@ -269,8 +307,7 @@ acpi_ns_walk_namespace(acpi_object_type type,
          * function has specified that the maximum depth has been reached.
          */
         if ((level < max_depth) && (status != AE_CTRL_DEPTH)) {
-            if (acpi_ns_get_next_node
-                (ACPI_TYPE_ANY, child_node, NULL)) {
+            if (child_node->child) {

                 /* There is at least one child of this node, visit it */
...
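For orientation, a typical caller-side loop over one namespace scope with the split interfaces above (illustrative only; it assumes the ACPICA internal headers, and the processing bodies are left empty):

static void walk_scope(struct acpi_namespace_node *parent)
{
        struct acpi_namespace_node *child = NULL;

        /* Untyped: visit every immediate child of 'parent'.
         * Passing NULL the first time returns parent->child.
         */
        while ((child = acpi_ns_get_next_node(parent, child)) != NULL) {
                /* process 'child' */
        }

        /* Typed: visit only control methods, via the new wrapper */
        child = NULL;
        while ((child = acpi_ns_get_next_node_typed(ACPI_TYPE_METHOD,
                                                    parent, child)) != NULL) {
                /* process method node */
        }
}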
@@ -45,6 +45,8 @@
 #include <acpi/acpi.h>
 #include "accommon.h"
 #include "acnamesp.h"
+#include "acparser.h"
+#include "amlcode.h"

 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsxfname")
@@ -358,3 +360,151 @@ acpi_get_object_info(acpi_handle handle, struct acpi_buffer * buffer)
 }

 ACPI_EXPORT_SYMBOL(acpi_get_object_info)
+
+/******************************************************************************
+ *
+ * FUNCTION:    acpi_install_method
+ *
+ * PARAMETERS:  Buffer         - An ACPI table containing one control method
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install a control method into the namespace. If the method
+ *              name already exists in the namespace, it is overwritten. The
+ *              input buffer must contain a valid DSDT or SSDT containing a
+ *              single control method.
+ *
+ ******************************************************************************/
+acpi_status acpi_install_method(u8 *buffer)
+{
+    struct acpi_table_header *table =
+        ACPI_CAST_PTR(struct acpi_table_header, buffer);
+    u8 *aml_buffer;
+    u8 *aml_start;
+    char *path;
+    struct acpi_namespace_node *node;
+    union acpi_operand_object *method_obj;
+    struct acpi_parse_state parser_state;
+    u32 aml_length;
+    u16 opcode;
+    u8 method_flags;
+    acpi_status status;
+
+    /* Parameter validation */
+
+    if (!buffer) {
+        return AE_BAD_PARAMETER;
+    }
+
+    /* Table must be a DSDT or SSDT */
+
+    if (!ACPI_COMPARE_NAME(table->signature, ACPI_SIG_DSDT) &&
+        !ACPI_COMPARE_NAME(table->signature, ACPI_SIG_SSDT)) {
+        return AE_BAD_HEADER;
+    }
+
+    /* First AML opcode in the table must be a control method */
+
+    parser_state.aml = buffer + sizeof(struct acpi_table_header);
+    opcode = acpi_ps_peek_opcode(&parser_state);
+    if (opcode != AML_METHOD_OP) {
+        return AE_BAD_PARAMETER;
+    }
+
+    /* Extract method information from the raw AML */
+
+    parser_state.aml += acpi_ps_get_opcode_size(opcode);
+    parser_state.pkg_end = acpi_ps_get_next_package_end(&parser_state);
+    path = acpi_ps_get_next_namestring(&parser_state);
+    method_flags = *parser_state.aml++;
+    aml_start = parser_state.aml;
+    aml_length = ACPI_PTR_DIFF(parser_state.pkg_end, aml_start);
+
+    /*
+     * Allocate resources up-front. We don't want to have to delete a new
+     * node from the namespace if we cannot allocate memory.
+     */
+    aml_buffer = ACPI_ALLOCATE(aml_length);
+    if (!aml_buffer) {
+        return AE_NO_MEMORY;
+    }
+
+    method_obj = acpi_ut_create_internal_object(ACPI_TYPE_METHOD);
+    if (!method_obj) {
+        ACPI_FREE(aml_buffer);
+        return AE_NO_MEMORY;
+    }
+
+    /* Lock namespace for acpi_ns_lookup, we may be creating a new node */
+
+    status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+    if (ACPI_FAILURE(status)) {
+        goto error_exit;
+    }
+
+    /* The lookup either returns an existing node or creates a new one */
+
+    status =
+        acpi_ns_lookup(NULL, path, ACPI_TYPE_METHOD, ACPI_IMODE_LOAD_PASS1,
+                       ACPI_NS_DONT_OPEN_SCOPE | ACPI_NS_ERROR_IF_FOUND,
+                       NULL, &node);
+
+    (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+
+    if (ACPI_FAILURE(status)) {	/* ns_lookup */
+        if (status != AE_ALREADY_EXISTS) {
+            goto error_exit;
+        }
+
+        /* Node existed previously, make sure it is a method node */
+
+        if (node->type != ACPI_TYPE_METHOD) {
+            status = AE_TYPE;
+            goto error_exit;
+        }
+    }
+
+    /* Copy the method AML to the local buffer */
+
+    ACPI_MEMCPY(aml_buffer, aml_start, aml_length);
+
+    /* Initialize the method object with the new method's information */
+
+    method_obj->method.aml_start = aml_buffer;
+    method_obj->method.aml_length = aml_length;
+
+    method_obj->method.param_count = (u8)
+        (method_flags & AML_METHOD_ARG_COUNT);
+
+    method_obj->method.method_flags = (u8)
+        (method_flags & ~AML_METHOD_ARG_COUNT);
+
+    if (method_flags & AML_METHOD_SERIALIZED) {
+        method_obj->method.sync_level = (u8)
+            ((method_flags & AML_METHOD_SYNC_LEVEL) >> 4);
+    }
+
+    /*
+     * Now that it is complete, we can attach the new method object to
+     * the method Node (detaches/deletes any existing object)
+     */
+    status = acpi_ns_attach_object(node, method_obj, ACPI_TYPE_METHOD);
+
+    /*
+     * Flag indicates AML buffer is dynamic, must be deleted later.
+     * Must be set only after attach above.
+     */
+    node->flags |= ANOBJ_ALLOCATED_BUFFER;
+
+    /* Remove local reference to the method object */
+
+    acpi_ut_remove_reference(method_obj);
+    return status;
+
+error_exit:
+    ACPI_FREE(aml_buffer);
+    ACPI_FREE(method_obj);
+    return status;
+}
+ACPI_EXPORT_SYMBOL(acpi_install_method)
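Caller-side sketch of the new interface (illustrative, not part of the patch): the buffer must be a complete, pre-compiled DSDT or SSDT image whose first AML opcode is a single Method. The method name, wrapper function, and the table bytes (normally produced by compiling ASL with iASL) are placeholders here; the error codes listed are the ones the function above can return.

#include <acpi/acpi.h>

/* Pre-compiled SSDT containing exactly one Method(), e.g. compiled from:
 *   DefinitionBlock (...) { Method (\MTH1, 1) { Return (Arg0) } }
 * The actual byte image is omitted in this sketch.
 */
static u8 method_ssdt[] = { /* compiled table image goes here */ 0 };

static acpi_status install_example(void)
{
        acpi_status status;

        status = acpi_install_method(method_ssdt);
        if (ACPI_FAILURE(status)) {
                /* AE_BAD_PARAMETER, AE_BAD_HEADER, AE_TYPE, or AE_NO_MEMORY */
                return status;
        }

        /* The method (\MTH1 in this example) can now be evaluated normally */
        return AE_OK;
}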
@@ -162,6 +162,7 @@ ACPI_EXPORT_SYMBOL(acpi_get_type)
 acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)
 {
     struct acpi_namespace_node *node;
+    struct acpi_namespace_node *parent_node;
     acpi_status status;

     if (!ret_handle) {
@@ -189,12 +190,12 @@ acpi_status acpi_get_parent(acpi_handle handle, acpi_handle * ret_handle)

     /* Get the parent entry */

-    *ret_handle =
-        acpi_ns_convert_entry_to_handle(acpi_ns_get_parent_node(node));
+    parent_node = acpi_ns_get_parent_node(node);
+    *ret_handle = acpi_ns_convert_entry_to_handle(parent_node);

     /* Return exception if parent is null */

-    if (!acpi_ns_get_parent_node(node)) {
+    if (!parent_node) {
         status = AE_NULL_ENTRY;
     }

@@ -268,7 +269,7 @@ acpi_get_next_object(acpi_object_type type,

     /* Internal function does the real work */

-    node = acpi_ns_get_next_node(type, parent_node, child_node);
+    node = acpi_ns_get_next_node_typed(type, parent_node, child_node);
     if (!node) {
         status = AE_NOT_FOUND;
         goto unlock_and_exit;
...
@@ -547,7 +547,7 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,

         if (!package_element ||
             (package_element->common.type != ACPI_TYPE_PACKAGE)) {
-            return_ACPI_STATUS (AE_AML_OPERAND_TYPE);
+            return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
         }

         /*
@@ -593,9 +593,6 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
             } else {
                 temp_size_needed +=
                     acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
-                if (!temp_size_needed) {
-                    return_ACPI_STATUS(AE_BAD_PARAMETER);
-                }
             }
         } else {
             /*
...
@@ -338,13 +338,17 @@ acpi_resource_to_address64(struct acpi_resource *resource,
     switch (resource->type) {
     case ACPI_RESOURCE_TYPE_ADDRESS16:

-        address16 = (struct acpi_resource_address16 *)&resource->data;
+        address16 =
+            ACPI_CAST_PTR(struct acpi_resource_address16,
+                          &resource->data);
         ACPI_COPY_ADDRESS(out, address16);
         break;

     case ACPI_RESOURCE_TYPE_ADDRESS32:

-        address32 = (struct acpi_resource_address32 *)&resource->data;
+        address32 =
+            ACPI_CAST_PTR(struct acpi_resource_address32,
+                          &resource->data);
         ACPI_COPY_ADDRESS(out, address32);
         break;
...
@@ -284,9 +284,9 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
     if (length > sizeof(struct acpi_table_fadt)) {
         ACPI_WARNING((AE_INFO,
                       "FADT (revision %u) is longer than ACPI 2.0 version, "
-                      "truncating length 0x%X to 0x%zX",
-                      table->revision, (unsigned)length,
-                      sizeof(struct acpi_table_fadt)));
+                      "truncating length 0x%X to 0x%X",
+                      table->revision, length,
+                      (u32)sizeof(struct acpi_table_fadt)));
     }

     /* Clear the entire local FADT */
@@ -441,7 +441,7 @@ static void acpi_tb_convert_fadt(void)
                                                  &acpi_gbl_FADT,
                                                  fadt_info_table
                                                  [i].length),
-                                    address32);
+                                    (u64) address32);
         }
     }
 }
@@ -469,7 +469,6 @@ static void acpi_tb_convert_fadt(void)
 static void acpi_tb_validate_fadt(void)
 {
     char *name;
-    u32 *address32;
     struct acpi_generic_address *address64;
     u8 length;
     u32 i;
@@ -505,15 +504,12 @@ static void acpi_tb_validate_fadt(void)
     for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
         /*
-         * Generate pointers to the 32-bit and 64-bit addresses, get the
-         * register length (width), and the register name
+         * Generate pointer to the 64-bit address, get the register
+         * length (width) and the register name
          */
         address64 = ACPI_ADD_PTR(struct acpi_generic_address,
                                  &acpi_gbl_FADT,
                                  fadt_info_table[i].address64);
-        address32 =
-            ACPI_ADD_PTR(u32, &acpi_gbl_FADT,
-                         fadt_info_table[i].address32);
         length =
             *ACPI_ADD_PTR(u8, &acpi_gbl_FADT,
                           fadt_info_table[i].length);
...
@@ -472,7 +472,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
      * lock may block, and also since the execution of a namespace walk
      * must be allowed to use the interpreter.
      */
-    acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
+    (void)acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);

     status = acpi_ut_acquire_write_lock(&acpi_gbl_namespace_rw_lock);
     acpi_ns_delete_namespace_by_owner(owner_id);
...
@@ -676,6 +676,7 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
 {
     u16 reference_count;
     union acpi_operand_object *next_object;
+    acpi_status status;

     /* Save fields from destination that we don't want to overwrite */

@@ -768,6 +769,28 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
         }
         break;

+        /*
+         * For Mutex and Event objects, we cannot simply copy the underlying
+         * OS object. We must create a new one.
+         */
+    case ACPI_TYPE_MUTEX:
+
+        status = acpi_os_create_mutex(&dest_desc->mutex.os_mutex);
+        if (ACPI_FAILURE(status)) {
+            return status;
+        }
+        break;
+
+    case ACPI_TYPE_EVENT:
+
+        status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0,
+                                          &dest_desc->event.
+                                          os_semaphore);
+        if (ACPI_FAILURE(status)) {
+            return status;
+        }
+        break;
+
     default:
         /* Nothing to do for other simple objects */
         break;
...
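The same pattern applies to any deep copy of a structure that embeds an OS-level handle: copy the plain fields, then recreate the handle instead of aliasing it. A generic standalone sketch (pthread-based, purely illustrative, not ACPICA code):

#include <pthread.h>

struct guarded_counter {
        long value;
        pthread_mutex_t lock;   /* OS object: must not be shared by a copy */
};

/* Deep copy: duplicate the data, but give the copy its own mutex */
static int guarded_counter_copy(struct guarded_counter *dst,
                                const struct guarded_counter *src)
{
        dst->value = src->value;
        return pthread_mutex_init(&dst->lock, NULL);    /* fresh OS object */
}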
@@ -179,9 +179,9 @@ acpi_debug_print(u32 requested_debug_level,
     if (thread_id != acpi_gbl_prev_thread_id) {
         if (ACPI_LV_THREADS & acpi_dbg_level) {
             acpi_os_printf
-                ("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
-                 (unsigned long)acpi_gbl_prev_thread_id,
-                 (unsigned long)thread_id);
+                ("\n**** Context Switch from TID %p to TID %p ****\n\n",
+                 ACPI_CAST_PTR(void, acpi_gbl_prev_thread_id),
+                 ACPI_CAST_PTR(void, thread_id));
         }

         acpi_gbl_prev_thread_id = thread_id;
@@ -194,7 +194,7 @@ acpi_debug_print(u32 requested_debug_level,
     acpi_os_printf("%8s-%04ld ", module_name, line_number);

     if (ACPI_LV_THREADS & acpi_dbg_level) {
-        acpi_os_printf("[%04lX] ", (unsigned long)thread_id);
+        acpi_os_printf("[%p] ", ACPI_CAST_PTR(void, thread_id));
     }

     acpi_os_printf("[%02ld] %-22.22s: ",
...
@@ -75,6 +75,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
     union acpi_operand_object *handler_desc;
     union acpi_operand_object *second_desc;
     union acpi_operand_object *next_desc;
+    union acpi_operand_object **last_obj_ptr;

     ACPI_FUNCTION_TRACE_PTR(ut_delete_internal_obj, object);

@@ -223,6 +224,26 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
          */
         handler_desc = object->region.handler;
         if (handler_desc) {
+            next_desc =
+                handler_desc->address_space.region_list;
+            last_obj_ptr =
+                &handler_desc->address_space.region_list;
+
+            /* Remove the region object from the handler's list */
+
+            while (next_desc) {
+                if (next_desc == object) {
+                    *last_obj_ptr =
+                        next_desc->region.next;
+                    break;
+                }
+
+                /* Walk the linked list of handler */
+
+                last_obj_ptr = &next_desc->region.next;
+                next_desc = next_desc->region.next;
+            }
+
             if (handler_desc->address_space.handler_flags &
                 ACPI_ADDR_HANDLER_DEFAULT_INSTALLED) {
...
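The unlink above is the classic pointer-to-pointer pattern for removing an element from a singly linked list without special-casing the head. A standalone illustration with generic types (invented for this example, not ACPICA code):

#include <stdio.h>

struct item {
        int id;
        struct item *next;
};

/* Remove 'victim' from the list rooted at *head, if present */
static void list_remove(struct item **head, struct item *victim)
{
        struct item **link = head;              /* points at the pointer to fix up */

        while (*link) {
                if (*link == victim) {
                        *link = victim->next;   /* bypass the victim */
                        return;
                }
                link = &(*link)->next;
        }
}

int main(void)
{
        struct item c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct item *head = &a;

        list_remove(&head, &b);
        for (struct item *p = head; p; p = p->next)
                printf("%d ", p->id);           /* prints: 1 3 */
        printf("\n");
        return 0;
}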
@@ -1033,11 +1033,12 @@ acpi_error(const char *module_name, u32 line_number, const char *format, ...)
 {
     va_list args;

-    acpi_os_printf("ACPI Error (%s-%04d): ", module_name, line_number);
+    acpi_os_printf("ACPI Error: ");

     va_start(args, format);
     acpi_os_vprintf(format, args);
-    acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+    acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+                   line_number);
     va_end(args);
 }

@@ -1047,12 +1048,12 @@ acpi_exception(const char *module_name,
 {
     va_list args;

-    acpi_os_printf("ACPI Exception (%s-%04d): %s, ", module_name,
-                   line_number, acpi_format_exception(status));
+    acpi_os_printf("ACPI Exception: %s, ", acpi_format_exception(status));

     va_start(args, format);
     acpi_os_vprintf(format, args);
-    acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+    acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+                   line_number);
     va_end(args);
 }

@@ -1061,11 +1062,12 @@ acpi_warning(const char *module_name, u32 line_number, const char *format, ...)
 {
     va_list args;

-    acpi_os_printf("ACPI Warning (%s-%04d): ", module_name, line_number);
+    acpi_os_printf("ACPI Warning: ");

     va_start(args, format);
     acpi_os_vprintf(format, args);
-    acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+    acpi_os_printf(" %8.8X %s-%u\n", ACPI_CA_VERSION, module_name,
+                   line_number);
     va_end(args);
 }

@@ -1074,10 +1076,6 @@ acpi_info(const char *module_name, u32 line_number, const char *format, ...)
 {
     va_list args;

-    /*
-     * Removed module_name, line_number, and acpica version, not needed
-     * for info output
-     */
     acpi_os_printf("ACPI: ");

     va_start(args, format);
...
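The net effect on the log format: the module/line tag and the ACPICA version move from the prefix to the end of the message. A hedged illustration of the before/after output for a hypothetical call inside an ACPICA source file (the message text, module name, and line number are made up for the example):

/* Illustrative only - message, module name, and line number are invented */
ACPI_ERROR((AE_INFO, "No handler for Region [%s]", "ECRA"));

/* Old format:  ACPI Error (evregion-0291): No handler for Region [ECRA] [20090320]
 * New format:  ACPI Error: No handler for Region [ECRA] 20090521 evregion-291
 */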
...@@ -230,17 +230,18 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) ...@@ -230,17 +230,18 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) { if (i == mutex_id) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Mutex [%s] already acquired by this thread [%X]", "Mutex [%s] already acquired by this thread [%p]",
acpi_ut_get_mutex_name acpi_ut_get_mutex_name
(mutex_id), (mutex_id),
this_thread_id)); ACPI_CAST_PTR(void,
this_thread_id)));
return (AE_ALREADY_ACQUIRED); return (AE_ALREADY_ACQUIRED);
} }
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Invalid acquire order: Thread %X owns [%s], wants [%s]", "Invalid acquire order: Thread %p owns [%s], wants [%s]",
this_thread_id, ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(i), acpi_ut_get_mutex_name(i),
acpi_ut_get_mutex_name(mutex_id))); acpi_ut_get_mutex_name(mutex_id)));
...@@ -251,24 +252,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) ...@@ -251,24 +252,24 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
#endif #endif
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX attempting to acquire Mutex [%s]\n", "Thread %p attempting to acquire Mutex [%s]\n",
(unsigned long)this_thread_id, ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(mutex_id))); acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
ACPI_WAIT_FOREVER); ACPI_WAIT_FOREVER);
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX acquired Mutex [%s]\n", "Thread %p acquired Mutex [%s]\n",
(unsigned long)this_thread_id, ACPI_CAST_PTR(void, this_thread_id),
acpi_ut_get_mutex_name(mutex_id))); acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++; acpi_gbl_mutex_info[mutex_id].use_count++;
acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id; acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
} else { } else {
ACPI_EXCEPTION((AE_INFO, status, ACPI_EXCEPTION((AE_INFO, status,
"Thread %lX could not acquire Mutex [%X]", "Thread %p could not acquire Mutex [%X]",
(unsigned long)this_thread_id, mutex_id)); ACPI_CAST_PTR(void, this_thread_id), mutex_id));
} }
return (status); return (status);
...@@ -293,9 +294,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) ...@@ -293,9 +294,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
ACPI_FUNCTION_NAME(ut_release_mutex); ACPI_FUNCTION_NAME(ut_release_mutex);
this_thread_id = acpi_os_get_thread_id(); this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %p releasing Mutex [%s]\n",
"Thread %lX releasing Mutex [%s]\n", ACPI_CAST_PTR(void, this_thread_id),
(unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id))); acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
......
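The mutex hunks stop casting the thread id to unsigned long for %lX and print it as a pointer instead. A minimal sketch of the new pattern, assuming ACPI_CAST_PTR keeps its stock ACPICA definition ((t *)(acpi_uintptr_t)(p)); the local variable name is illustrative:

	/* struct task_struct * in-kernel, pthread_t in user space (see aclinux.h below) */
	acpi_thread_id tid = acpi_os_get_thread_id();

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
			  "Thread %p acquired Mutex [%s]\n",
			  ACPI_CAST_PTR(void, tid),
			  acpi_ut_get_mutex_name(mutex_id)));

Going through a void pointer and %p covers both definitions of acpi_thread_id and sidesteps the pointer-to-integer cast the old %lX form needed.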
...@@ -47,7 +47,7 @@ ...@@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */ /* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20090320 #define ACPI_CA_VERSION 0x20090521
#include "actypes.h" #include "actypes.h"
#include "actbl.h" #include "actbl.h"
...@@ -201,6 +201,8 @@ acpi_evaluate_object_typed(acpi_handle object, ...@@ -201,6 +201,8 @@ acpi_evaluate_object_typed(acpi_handle object,
acpi_status acpi_status
acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer); acpi_get_object_info(acpi_handle handle, struct acpi_buffer *return_buffer);
acpi_status acpi_install_method(u8 *buffer);
acpi_status acpi_status
acpi_get_next_object(acpi_object_type type, acpi_get_next_object(acpi_object_type type,
acpi_handle parent, acpi_handle parent,
...@@ -375,7 +377,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state); ...@@ -375,7 +377,7 @@ acpi_status acpi_leave_sleep_state_prep(u8 sleep_state);
acpi_status acpi_leave_sleep_state(u8 sleep_state); acpi_status acpi_leave_sleep_state(u8 sleep_state);
/* /*
* Debug output * Error/Warning output
*/ */
void ACPI_INTERNAL_VAR_XFACE void ACPI_INTERNAL_VAR_XFACE
acpi_error(const char *module_name, acpi_error(const char *module_name,
...@@ -394,6 +396,9 @@ void ACPI_INTERNAL_VAR_XFACE ...@@ -394,6 +396,9 @@ void ACPI_INTERNAL_VAR_XFACE
acpi_info(const char *module_name, acpi_info(const char *module_name,
u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3); u32 line_number, const char *format, ...) ACPI_PRINTF_LIKE(3);
/*
* Debug output
*/
#ifdef ACPI_DEBUG_OUTPUT #ifdef ACPI_DEBUG_OUTPUT
void ACPI_INTERNAL_VAR_XFACE void ACPI_INTERNAL_VAR_XFACE
......
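acpi_install_method() is the new single-method installation interface declared in the acpixf.h hunk above. A hedged usage sketch: following the ACPICA convention for AcpiInstallMethod, the buffer is expected to hold a small table image (for example an SSDT emitted by iasl) that defines exactly one control method; the method_aml array below is assumed to come from such a build step and is not shown here:

	extern u8 method_aml[];   /* AML byte stream defining one Method (assumed) */
	acpi_status status;

	status = acpi_install_method(method_aml);
	if (ACPI_FAILURE(status)) {
		acpi_os_printf("acpi_install_method failed: %s\n",
			       acpi_format_exception(status));
	}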
...@@ -429,20 +429,12 @@ typedef unsigned long long acpi_integer; ...@@ -429,20 +429,12 @@ typedef unsigned long long acpi_integer;
/* Data manipulation */ /* Data manipulation */
#define ACPI_LOWORD(l) ((u16)(u32)(l)) #define ACPI_LOBYTE(integer) ((u8) (u16)(integer))
#define ACPI_HIWORD(l) ((u16)((((u32)(l)) >> 16) & 0xFFFF)) #define ACPI_HIBYTE(integer) ((u8) (((u16)(integer)) >> 8))
#define ACPI_LOBYTE(l) ((u8)(u16)(l)) #define ACPI_LOWORD(integer) ((u16) (u32)(integer))
#define ACPI_HIBYTE(l) ((u8)((((u16)(l)) >> 8) & 0xFF)) #define ACPI_HIWORD(integer) ((u16)(((u32)(integer)) >> 16))
#define ACPI_LODWORD(integer64) ((u32) (u64)(integer64))
/* Full 64-bit integer must be available on both 32-bit and 64-bit platforms */ #define ACPI_HIDWORD(integer64) ((u32)(((u64)(integer64)) >> 32))
struct acpi_integer_overlay {
u32 lo_dword;
u32 hi_dword;
};
#define ACPI_LODWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
#define ACPI_HIDWORD(integer) (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
#define ACPI_SET_BIT(target,bit) ((target) |= (bit)) #define ACPI_SET_BIT(target,bit) ((target) |= (bit))
#define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit)) #define ACPI_CLEAR_BIT(target,bit) ((target) &= ~(bit))
......
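The rewritten extraction macros drop the struct overlay that the old ACPI_LODWORD/ACPI_HIDWORD relied on (which only gave the expected result on little-endian hosts) and use plain shifts and casts throughout. A worked example of what the new forms evaluate to; the input values are arbitrary:

	u64 x = 0x1122334455667788;

	ACPI_HIDWORD(x)            /* 0x11223344 */
	ACPI_LODWORD(x)            /* 0x55667788 */
	ACPI_HIWORD(0x55667788)    /* 0x5566 */
	ACPI_LOWORD(0x55667788)    /* 0x7788 */
	ACPI_HIBYTE(0x7788)        /* 0x77 */
	ACPI_LOBYTE(0x7788)        /* 0x88 */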
...@@ -62,4 +62,8 @@ ...@@ -62,4 +62,8 @@
*/ */
#define ACPI_UNUSED_VAR __attribute__ ((unused)) #define ACPI_UNUSED_VAR __attribute__ ((unused))
#ifdef _ANSI
#define inline
#endif
#endif /* __ACGCC_H__ */ #endif /* __ACGCC_H__ */
/****************************************************************************** /******************************************************************************
* *
* Name: aclinux.h - OS specific defines, etc. * Name: aclinux.h - OS specific defines, etc. for Linux
* *
*****************************************************************************/ *****************************************************************************/
/* /*
* Copyright (C) 2000 - 2008, Intel Corp. * Copyright (C) 2000 - 2009, Intel Corp.
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without * Redistribution and use in source and binary forms, with or without
...@@ -44,10 +44,13 @@ ...@@ -44,10 +44,13 @@
#ifndef __ACLINUX_H__ #ifndef __ACLINUX_H__
#define __ACLINUX_H__ #define __ACLINUX_H__
/* Common (in-kernel/user-space) ACPICA configuration */
#define ACPI_USE_SYSTEM_CLIBRARY #define ACPI_USE_SYSTEM_CLIBRARY
#define ACPI_USE_DO_WHILE_0 #define ACPI_USE_DO_WHILE_0
#define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE #define ACPI_MUTEX_TYPE ACPI_BINARY_SEMAPHORE
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/string.h> #include <linux/string.h>
...@@ -63,14 +66,17 @@ ...@@ -63,14 +66,17 @@
#include <linux/spinlock_types.h> #include <linux/spinlock_types.h>
#include <asm/current.h> #include <asm/current.h>
/* Host-dependent types and defines */ /* Host-dependent types and defines for in-kernel ACPICA */
#define ACPI_MACHINE_WIDTH BITS_PER_LONG #define ACPI_MACHINE_WIDTH BITS_PER_LONG
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
#define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol);
#define strtoul simple_strtoul #define strtoul simple_strtoul
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
#define acpi_cpu_flags unsigned long
#define acpi_thread_id struct task_struct *
#else /* !__KERNEL__ */ #else /* !__KERNEL__ */
#include <stdarg.h> #include <stdarg.h>
...@@ -79,6 +85,11 @@ ...@@ -79,6 +85,11 @@
#include <ctype.h> #include <ctype.h>
#include <unistd.h> #include <unistd.h>
/* Host-dependent types and defines for user-space ACPICA */
#define ACPI_FLUSH_CPU_CACHE()
#define acpi_thread_id pthread_t
#if defined(__ia64__) || defined(__x86_64__) #if defined(__ia64__) || defined(__x86_64__)
#define ACPI_MACHINE_WIDTH 64 #define ACPI_MACHINE_WIDTH 64
#define COMPILER_DEPENDENT_INT64 long #define COMPILER_DEPENDENT_INT64 long
...@@ -94,17 +105,17 @@ ...@@ -94,17 +105,17 @@
#define __cdecl #define __cdecl
#endif #endif
#define ACPI_FLUSH_CPU_CACHE()
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
/* Linux uses GCC */ /* Linux uses GCC */
#include "acgcc.h" #include "acgcc.h"
#define acpi_cpu_flags unsigned long
#define acpi_thread_id struct task_struct *
#ifdef __KERNEL__
/*
* Overrides for in-kernel ACPICA
*/
static inline acpi_thread_id acpi_os_get_thread_id(void) static inline acpi_thread_id acpi_os_get_thread_id(void)
{ {
return current; return current;
...@@ -119,30 +130,32 @@ static inline acpi_thread_id acpi_os_get_thread_id(void) ...@@ -119,30 +130,32 @@ static inline acpi_thread_id acpi_os_get_thread_id(void)
#include <acpi/actypes.h> #include <acpi/actypes.h>
static inline void *acpi_os_allocate(acpi_size size) static inline void *acpi_os_allocate(acpi_size size)
{ {
return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
} }
static inline void *acpi_os_allocate_zeroed(acpi_size size) static inline void *acpi_os_allocate_zeroed(acpi_size size)
{ {
return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
} }
static inline void *acpi_os_acquire_object(acpi_cache_t * cache) static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
{ {
return kmem_cache_zalloc(cache, return kmem_cache_zalloc(cache,
irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
} }
#define ACPI_ALLOCATE(a) acpi_os_allocate(a) #define ACPI_ALLOCATE(a) acpi_os_allocate(a)
#define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a) #define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a)
#define ACPI_FREE(a) kfree(a) #define ACPI_FREE(a) kfree(a)
/* /* Used within ACPICA to show where it is safe to preempt execution */
* We need to show where it is safe to preempt execution of ACPICA
*/
#define ACPI_PREEMPTION_POINT() \ #define ACPI_PREEMPTION_POINT() \
do { \ do { \
if (!irqs_disabled()) \ if (!irqs_disabled()) \
cond_resched(); \ cond_resched(); \
} while (0) } while (0)
#endif /* __KERNEL__ */
#endif /* __ACLINUX_H__ */ #endif /* __ACLINUX_H__ */
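The reorganized Linux OSL keeps the interrupt-context-aware allocators and the preemption point inside the __KERNEL__ block. A minimal caller sketch under the assumption of in-kernel ACPICA context; the buffer size and local name are illustrative:

	void *buf;

	buf = ACPI_ALLOCATE_ZEROED(128);   /* kzalloc(); GFP_ATOMIC when IRQs are disabled */
	if (!buf) {
		return (AE_NO_MEMORY);
	}

	ACPI_PREEMPTION_POINT();           /* cond_resched() only when IRQs are enabled */
	ACPI_FREE(buf);                    /* kfree() */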