Commit 4c90ece2 authored by Bob Moore, committed by Len Brown

ACPI: ACPICA 20060608

Converted the locking mutex used for the ACPI hardware
to a spinlock. This change should eliminate all problems
caused by attempting to acquire a semaphore at interrupt
level, and it means that all ACPICA external interfaces
that directly access the ACPI hardware can be safely
called from interrupt level.

Fixed a regression introduced in 20060526 where the ACPI
device initialization could be prematurely aborted with
an AE_NOT_FOUND if a device did not have an optional
_INI method.

Fixed an IndexField issue where a write to the Data
Register should be limited in size to the AccessSize
(width) of the IndexField itself. (BZ 433, Fiodor Suietov)

Fixed problem reports (Valery Podrezov) integrated: - Allow
store of ThermalZone objects to Debug object.
http://bugzilla.kernel.org/show_bug.cgi?id=5369
http://bugzilla.kernel.org/show_bug.cgi?id=5370

Fixed problem reports (Fiodor Suietov) integrated: -
acpi_get_table_header() doesn't handle multiple instances
correctly (BZ 364)

Removed four global mutexes that were obsolete and were
no longer being used.
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
parent 4119532c
...@@ -507,7 +507,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, ...@@ -507,7 +507,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
* If this is the last thread executing the method, * If this is the last thread executing the method,
* we have additional cleanup to perform * we have additional cleanup to perform
*/ */
status = acpi_ut_acquire_mutex(ACPI_MTX_PARSER); status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_VOID; return_VOID;
} }
...@@ -600,7 +600,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, ...@@ -600,7 +600,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
} }
exit: exit:
(void)acpi_ut_release_mutex(ACPI_MTX_PARSER); (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD);
return_VOID; return_VOID;
} }
......
...@@ -382,6 +382,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -382,6 +382,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
u32 status_reg; u32 status_reg;
u32 enable_reg; u32 enable_reg;
acpi_cpu_flags flags; acpi_cpu_flags flags;
acpi_cpu_flags hw_flags;
acpi_native_uint i; acpi_native_uint i;
acpi_native_uint j; acpi_native_uint j;
...@@ -393,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -393,9 +394,12 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
return (int_status); return (int_status);
} }
/* Examine all GPE blocks attached to this interrupt level */ /* We need to hold the GPE lock now, hardware lock in the loop */
flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
/* Examine all GPE blocks attached to this interrupt level */
gpe_block = gpe_xrupt_list->gpe_block_list_head; gpe_block = gpe_xrupt_list->gpe_block_list_head;
while (gpe_block) { while (gpe_block) {
/* /*
...@@ -409,6 +413,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -409,6 +413,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_register_info = &gpe_block->register_info[i]; gpe_register_info = &gpe_block->register_info[i];
hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
/* Read the Status Register */ /* Read the Status Register */
status = status =
...@@ -417,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -417,6 +423,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&gpe_register_info-> &gpe_register_info->
status_address); status_address);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
acpi_os_release_lock(acpi_gbl_hardware_lock,
hw_flags);
goto unlock_and_exit; goto unlock_and_exit;
} }
...@@ -427,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -427,6 +435,8 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
&enable_reg, &enable_reg,
&gpe_register_info-> &gpe_register_info->
enable_address); enable_address);
acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
goto unlock_and_exit; goto unlock_and_exit;
} }
...@@ -499,7 +509,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) ...@@ -499,7 +509,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{ {
struct acpi_gpe_event_info *gpe_event_info = (void *)context; struct acpi_gpe_event_info *gpe_event_info = (void *)context;
u32 gpe_number = 0;
acpi_status status; acpi_status status;
struct acpi_gpe_event_info local_gpe_event_info; struct acpi_gpe_event_info local_gpe_event_info;
struct acpi_evaluate_info *info; struct acpi_evaluate_info *info;
...@@ -565,10 +574,10 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) ...@@ -565,10 +574,10 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, ACPI_EXCEPTION((AE_INFO, status,
"While evaluating method [%4.4s] for GPE[%2X]", "While evaluating GPE method [%4.4s]",
acpi_ut_get_node_name acpi_ut_get_node_name
(local_gpe_event_info.dispatch. (local_gpe_event_info.dispatch.
method_node), gpe_number)); method_node)));
} }
} }
......
...@@ -785,6 +785,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, ...@@ -785,6 +785,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
{ {
acpi_status status; acpi_status status;
acpi_integer mask; acpi_integer mask;
acpi_integer width_mask;
acpi_integer merged_datum; acpi_integer merged_datum;
acpi_integer raw_datum = 0; acpi_integer raw_datum = 0;
u32 field_offset = 0; u32 field_offset = 0;
...@@ -809,8 +810,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, ...@@ -809,8 +810,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
/* Compute the number of datums (access width data items) */ /* Compute the number of datums (access width data items) */
width_mask =
ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width);
mask = mask =
ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field.
start_field_bit_offset);
datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length,
obj_desc->common_field.access_bit_width); obj_desc->common_field.access_bit_width);
...@@ -850,7 +854,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, ...@@ -850,7 +854,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
merged_datum = raw_datum >> merged_datum = raw_datum >>
(obj_desc->common_field.access_bit_width - (obj_desc->common_field.access_bit_width -
obj_desc->common_field.start_field_bit_offset); obj_desc->common_field.start_field_bit_offset);
mask = ACPI_INTEGER_MAX; mask = width_mask;
if (i == datum_count) { if (i == datum_count) {
break; break;
......
...@@ -322,8 +322,9 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state) ...@@ -322,8 +322,9 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
/* Since the bit position is one-based, subtract from 33 (65) */ /* Since the bit position is one-based, subtract from 33 (65) */
return_desc->integer.value = temp32 == 0 ? 0 : return_desc->integer.value =
(ACPI_INTEGER_BIT_SIZE + 1) - temp32; temp32 ==
0 ? 0 : (ACPI_INTEGER_BIT_SIZE + 1) - temp32;
break; break;
case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */ case AML_FROM_BCD_OP: /* from_bcd (BCDValue, Result) */
...@@ -698,6 +699,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) ...@@ -698,6 +699,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
goto cleanup; goto cleanup;
} }
/* Allocate a descriptor to hold the type. */ /* Allocate a descriptor to hold the type. */
return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER); return_desc = acpi_ut_create_internal_object(ACPI_TYPE_INTEGER);
...@@ -967,7 +969,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) ...@@ -967,7 +969,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_ut_add_reference acpi_ut_add_reference
(return_desc); (return_desc);
} }
break; break;
default: default:
...@@ -987,7 +988,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) ...@@ -987,7 +988,6 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) == if (ACPI_GET_DESCRIPTOR_TYPE(return_desc) ==
ACPI_DESC_TYPE_NAMED) { ACPI_DESC_TYPE_NAMED) {
return_desc = return_desc =
acpi_ns_get_attached_object((struct acpi_ns_get_attached_object((struct
acpi_namespace_node acpi_namespace_node
...@@ -1002,7 +1002,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state) ...@@ -1002,7 +1002,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
default: default:
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Unknown opcode in ref(%p) - %X", "Unknown opcode in reference(%p) - %X",
operand[0], operand[0],
operand[0]->reference.opcode)); operand[0]->reference.opcode));
......
...@@ -114,10 +114,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, ...@@ -114,10 +114,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
/* /*
* Several object types require no further processing: * Several object types require no further processing:
* 1) Devices rarely have an attached object, return the Node * 1) Device/Thermal objects don't have a "real" subobject, return the Node
* 2) Method locals and arguments have a pseudo-Node * 2) Method locals and arguments have a pseudo-Node
*/ */
if (entry_type == ACPI_TYPE_DEVICE || if ((entry_type == ACPI_TYPE_DEVICE) ||
(entry_type == ACPI_TYPE_THERMAL) ||
(node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) { (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
return_ACPI_STATUS(AE_OK); return_ACPI_STATUS(AE_OK);
} }
...@@ -216,7 +217,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr, ...@@ -216,7 +217,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
case ACPI_TYPE_METHOD: case ACPI_TYPE_METHOD:
case ACPI_TYPE_POWER: case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
case ACPI_TYPE_EVENT: case ACPI_TYPE_EVENT:
case ACPI_TYPE_REGION: case ACPI_TYPE_REGION:
......
...@@ -257,10 +257,24 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr, ...@@ -257,10 +257,24 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
case AML_INT_NAMEPATH_OP: /* Reference to a named object */ case AML_INT_NAMEPATH_OP: /* Reference to a named object */
/* Get the object pointed to by the namespace node */ /* Dereference the name */
if ((stack_desc->reference.node->type ==
ACPI_TYPE_DEVICE)
|| (stack_desc->reference.node->type ==
ACPI_TYPE_THERMAL)) {
/* These node types do not have 'real' subobjects */
*stack_ptr = (void *)stack_desc->reference.node;
} else {
/* Get the object pointed to by the namespace node */
*stack_ptr =
(stack_desc->reference.node)->object;
acpi_ut_add_reference(*stack_ptr);
}
*stack_ptr = (stack_desc->reference.node)->object;
acpi_ut_add_reference(*stack_ptr);
acpi_ut_remove_reference(stack_desc); acpi_ut_remove_reference(stack_desc);
break; break;
......
...@@ -89,7 +89,7 @@ acpi_status acpi_ex_enter_interpreter(void) ...@@ -89,7 +89,7 @@ acpi_status acpi_ex_enter_interpreter(void)
ACPI_FUNCTION_TRACE(ex_enter_interpreter); ACPI_FUNCTION_TRACE(ex_enter_interpreter);
status = acpi_ut_acquire_mutex(ACPI_MTX_EXECUTE); status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex")); ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
} }
...@@ -125,7 +125,7 @@ void acpi_ex_exit_interpreter(void) ...@@ -125,7 +125,7 @@ void acpi_ex_exit_interpreter(void)
ACPI_FUNCTION_TRACE(ex_exit_interpreter); ACPI_FUNCTION_TRACE(ex_exit_interpreter);
status = acpi_ut_release_mutex(ACPI_MTX_EXECUTE); status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex")); ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
} }
......
...@@ -61,10 +61,13 @@ ACPI_MODULE_NAME("hwregs") ...@@ -61,10 +61,13 @@ ACPI_MODULE_NAME("hwregs")
* DESCRIPTION: Clears all fixed and general purpose status bits * DESCRIPTION: Clears all fixed and general purpose status bits
* THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED
* *
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/ ******************************************************************************/
acpi_status acpi_hw_clear_acpi_status(u32 flags) acpi_status acpi_hw_clear_acpi_status(u32 flags)
{ {
acpi_status status; acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_clear_acpi_status); ACPI_FUNCTION_TRACE(hw_clear_acpi_status);
...@@ -72,12 +75,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags) ...@@ -72,12 +75,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
ACPI_BITMASK_ALL_FIXED_STATUS, ACPI_BITMASK_ALL_FIXED_STATUS,
(u16) acpi_gbl_FADT->xpm1a_evt_blk.address)); (u16) acpi_gbl_FADT->xpm1a_evt_blk.address));
if (flags & ACPI_MTX_LOCK) { lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK, status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
ACPI_REGISTER_PM1_STATUS, ACPI_REGISTER_PM1_STATUS,
...@@ -102,9 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags) ...@@ -102,9 +100,7 @@ acpi_status acpi_hw_clear_acpi_status(u32 flags)
status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block); status = acpi_ev_walk_gpe_list(acpi_hw_clear_gpe_block);
unlock_and_exit: unlock_and_exit:
if (flags & ACPI_MTX_LOCK) { acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -264,6 +260,8 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) ...@@ -264,6 +260,8 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
* *
* DESCRIPTION: ACPI bit_register read function. * DESCRIPTION: ACPI bit_register read function.
* *
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/ ******************************************************************************/
acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags) acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
...@@ -281,23 +279,12 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags) ...@@ -281,23 +279,12 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
if (flags & ACPI_MTX_LOCK) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* Read from the register */ /* Read from the register */
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, status = acpi_hw_register_read(ACPI_MTX_LOCK,
bit_reg_info->parent_register, bit_reg_info->parent_register,
&register_value); &register_value);
if (flags & ACPI_MTX_LOCK) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
/* Normalize the value that was read */ /* Normalize the value that was read */
...@@ -331,12 +318,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_register) ...@@ -331,12 +318,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_register)
* *
* DESCRIPTION: ACPI Bit Register write function. * DESCRIPTION: ACPI Bit Register write function.
* *
* NOTE: TBD: Flags parameter is obsolete, to be removed
*
******************************************************************************/ ******************************************************************************/
acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
{ {
u32 register_value = 0; u32 register_value = 0;
struct acpi_bit_register_info *bit_reg_info; struct acpi_bit_register_info *bit_reg_info;
acpi_status status; acpi_status status;
acpi_cpu_flags lock_flags;
ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id); ACPI_FUNCTION_TRACE_U32(acpi_set_register, register_id);
...@@ -349,12 +339,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) ...@@ -349,12 +339,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
if (flags & ACPI_MTX_LOCK) { lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
/* Always do a register read first so we can insert the new bits */ /* Always do a register read first so we can insert the new bits */
...@@ -462,9 +447,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) ...@@ -462,9 +447,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags)
unlock_and_exit: unlock_and_exit:
if (flags & ACPI_MTX_LOCK) { acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE);
}
/* Normalize the value that was read */ /* Normalize the value that was read */
...@@ -500,14 +483,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value) ...@@ -500,14 +483,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
u32 value1 = 0; u32 value1 = 0;
u32 value2 = 0; u32 value2 = 0;
acpi_status status; acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_register_read); ACPI_FUNCTION_TRACE(hw_register_read);
if (ACPI_MTX_LOCK == use_lock) { if (ACPI_MTX_LOCK == use_lock) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE); lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} }
switch (register_id) { switch (register_id) {
...@@ -585,7 +566,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value) ...@@ -585,7 +566,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
unlock_and_exit: unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) { if (ACPI_MTX_LOCK == use_lock) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE); acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
} }
if (ACPI_SUCCESS(status)) { if (ACPI_SUCCESS(status)) {
...@@ -613,14 +594,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value) ...@@ -613,14 +594,12 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value) acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
{ {
acpi_status status; acpi_status status;
acpi_cpu_flags lock_flags = 0;
ACPI_FUNCTION_TRACE(hw_register_write); ACPI_FUNCTION_TRACE(hw_register_write);
if (ACPI_MTX_LOCK == use_lock) { if (ACPI_MTX_LOCK == use_lock) {
status = acpi_ut_acquire_mutex(ACPI_MTX_HARDWARE); lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} }
switch (register_id) { switch (register_id) {
...@@ -710,7 +689,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value) ...@@ -710,7 +689,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
unlock_and_exit: unlock_and_exit:
if (ACPI_MTX_LOCK == use_lock) { if (ACPI_MTX_LOCK == use_lock) {
(void)acpi_ut_release_mutex(ACPI_MTX_HARDWARE); acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
} }
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
......
...@@ -557,10 +557,13 @@ acpi_ns_init_one_device(acpi_handle obj_handle, ...@@ -557,10 +557,13 @@ acpi_ns_init_one_device(acpi_handle obj_handle,
ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution", ACPI_EXCEPTION((AE_INFO, status, "during %s._INI execution",
scope_name)); scope_name));
ACPI_FREE(scope_name); ACPI_FREE(scope_name);
status = AE_OK;
} }
#endif #endif
/* Ignore errors from above */
status = AE_OK;
/* /*
* The _INI method has been run if present; call the Global Initialization * The _INI method has been run if present; call the Global Initialization
* Handler for this device. * Handler for this device.
......
...@@ -795,9 +795,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout) ...@@ -795,9 +795,6 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n", ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout)); handle, units, timeout));
if (in_atomic())
timeout = 0;
switch (timeout) { switch (timeout) {
/* /*
* No Wait: * No Wait:
......
...@@ -160,12 +160,8 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header) ...@@ -160,12 +160,8 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
ACPI_MOVE_32_TO_32(&signature, table_header->signature); ACPI_MOVE_32_TO_32(&signature, table_header->signature);
if (!acpi_ut_valid_acpi_name(signature)) { if (!acpi_ut_valid_acpi_name(signature)) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO, "Invalid table signature 0x%8.8X",
"Table signature at %p [%p] has invalid characters", signature));
table_header, &signature));
ACPI_WARNING((AE_INFO, "Invalid table signature found: [%4.4s]",
ACPI_CAST_PTR(char, &signature)));
ACPI_DUMP_BUFFER(table_header, ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header)); sizeof(struct acpi_table_header));
...@@ -176,12 +172,9 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header) ...@@ -176,12 +172,9 @@ acpi_tb_validate_table_header(struct acpi_table_header *table_header)
if (table_header->length < sizeof(struct acpi_table_header)) { if (table_header->length < sizeof(struct acpi_table_header)) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
"Invalid length in table header %p name %4.4s", "Invalid length 0x%X in table with signature %4.4s",
table_header, (char *)&signature)); (u32) table_header->length,
ACPI_CAST_PTR(char, &signature)));
ACPI_WARNING((AE_INFO,
"Invalid table header length (0x%X) found",
(u32) table_header->length));
ACPI_DUMP_BUFFER(table_header, ACPI_DUMP_BUFFER(table_header,
sizeof(struct acpi_table_header)); sizeof(struct acpi_table_header));
......
...@@ -691,7 +691,7 @@ char *acpi_ut_get_descriptor_name(void *object) ...@@ -691,7 +691,7 @@ char *acpi_ut_get_descriptor_name(void *object)
char *acpi_ut_get_mutex_name(u32 mutex_id) char *acpi_ut_get_mutex_name(u32 mutex_id)
{ {
if (mutex_id > MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID"); return ("Invalid Mutex ID");
} }
...@@ -760,7 +760,7 @@ void acpi_ut_init_globals(void) ...@@ -760,7 +760,7 @@ void acpi_ut_init_globals(void)
/* Mutex locked flags */ /* Mutex locked flags */
for (i = 0; i < NUM_MUTEX; i++) { for (i = 0; i < ACPI_NUM_MUTEX; i++) {
acpi_gbl_mutex_info[i].mutex = NULL; acpi_gbl_mutex_info[i].mutex = NULL;
acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED; acpi_gbl_mutex_info[i].thread_id = ACPI_MUTEX_NOT_ACQUIRED;
acpi_gbl_mutex_info[i].use_count = 0; acpi_gbl_mutex_info[i].use_count = 0;
......
...@@ -73,14 +73,21 @@ acpi_status acpi_ut_mutex_initialize(void) ...@@ -73,14 +73,21 @@ acpi_status acpi_ut_mutex_initialize(void)
/* /*
* Create each of the predefined mutex objects * Create each of the predefined mutex objects
*/ */
for (i = 0; i < NUM_MUTEX; i++) { for (i = 0; i < ACPI_NUM_MUTEX; i++) {
status = acpi_ut_create_mutex(i); status = acpi_ut_create_mutex(i);
if (ACPI_FAILURE(status)) { if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
} }
/* Create the spinlocks for use at interrupt level */
status = acpi_os_create_lock(&acpi_gbl_gpe_lock); status = acpi_os_create_lock(&acpi_gbl_gpe_lock);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
status = acpi_os_create_lock(&acpi_gbl_hardware_lock);
return_ACPI_STATUS(status); return_ACPI_STATUS(status);
} }
...@@ -105,11 +112,14 @@ void acpi_ut_mutex_terminate(void) ...@@ -105,11 +112,14 @@ void acpi_ut_mutex_terminate(void)
/* /*
* Delete each predefined mutex object * Delete each predefined mutex object
*/ */
for (i = 0; i < NUM_MUTEX; i++) { for (i = 0; i < ACPI_NUM_MUTEX; i++) {
(void)acpi_ut_delete_mutex(i); (void)acpi_ut_delete_mutex(i);
} }
/* Delete the spinlocks */
acpi_os_delete_lock(acpi_gbl_gpe_lock); acpi_os_delete_lock(acpi_gbl_gpe_lock);
acpi_os_delete_lock(acpi_gbl_hardware_lock);
return_VOID; return_VOID;
} }
...@@ -131,7 +141,7 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id) ...@@ -131,7 +141,7 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id)
ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id); ACPI_FUNCTION_TRACE_U32(ut_create_mutex, mutex_id);
if (mutex_id > MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
...@@ -165,7 +175,7 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) ...@@ -165,7 +175,7 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id)
ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id); ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id);
if (mutex_id > MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
return_ACPI_STATUS(AE_BAD_PARAMETER); return_ACPI_STATUS(AE_BAD_PARAMETER);
} }
...@@ -196,7 +206,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) ...@@ -196,7 +206,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
ACPI_FUNCTION_NAME(ut_acquire_mutex); ACPI_FUNCTION_NAME(ut_acquire_mutex);
if (mutex_id > MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER); return (AE_BAD_PARAMETER);
} }
...@@ -213,7 +223,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) ...@@ -213,7 +223,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
* the mutex ordering rule. This indicates a coding error somewhere in * the mutex ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code. * the ACPI subsystem code.
*/ */
for (i = mutex_id; i < MAX_MUTEX; i++) { for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) { if (i == mutex_id) {
ACPI_ERROR((AE_INFO, ACPI_ERROR((AE_INFO,
...@@ -284,7 +294,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) ...@@ -284,7 +294,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
"Thread %X releasing Mutex [%s]\n", this_thread_id, "Thread %X releasing Mutex [%s]\n", this_thread_id,
acpi_ut_get_mutex_name(mutex_id))); acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > MAX_MUTEX) { if (mutex_id > ACPI_MAX_MUTEX) {
return (AE_BAD_PARAMETER); return (AE_BAD_PARAMETER);
} }
...@@ -309,7 +319,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) ...@@ -309,7 +319,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* ordering rule. This indicates a coding error somewhere in * ordering rule. This indicates a coding error somewhere in
* the ACPI subsystem code. * the ACPI subsystem code.
*/ */
for (i = mutex_id; i < MAX_MUTEX; i++) { for (i = mutex_id; i < ACPI_MAX_MUTEX; i++) {
if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) { if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
if (i == mutex_id) { if (i == mutex_id) {
continue; continue;
......
...@@ -63,7 +63,7 @@ ...@@ -63,7 +63,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */ /* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20060526 #define ACPI_CA_VERSION 0x20060608
/* /*
* OS name, used for the _OS object. The _OS object is essentially obsolete, * OS name, used for the _OS object. The _OS object is essentially obsolete,
...@@ -171,15 +171,8 @@ ...@@ -171,15 +171,8 @@
#define ACPI_MAX_ADDRESS_SPACE 255 #define ACPI_MAX_ADDRESS_SPACE 255
/* Array sizes. Used for range checking also */ /* Array sizes. Used for range checking also */
#define ACPI_MAX_MATCH_OPCODE 5
#if 0 #define ACPI_MAX_MATCH_OPCODE 5
#define ACPI_NUM_ACCESS_TYPES 6
#define ACPI_NUM_UPDATE_RULES 3
#define ACPI_NUM_LOCK_RULES 2
#define ACPI_NUM_FIELD_NAMES 2
#define ACPI_NUM_OPCODES 256
#endif
/* RSDP checksums */ /* RSDP checksums */
......
...@@ -186,7 +186,7 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; ...@@ -186,7 +186,7 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1];
* actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs.
* (The table maps local handles to the real OS handles) * (The table maps local handles to the real OS handles)
*/ */
ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[NUM_MUTEX]; ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
/***************************************************************************** /*****************************************************************************
* *
...@@ -314,7 +314,11 @@ ACPI_EXTERN struct acpi_fixed_event_handler ...@@ -314,7 +314,11 @@ ACPI_EXTERN struct acpi_fixed_event_handler
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
/* Spinlocks */
ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock;
ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock;
/***************************************************************************** /*****************************************************************************
* *
......
...@@ -72,52 +72,55 @@ union acpi_parse_object; ...@@ -72,52 +72,55 @@ union acpi_parse_object;
* Predefined handles for the mutex objects used within the subsystem * Predefined handles for the mutex objects used within the subsystem
* All mutex objects are automatically created by acpi_ut_mutex_initialize. * All mutex objects are automatically created by acpi_ut_mutex_initialize.
* *
* The acquire/release ordering protocol is implied via this list. Mutexes * The acquire/release ordering protocol is implied via this list. Mutexes
* with a lower value must be acquired before mutexes with a higher value. * with a lower value must be acquired before mutexes with a higher value.
* *
* NOTE: any changes here must be reflected in the acpi_gbl_mutex_names table also! * NOTE: any changes here must be reflected in the acpi_gbl_mutex_names
* table below also!
*/ */
#define ACPI_MTX_EXECUTE 0 #define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */
#define ACPI_MTX_INTERPRETER 1 #define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */
#define ACPI_MTX_PARSER 2 #define ACPI_MTX_TABLES 2 /* Data for ACPI tables */
#define ACPI_MTX_DISPATCHER 3 #define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */
#define ACPI_MTX_TABLES 4 #define ACPI_MTX_EVENTS 4 /* Data for ACPI events */
#define ACPI_MTX_OP_REGIONS 5 #define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */
#define ACPI_MTX_NAMESPACE 6 #define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */
#define ACPI_MTX_EVENTS 7 #define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */
#define ACPI_MTX_HARDWARE 8 #define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */
#define ACPI_MTX_CACHES 9
#define ACPI_MTX_MEMORY 10 #define ACPI_MAX_MUTEX 8
#define ACPI_MTX_DEBUG_CMD_COMPLETE 11 #define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
#define ACPI_MTX_DEBUG_CMD_READY 12
#define MAX_MUTEX 12
#define NUM_MUTEX MAX_MUTEX+1
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS #ifdef DEFINE_ACPI_GLOBALS
/* Names for the mutexes used in the subsystem */ /* Debug names for the mutexes above */
static char *acpi_gbl_mutex_names[] = { static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Execute",
"ACPI_MTX_Interpreter", "ACPI_MTX_Interpreter",
"ACPI_MTX_Parser", "ACPI_MTX_Method",
"ACPI_MTX_Dispatcher",
"ACPI_MTX_Tables", "ACPI_MTX_Tables",
"ACPI_MTX_OpRegions",
"ACPI_MTX_Namespace", "ACPI_MTX_Namespace",
"ACPI_MTX_Events", "ACPI_MTX_Events",
"ACPI_MTX_Hardware",
"ACPI_MTX_Caches", "ACPI_MTX_Caches",
"ACPI_MTX_Memory", "ACPI_MTX_Memory",
"ACPI_MTX_DebugCmdComplete", "ACPI_MTX_DebugCmdComplete",
"ACPI_MTX_DebugCmdReady", "ACPI_MTX_DebugCmdReady"
}; };
#endif #endif
#endif #endif
/*
* Predefined handles for spinlocks used within the subsystem.
* These spinlocks are created by acpi_ut_mutex_initialize
*/
#define ACPI_LOCK_GPES 0
#define ACPI_LOCK_HARDWARE 1
#define ACPI_MAX_LOCK 1
#define ACPI_NUM_LOCK ACPI_MAX_LOCK+1
/* Owner IDs are used to track namespace nodes for selective deletion */ /* Owner IDs are used to track namespace nodes for selective deletion */
typedef u8 acpi_owner_id; typedef u8 acpi_owner_id;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment