Commit b526ca43 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6:
  acpi,msi-laptop: Fall back to EC polling mode for MSI laptop specific EC commands
  sony-laptop: rename SONY_LAPTOP_OLD to a more meaningful SONYPI_COMPAT
  asus-laptop: version bump and lindent
  asus-laptop: fix light sens init
  asus-laptop: add GPS support
  asus-laptop: notify ALL events
  ACPICA: Lindent
  ACPI: created a dedicated workqueue for notify() execution
  Revert "ACPICA: fix AML mutex re-entrancy"
  Revert "Execute AML Notify() requests on stack."
  Revert "ACPICA: revert "acpi_serialize" changes"
  ACPI: delete un-reliable concept of cooling mode
  ACPI: thermal trip points are read-only
parents 9b6a5174 f685648e
......@@ -231,8 +231,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
* Obtain the method mutex if necessary. Do not acquire mutex for a
* recursive call.
*/
if (acpi_os_get_thread_id() !=
obj_desc->method.mutex->mutex.owner_thread_id) {
if (!walk_state ||
!obj_desc->method.mutex->mutex.owner_thread ||
(walk_state->thread !=
obj_desc->method.mutex->mutex.owner_thread)) {
/*
* Acquire the method mutex. This releases the interpreter if we
* block (and reacquires it before it returns)
......@@ -246,14 +248,14 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
}
/* Update the mutex and walk info and save the original sync_level */
obj_desc->method.mutex->mutex.owner_thread_id =
acpi_os_get_thread_id();
if (walk_state) {
obj_desc->method.mutex->mutex.
original_sync_level =
walk_state->thread->current_sync_level;
obj_desc->method.mutex->mutex.owner_thread =
walk_state->thread;
walk_state->thread->current_sync_level =
obj_desc->method.sync_level;
} else {
......@@ -567,7 +569,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
acpi_os_release_mutex(method_desc->method.mutex->mutex.
os_mutex);
method_desc->method.mutex->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
method_desc->method.mutex->mutex.owner_thread = NULL;
}
}
......
......@@ -866,8 +866,7 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
((op->common.parent->common.aml_opcode != AML_PACKAGE_OP) &&
(op->common.parent->common.aml_opcode !=
AML_VAR_PACKAGE_OP)
&& (op->common.parent->common.aml_opcode !=
AML_NAME_OP))) {
&& (op->common.parent->common.aml_opcode != AML_NAME_OP))) {
walk_state->result_obj = obj_desc;
}
}
......
......@@ -556,10 +556,9 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
* indicate this to the interpreter, set the
* object to the root
*/
obj_desc =
ACPI_CAST_PTR(union
acpi_operand_object,
acpi_gbl_root_node);
obj_desc = ACPI_CAST_PTR(union
acpi_operand_object,
acpi_gbl_root_node);
status = AE_OK;
} else {
/*
......
......@@ -630,12 +630,9 @@ struct acpi_walk_state *acpi_ds_pop_walk_state(struct acpi_thread_state *thread)
*
******************************************************************************/
struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
union acpi_parse_object
*origin,
union acpi_operand_object
*method_desc,
struct acpi_thread_state
struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
*origin, union acpi_operand_object
*method_desc, struct acpi_thread_state
*thread)
{
struct acpi_walk_state *walk_state;
......
......@@ -147,9 +147,10 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
return 0;
}
static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event,
unsigned count, int force_poll)
{
if (acpi_ec_mode == EC_POLL) {
if (unlikely(force_poll) || acpi_ec_mode == EC_POLL) {
unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
while (time_before(jiffies, delay)) {
if (acpi_ec_check_status(ec, event, 0))
......@@ -173,14 +174,15 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
const u8 * wdata, unsigned wdata_len,
u8 * rdata, unsigned rdata_len)
u8 * rdata, unsigned rdata_len,
int force_poll)
{
int result = 0;
unsigned count = atomic_read(&ec->event_count);
acpi_ec_write_cmd(ec, command);
for (; wdata_len > 0; --wdata_len) {
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX
"write_cmd timeout, command = %d\n", command);
......@@ -191,7 +193,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
}
if (!rdata_len) {
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX
"finish-write timeout, command = %d\n", command);
......@@ -202,7 +204,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
}
for (; rdata_len > 0; --rdata_len) {
result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count);
result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count, force_poll);
if (result) {
printk(KERN_ERR PREFIX "read timeout, command = %d\n",
command);
......@@ -217,7 +219,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
const u8 * wdata, unsigned wdata_len,
u8 * rdata, unsigned rdata_len)
u8 * rdata, unsigned rdata_len,
int force_poll)
{
int status;
u32 glk;
......@@ -240,7 +243,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
/* Make sure GPE is enabled before doing transaction */
acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0, 0);
if (status) {
printk(KERN_DEBUG PREFIX
"input buffer is not empty, aborting transaction\n");
......@@ -249,7 +252,8 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
status = acpi_ec_transaction_unlocked(ec, command,
wdata, wdata_len,
rdata, rdata_len);
rdata, rdata_len,
force_poll);
end:
......@@ -267,12 +271,12 @@ static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
int acpi_ec_burst_enable(struct acpi_ec *ec)
{
u8 d;
return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1);
return acpi_ec_transaction(ec, ACPI_EC_BURST_ENABLE, NULL, 0, &d, 1, 0);
}
int acpi_ec_burst_disable(struct acpi_ec *ec)
{
return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0);
return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
......@@ -281,7 +285,7 @@ static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
u8 d;
result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_READ,
&address, 1, &d, 1);
&address, 1, &d, 1, 0);
*data = d;
return result;
}
......@@ -290,7 +294,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data)
{
u8 wdata[2] = { address, data };
return acpi_ec_transaction(ec, ACPI_EC_COMMAND_WRITE,
wdata, 2, NULL, 0);
wdata, 2, NULL, 0, 0);
}
/*
......@@ -349,13 +353,15 @@ EXPORT_SYMBOL(ec_write);
int ec_transaction(u8 command,
const u8 * wdata, unsigned wdata_len,
u8 * rdata, unsigned rdata_len)
u8 * rdata, unsigned rdata_len,
int force_poll)
{
if (!first_ec)
return -ENODEV;
return acpi_ec_transaction(first_ec, command, wdata,
wdata_len, rdata, rdata_len);
wdata_len, rdata, rdata_len,
force_poll);
}
EXPORT_SYMBOL(ec_transaction);
......@@ -374,7 +380,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
* bit to be cleared (and thus clearing the interrupt source).
*/
result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1);
result = acpi_ec_transaction(ec, ACPI_EC_COMMAND_QUERY, NULL, 0, &d, 1, 0);
if (result)
return result;
......@@ -410,6 +416,7 @@ static u32 acpi_ec_gpe_handler(void *data)
acpi_status status = AE_OK;
u8 value;
struct acpi_ec *ec = data;
atomic_inc(&ec->event_count);
if (acpi_ec_mode == EC_INTR) {
......
......@@ -341,9 +341,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
/* A Non-NULL gpe_device means this is a GPE Block Device */
obj_desc =
acpi_ns_get_attached_object((struct acpi_namespace_node *)
gpe_device);
obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
gpe_device);
if (!obj_desc || !obj_desc->device.gpe_block) {
return (NULL);
}
......
......@@ -1033,8 +1033,7 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
ACPI_GPE_DISPATCH_METHOD)
&& (gpe_event_info->
flags & ACPI_GPE_TYPE_RUNTIME)) {
&& (gpe_event_info->flags & ACPI_GPE_TYPE_RUNTIME)) {
gpe_enabled_count++;
}
......
......@@ -196,15 +196,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
notify_info->notify.value = (u16) notify_value;
notify_info->notify.handler_obj = handler_obj;
acpi_ex_exit_interpreter();
acpi_ev_notify_dispatch(notify_info);
status = acpi_ex_enter_interpreter();
status =
acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch,
notify_info);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
acpi_ut_delete_generic_state(notify_info);
}
}
if (!handler_obj) {
......@@ -323,8 +320,9 @@ static u32 acpi_ev_global_lock_handler(void *context)
acpi_gbl_global_lock_acquired = TRUE;
/* Send a unit to the semaphore */
if (ACPI_FAILURE(acpi_os_signal_semaphore(
acpi_gbl_global_lock_semaphore, 1))) {
if (ACPI_FAILURE
(acpi_os_signal_semaphore
(acpi_gbl_global_lock_semaphore, 1))) {
ACPI_ERROR((AE_INFO,
"Could not signal Global Lock semaphore"));
}
......@@ -450,7 +448,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
}
if (ACPI_FAILURE(status)) {
status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, timeout);
status =
acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex,
timeout);
}
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
......
......@@ -291,7 +291,6 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
u32 bit_width, acpi_integer * value)
{
acpi_status status;
acpi_status status2;
acpi_adr_space_handler handler;
acpi_adr_space_setup region_setup;
union acpi_operand_object *handler_desc;
......@@ -345,7 +344,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* setup will potentially execute control methods
* (e.g., _REG method for this region)
*/
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
handler_desc->address_space.context,
......@@ -353,10 +352,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
/* Re-enter the interpreter */
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
/* Check for failure of the Region Setup */
......@@ -409,7 +405,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* exit the interpreter because the handler *might* block -- we don't
* know what it will do, so we can't hold the lock on the interpreter.
*/
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
}
/* Call the handler */
......@@ -430,10 +426,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
* We just returned from a non-default handler, we must re-enter the
* interpreter
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
......
......@@ -228,7 +228,8 @@ acpi_ev_pci_config_region_setup(acpi_handle handle,
/* Install a handler for this PCI root bridge */
status = acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
status =
acpi_install_address_space_handler((acpi_handle) pci_root_node, ACPI_ADR_SPACE_PCI_CONFIG, ACPI_DEFAULT_HANDLER, NULL, NULL);
if (ACPI_FAILURE(status)) {
if (status == AE_SAME_HANDLER) {
/*
......
......@@ -91,7 +91,6 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler)
ACPI_EXPORT_SYMBOL(acpi_install_exception_handler)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_install_fixed_event_handler
......@@ -768,11 +767,9 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
return (AE_BAD_PARAMETER);
}
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return (status);
}
/* Must lock interpreter to prevent race conditions */
acpi_ex_enter_interpreter();
status = acpi_ev_acquire_global_lock(timeout);
acpi_ex_exit_interpreter();
......
......@@ -472,7 +472,6 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
}
ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
......@@ -568,7 +567,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
ACPI_EXPORT_SYMBOL(acpi_get_gpe_status)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
......
......@@ -512,9 +512,8 @@ acpi_ex_convert_to_string(union acpi_operand_object * obj_desc,
* Create a new string object and string buffer
* (-1 because of extra separator included in string_length from above)
*/
return_desc =
acpi_ut_create_string_object((acpi_size)
(string_length - 1));
return_desc = acpi_ut_create_string_object((acpi_size)
(string_length - 1));
if (!return_desc) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
......
......@@ -50,7 +50,6 @@
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("excreate")
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
......@@ -583,10 +582,7 @@ acpi_ex_create_method(u8 * aml_start,
* Get the sync_level. If method is serialized, a mutex will be
* created for this method when it is parsed.
*/
if (acpi_gbl_all_methods_serialized) {
obj_desc->method.sync_level = 0;
obj_desc->method.method_flags |= AML_METHOD_SERIALIZED;
} else if (method_flags & AML_METHOD_SERIALIZED) {
if (method_flags & AML_METHOD_SERIALIZED) {
/*
* ACPI 1.0: sync_level = 0
* ACPI 2.0: sync_level = sync_level in method declaration
......
......@@ -134,7 +134,7 @@ static struct acpi_exdump_info acpi_ex_dump_method[8] = {
static struct acpi_exdump_info acpi_ex_dump_mutex[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_mutex), NULL},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(mutex.sync_level), "Sync Level"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread_id), "Owner Thread"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"},
{ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth),
"Acquire Depth"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"}
......@@ -451,9 +451,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
ACPI_FUNCTION_NAME(ex_dump_operand)
if (!
((ACPI_LV_EXEC & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
if (!((ACPI_LV_EXEC & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
......@@ -844,9 +843,8 @@ void acpi_ex_dump_namespace_node(struct acpi_namespace_node *node, u32 flags)
ACPI_FUNCTION_ENTRY();
if (!flags) {
if (!
((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return;
}
}
......@@ -1011,9 +1009,8 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
}
if (!flags) {
if (!
((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
if (!((ACPI_LV_OBJECTS & acpi_dbg_level)
&& (_COMPONENT & acpi_dbg_layer))) {
return_VOID;
}
}
......
......@@ -66,9 +66,10 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
*
******************************************************************************/
void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
struct acpi_thread_state *thread)
void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
{
struct acpi_thread_state *thread = obj_desc->mutex.owner_thread;
if (!thread) {
return;
}
......@@ -173,13 +174,16 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Support for multiple acquires by the owning thread */
if (obj_desc->mutex.owner_thread_id == acpi_os_get_thread_id()) {
/*
* The mutex is already owned by this thread, just increment the
* acquisition depth
*/
obj_desc->mutex.acquisition_depth++;
return_ACPI_STATUS(AE_OK);
if (obj_desc->mutex.owner_thread) {
if (obj_desc->mutex.owner_thread->thread_id ==
walk_state->thread->thread_id) {
/*
* The mutex is already owned by this thread, just increment the
* acquisition depth
*/
obj_desc->mutex.acquisition_depth++;
return_ACPI_STATUS(AE_OK);
}
}
/* Acquire the mutex, wait if necessary. Special case for Global Lock */
......@@ -202,7 +206,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
/* Have the mutex: update mutex and walk info and save the sync_level */
obj_desc->mutex.owner_thread_id = acpi_os_get_thread_id();
obj_desc->mutex.owner_thread = walk_state->thread;
obj_desc->mutex.acquisition_depth = 1;
obj_desc->mutex.original_sync_level =
walk_state->thread->current_sync_level;
......@@ -242,7 +246,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* The mutex must have been previously acquired in order to release it */
if (!obj_desc->mutex.owner_thread_id) {
if (!obj_desc->mutex.owner_thread) {
ACPI_ERROR((AE_INFO,
"Cannot release Mutex [%4.4s], not acquired",
acpi_ut_get_node_name(obj_desc->mutex.node)));
......@@ -262,14 +266,15 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
* The Mutex is owned, but this thread must be the owner.
* Special case for Global Lock, any thread can release
*/
if ((obj_desc->mutex.owner_thread_id !=
if ((obj_desc->mutex.owner_thread->thread_id !=
walk_state->thread->thread_id)
&& (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
ACPI_ERROR((AE_INFO,
"Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
(unsigned long)walk_state->thread->thread_id,
acpi_ut_get_node_name(obj_desc->mutex.node),
(unsigned long)obj_desc->mutex.owner_thread_id));
(unsigned long)obj_desc->mutex.owner_thread->
thread_id));
return_ACPI_STATUS(AE_AML_NOT_OWNER);
}
......@@ -296,7 +301,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Unlink the mutex from the owner's list */
acpi_ex_unlink_mutex(obj_desc, walk_state->thread);
acpi_ex_unlink_mutex(obj_desc);
/* Release the mutex, special case for Global Lock */
......@@ -308,7 +313,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
/* Update the mutex and restore sync_level */
obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
obj_desc->mutex.owner_thread = NULL;
walk_state->thread->current_sync_level =
obj_desc->mutex.original_sync_level;
......@@ -363,7 +368,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
/* Mark mutex unowned */
obj_desc->mutex.owner_thread_id = ACPI_MUTEX_NOT_ACQUIRED;
obj_desc->mutex.owner_thread = NULL;
/* Update Thread sync_level (Last mutex is the important one) */
......
......@@ -177,8 +177,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Bytes from stream:\n"));
for (index = 0;
(index < ACPI_NAME_SIZE)
for (index = 0; (index < ACPI_NAME_SIZE)
&& (acpi_ut_valid_acpi_char(*aml_address, 0)); index++) {
char_buf[index] = *aml_address++;
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "%c\n", char_buf[index]));
......
......@@ -242,7 +242,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
obj_desc->common_field.bit_length,
0xFFFFFFFF
/* Temp until we pass region_length as parameter */
);
);
bit_length = byte_alignment * 8;
#endif
......
......@@ -354,8 +354,7 @@ acpi_ex_resolve_operands(u16 opcode,
if ((opcode == AML_STORE_OP) &&
(ACPI_GET_OBJECT_TYPE(*stack_ptr) ==
ACPI_TYPE_LOCAL_REFERENCE)
&& ((*stack_ptr)->reference.opcode ==
AML_INDEX_OP)) {
&& ((*stack_ptr)->reference.opcode == AML_INDEX_OP)) {
goto next_operand;
}
break;
......
......@@ -66,7 +66,6 @@ ACPI_MODULE_NAME("exsystem")
acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
{
acpi_status status;
acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_semaphore);
......@@ -79,7 +78,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* We must wait, so unlock the interpreter */
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
status = acpi_os_wait_semaphore(semaphore, 1, timeout);
......@@ -89,13 +88,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
/* Reacquire the interpreter */
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
/* Report fatal error, could not acquire interpreter */
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
......@@ -119,7 +112,6 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
{
acpi_status status;
acpi_status status2;
ACPI_FUNCTION_TRACE(ex_system_wait_mutex);
......@@ -132,7 +124,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* We must wait, so unlock the interpreter */
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
status = acpi_os_acquire_mutex(mutex, timeout);
......@@ -142,13 +134,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
/* Reacquire the interpreter */
status2 = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status2)) {
/* Report fatal error, could not acquire interpreter */
return_ACPI_STATUS(status2);
}
acpi_ex_reacquire_interpreter();
}
return_ACPI_STATUS(status);
......@@ -209,20 +195,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
acpi_status acpi_ex_system_do_suspend(acpi_integer how_long)
{
acpi_status status;
ACPI_FUNCTION_ENTRY();
/* Since this thread will sleep, we must release the interpreter */
acpi_ex_exit_interpreter();
acpi_ex_relinquish_interpreter();
acpi_os_sleep(how_long);
/* And now we must get the interpreter again */
status = acpi_ex_enter_interpreter();
return (status);
acpi_ex_reacquire_interpreter();
return (AE_OK);
}
/*******************************************************************************
......
......@@ -76,14 +76,15 @@ static u32 acpi_ex_digits_needed(acpi_integer value, u32 base);
*
* PARAMETERS: None
*
* RETURN: Status
* RETURN: None
*
* DESCRIPTION: Enter the interpreter execution region. Failure to enter
* the interpreter region is a fatal system error
* DESCRIPTION: Enter the interpreter execution region. Failure to enter
* the interpreter region is a fatal system error. Used in
* conjunction with exit_interpreter.
*
******************************************************************************/
acpi_status acpi_ex_enter_interpreter(void)
void acpi_ex_enter_interpreter(void)
{
acpi_status status;
......@@ -91,31 +92,55 @@ acpi_status acpi_ex_enter_interpreter(void)
status = acpi_ut_acquire_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not acquire interpreter mutex"));
ACPI_ERROR((AE_INFO,
"Could not acquire AML Interpreter mutex"));
}
return_ACPI_STATUS(status);
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_exit_interpreter
* FUNCTION: acpi_ex_reacquire_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region
* DESCRIPTION: Reacquire the interpreter execution region from within the
* interpreter code. Failure to enter the interpreter region is a
* fatal system error. Used in conjunction with
* relinquish_interpreter
*
******************************************************************************/
void acpi_ex_reacquire_interpreter(void)
{
ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
/*
* If the global serialized flag is set, do not release the interpreter,
* since it was not actually released by acpi_ex_relinquish_interpreter.
* This forces the interpreter to be single threaded.
*/
if (!acpi_gbl_all_methods_serialized) {
acpi_ex_enter_interpreter();
}
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_exit_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* Cases where the interpreter is unlocked:
* 1) Completion of the execution of a control method
* 2) Method blocked on a Sleep() AML opcode
* 3) Method blocked on an Acquire() AML opcode
* 4) Method blocked on a Wait() AML opcode
* 5) Method blocked to acquire the global lock
* 6) Method blocked to execute a serialized control method that is
* already executing
* 7) About to invoke a user-installed opregion handler
* DESCRIPTION: Exit the interpreter execution region. This is the top level
* routine used to exit the interpreter when all processing has
* been completed.
*
******************************************************************************/
......@@ -127,7 +152,46 @@ void acpi_ex_exit_interpreter(void)
status = acpi_ut_release_mutex(ACPI_MTX_INTERPRETER);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Could not release interpreter mutex"));
ACPI_ERROR((AE_INFO,
"Could not release AML Interpreter mutex"));
}
return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_relinquish_interpreter
*
* PARAMETERS: None
*
* RETURN: None
*
* DESCRIPTION: Exit the interpreter execution region, from within the
* interpreter - before attempting an operation that will possibly
* block the running thread.
*
* Cases where the interpreter is unlocked internally
* 1) Method to be blocked on a Sleep() AML opcode
* 2) Method to be blocked on an Acquire() AML opcode
* 3) Method to be blocked on a Wait() AML opcode
* 4) Method to be blocked to acquire the global lock
* 5) Method to be blocked waiting to execute a serialized control method
* that is currently executing
* 6) About to invoke a user-installed opregion handler
*
******************************************************************************/
void acpi_ex_relinquish_interpreter(void)
{
ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
/*
* If the global serialized flag is set, do not release the interpreter.
* This forces the interpreter to be single threaded.
*/
if (!acpi_gbl_all_methods_serialized) {
acpi_ex_exit_interpreter();
}
return_VOID;
......@@ -141,8 +205,8 @@ void acpi_ex_exit_interpreter(void)
*
* RETURN: none
*
* DESCRIPTION: Truncate a number to 32-bits if the currently executing method
* belongs to a 32-bit ACPI table.
* DESCRIPTION: Truncate an ACPI Integer to 32 bits if the execution mode is
* 32-bit, as determined by the revision of the DSDT.
*
******************************************************************************/
......
......@@ -152,7 +152,6 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
ACPI_EXPORT_SYMBOL(acpi_get_firmware_waking_vector)
#endif
/*******************************************************************************
*
* FUNCTION: acpi_enter_sleep_state_prep
......
......@@ -75,7 +75,7 @@ ACPI_MODULE_NAME("nseval")
* MUTEX: Locks interpreter
*
******************************************************************************/
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
acpi_status acpi_ns_evaluate(struct acpi_evaluate_info * info)
{
acpi_status status;
......@@ -154,11 +154,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
* Execute the method via the interpreter. The interpreter is locked
* here before calling into the AML parser
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_ex_enter_interpreter();
status = acpi_ps_execute_method(info);
acpi_ex_exit_interpreter();
} else {
......@@ -182,10 +178,7 @@ acpi_status acpi_ns_evaluate(struct acpi_evaluate_info *info)
* resolution, we must lock it because we could access an opregion.
* The opregion access code assumes that the interpreter is locked.
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
acpi_ex_enter_interpreter();
/* Function has a strange interface */
......
......@@ -214,7 +214,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
u32 level, void *context, void **return_value)
{
acpi_object_type type;
acpi_status status;
acpi_status status = AE_OK;
struct acpi_init_walk_info *info =
(struct acpi_init_walk_info *)context;
struct acpi_namespace_node *node =
......@@ -268,10 +268,7 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
/*
* Must lock the interpreter before executing AML code
*/
status = acpi_ex_enter_interpreter();
if (ACPI_FAILURE(status)) {
return (status);
}
acpi_ex_enter_interpreter();
/*
* Each of these types can contain executable AML code within the
......
......@@ -65,10 +65,8 @@ ACPI_MODULE_NAME("nswalk")
* within Scope is returned.
*
******************************************************************************/
struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
struct acpi_namespace_node
*parent_node,
struct acpi_namespace_node
struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
*parent_node, struct acpi_namespace_node
*child_node)
{
struct acpi_namespace_node *next_node = NULL;
......
......@@ -48,7 +48,6 @@
#define _COMPONENT ACPI_NAMESPACE
ACPI_MODULE_NAME("nsxfeval")
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
......@@ -73,8 +72,8 @@ ACPI_MODULE_NAME("nsxfeval")
acpi_status
acpi_evaluate_object_typed(acpi_handle handle,
acpi_string pathname,
struct acpi_object_list * external_params,
struct acpi_buffer * return_buffer,
struct acpi_object_list *external_params,
struct acpi_buffer *return_buffer,
acpi_object_type return_type)
{
acpi_status status;
......@@ -143,7 +142,6 @@ acpi_evaluate_object_typed(acpi_handle handle,
ACPI_EXPORT_SYMBOL(acpi_evaluate_object_typed)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_evaluate_object
......@@ -170,7 +168,6 @@ acpi_evaluate_object(acpi_handle handle,
struct acpi_buffer *return_buffer)
{
acpi_status status;
acpi_status status2;
struct acpi_evaluate_info *info;
acpi_size buffer_space_needed;
u32 i;
......@@ -329,14 +326,12 @@ acpi_evaluate_object(acpi_handle handle,
* Delete the internal return object. NOTE: Interpreter must be
* locked to avoid race condition.
*/
status2 = acpi_ex_enter_interpreter();
if (ACPI_SUCCESS(status2)) {
acpi_ex_enter_interpreter();
/* Remove one reference on the return object (should delete it) */
/* Remove one reference on the return object (should delete it) */
acpi_ut_remove_reference(info->return_object);
acpi_ex_exit_interpreter();
}
acpi_ut_remove_reference(info->return_object);
acpi_ex_exit_interpreter();
}
cleanup:
......
......@@ -71,6 +71,7 @@ static unsigned int acpi_irq_irq;
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static void __init acpi_request_region (struct acpi_generic_address *addr,
unsigned int length, char *desc)
......@@ -137,8 +138,9 @@ acpi_status acpi_os_initialize1(void)
return AE_NULL_ENTRY;
}
kacpid_wq = create_singlethread_workqueue("kacpid");
kacpi_notify_wq = create_singlethread_workqueue("kacpi_notify");
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
return AE_OK;
}
......@@ -150,6 +152,7 @@ acpi_status acpi_os_terminate(void)
}
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
return AE_OK;
}
......@@ -601,6 +604,23 @@ void acpi_os_derive_pci_id(acpi_handle rhandle, /* upper bound */
}
static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
if (!dpc) {
printk(KERN_ERR PREFIX "Invalid (NULL) context\n");
return;
}
dpc->function(dpc->context);
kfree(dpc);
/* Yield cpu to notify thread */
cond_resched();
return;
}
static void acpi_os_execute_notify(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
......@@ -637,14 +657,12 @@ acpi_status acpi_os_execute(acpi_execute_type type,
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
ACPI_FUNCTION_TRACE("os_queue_for_execution");
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
if (!function)
return_ACPI_STATUS(AE_BAD_PARAMETER);
return AE_BAD_PARAMETER;
/*
* Allocate/initialize DPC structure. Note that this memory will be
......@@ -662,14 +680,21 @@ acpi_status acpi_os_execute(acpi_execute_type type,
dpc->function = function;
dpc->context = context;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
if (!queue_work(kacpid_wq, &dpc->work)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
if (type == OSL_NOTIFY_HANDLER) {
INIT_WORK(&dpc->work, acpi_os_execute_notify);
if (!queue_work(kacpi_notify_wq, &dpc->work)) {
status = AE_ERROR;
kfree(dpc);
}
} else {
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
if (!queue_work(kacpid_wq, &dpc->work)) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
"Call to queue_work() failed.\n"));
kfree(dpc);
status = AE_ERROR;
status = AE_ERROR;
kfree(dpc);
}
}
return_ACPI_STATUS(status);
}
......
......@@ -567,7 +567,8 @@ acpi_rs_get_pci_routing_table_length(union acpi_operand_object *package_object,
(*sub_object_list)->string.
length + 1);
} else {
temp_size_needed += acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
temp_size_needed +=
acpi_ns_get_pathname_length((*sub_object_list)->reference.node);
}
} else {
/*
......
......@@ -267,16 +267,19 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
* If BIOS erroneously reversed the _PRT source_name and source_index,
* then reverse them back.
*/
if (ACPI_GET_OBJECT_TYPE (sub_object_list[3]) != ACPI_TYPE_INTEGER) {
if (ACPI_GET_OBJECT_TYPE(sub_object_list[3]) !=
ACPI_TYPE_INTEGER) {
if (acpi_gbl_enable_interpreter_slack) {
source_name_index = 3;
source_index_index = 2;
printk(KERN_WARNING "ACPI: Handling Garbled _PRT entry\n");
printk(KERN_WARNING
"ACPI: Handling Garbled _PRT entry\n");
} else {
ACPI_ERROR((AE_INFO,
"(PRT[%X].source_index) Need Integer, found %s",
index,
acpi_ut_get_object_type_name(sub_object_list[3])));
"(PRT[%X].source_index) Need Integer, found %s",
index,
acpi_ut_get_object_type_name
(sub_object_list[3])));
return_ACPI_STATUS(AE_BAD_DATA);
}
}
......
......@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsdump")
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/* Local prototypes */
static void acpi_rs_out_string(char *title, char *value);
......@@ -489,10 +488,9 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/*
* Optional resource_source for Address resources
*/
acpi_rs_dump_resource_source(ACPI_CAST_PTR
(struct
acpi_resource_source,
target));
acpi_rs_dump_resource_source(ACPI_CAST_PTR(struct
acpi_resource_source,
target));
break;
default:
......
......@@ -142,7 +142,7 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
};
#endif
#endif /* ACPI_FUTURE_USAGE */
#endif /* ACPI_FUTURE_USAGE */
/*
* Base sizes for external AML resource descriptors, indexed by internal type.
* Includes size of the descriptor header (1 byte for small descriptors,
......
......@@ -153,10 +153,9 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
status = acpi_rs_convert_resource_to_aml(resource,
ACPI_CAST_PTR(union
aml_resource,
aml),
status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
aml_resource,
aml),
acpi_gbl_set_resource_dispatch
[resource->type]);
if (ACPI_FAILURE(status)) {
......
......@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_RESOURCES
ACPI_MODULE_NAME("rsmisc")
#define INIT_RESOURCE_TYPE(i) i->resource_offset
#define INIT_RESOURCE_LENGTH(i) i->aml_offset
#define INIT_TABLE_LENGTH(i) i->value
......@@ -429,8 +428,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
* Optional resource_source (Index and String)
*/
aml_length =
acpi_rs_set_resource_source(aml,
(acpi_rs_length)
acpi_rs_set_resource_source(aml, (acpi_rs_length)
aml_length, source);
acpi_rs_set_resource_length(aml_length, aml);
break;
......
......@@ -353,10 +353,8 @@ acpi_rs_get_resource_source(acpi_rs_length resource_length,
*
* Zero the entire area of the buffer.
*/
total_length =
(u32)
ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) +
1;
total_length = (u32)
ACPI_STRLEN(ACPI_CAST_PTR(char, &aml_resource_source[1])) + 1;
total_length = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(total_length);
ACPI_MEMSET(resource_source->string_ptr, 0, total_length);
......
......@@ -217,7 +217,6 @@ acpi_get_current_resources(acpi_handle device_handle,
}
ACPI_EXPORT_SYMBOL(acpi_get_current_resources)
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
......@@ -261,7 +260,6 @@ acpi_get_possible_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_get_possible_resources)
#endif /* ACPI_FUTURE_USAGE */
/*******************************************************************************
*
* FUNCTION: acpi_set_current_resources
......@@ -496,7 +494,6 @@ ACPI_EXPORT_SYMBOL(acpi_rs_match_vendor_resource)
* each resource in the list.
*
******************************************************************************/
acpi_status
acpi_walk_resources(acpi_handle device_handle,
char *name,
......
......@@ -222,7 +222,7 @@ static struct hibernation_ops acpi_hibernation_ops = {
.enter = acpi_hibernation_enter,
.finish = acpi_hibernation_finish,
};
#endif /* CONFIG_SOFTWARE_SUSPEND */
#endif /* CONFIG_SOFTWARE_SUSPEND */
/*
* Toshiba fails to preserve interrupts over S1, reinitialization
......@@ -276,4 +276,3 @@ int __init acpi_sleep_init(void)
return 0;
}
......@@ -349,8 +349,7 @@ acpi_system_write_alarm(struct file *file,
end:
return_VALUE(result ? result : count);
}
#endif /* HAVE_ACPI_LEGACY_ALARM */
#endif /* HAVE_ACPI_LEGACY_ALARM */
extern struct list_head acpi_wakeup_device_list;
extern spinlock_t acpi_device_lock;
......@@ -380,8 +379,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->wakeup.state.enabled ? "enabled" : "disabled");
if (ldev)
seq_printf(seq, "%s:%s",
ldev->bus ? ldev->bus->name : "no-bus",
ldev->bus_id);
ldev->bus ? ldev->bus->name : "no-bus",
ldev->bus_id);
seq_printf(seq, "\n");
put_device(ldev);
......@@ -490,7 +489,7 @@ static u32 rtc_handler(void *context)
return ACPI_INTERRUPT_HANDLED;
}
#endif /* HAVE_ACPI_LEGACY_ALARM */
#endif /* HAVE_ACPI_LEGACY_ALARM */
static int __init acpi_sleep_proc_init(void)
{
......@@ -517,7 +516,7 @@ static int __init acpi_sleep_proc_init(void)
entry->proc_fops = &acpi_system_alarm_fops;
acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
#endif /* HAVE_ACPI_LEGACY_ALARM */
#endif /* HAVE_ACPI_LEGACY_ALARM */
/* 'wakeup device' [R/W] */
entry =
......
......@@ -334,7 +334,8 @@ static void acpi_tb_convert_fadt(void)
(acpi_gbl_FADT.xpm1a_event_block.address +
pm1_register_length));
/* Don't forget to copy space_id of the GAS */
acpi_gbl_xpm1a_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
acpi_gbl_xpm1a_enable.space_id =
acpi_gbl_FADT.xpm1a_event_block.space_id;
/* The PM1B register block is optional, ignore if not present */
......@@ -344,7 +345,8 @@ static void acpi_tb_convert_fadt(void)
(acpi_gbl_FADT.xpm1b_event_block.
address + pm1_register_length));
/* Don't forget to copy space_id of the GAS */
acpi_gbl_xpm1b_enable.space_id = acpi_gbl_FADT.xpm1a_event_block.space_id;
acpi_gbl_xpm1b_enable.space_id =
acpi_gbl_FADT.xpm1a_event_block.space_id;
}
......
......@@ -201,6 +201,7 @@ acpi_status acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_load_table
......@@ -262,7 +263,7 @@ ACPI_EXPORT_SYMBOL(acpi_load_table)
acpi_status
acpi_get_table_header(char *signature,
acpi_native_uint instance,
struct acpi_table_header *out_table_header)
struct acpi_table_header * out_table_header)
{
acpi_native_uint i;
acpi_native_uint j;
......@@ -321,7 +322,6 @@ acpi_get_table_header(char *signature,
ACPI_EXPORT_SYMBOL(acpi_get_table_header)
/******************************************************************************
*
* FUNCTION: acpi_unload_table_id
......@@ -346,11 +346,11 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
continue;
}
/*
* Delete all namespace objects owned by this table. Note that these
* objects can appear anywhere in the namespace by virtue of the AML
* "Scope" operator. Thus, we need to track ownership by an ID, not
* simply a position within the hierarchy
*/
* Delete all namespace objects owned by this table. Note that these
* objects can appear anywhere in the namespace by virtue of the AML
* "Scope" operator. Thus, we need to track ownership by an ID, not
* simply a position within the hierarchy
*/
acpi_tb_delete_namespace_by_owner(i);
status = acpi_tb_release_owner_id(i);
acpi_tb_set_table_loaded_flag(i, FALSE);
......@@ -376,7 +376,7 @@ ACPI_EXPORT_SYMBOL(acpi_unload_table_id)
*****************************************************************************/
acpi_status
acpi_get_table(char *signature,
acpi_native_uint instance, struct acpi_table_header ** out_table)
acpi_native_uint instance, struct acpi_table_header **out_table)
{
acpi_native_uint i;
acpi_native_uint j;
......
......@@ -59,8 +59,6 @@
#define ACPI_THERMAL_NOTIFY_CRITICAL 0xF0
#define ACPI_THERMAL_NOTIFY_HOT 0xF1
#define ACPI_THERMAL_MODE_ACTIVE 0x00
#define ACPI_THERMAL_MODE_PASSIVE 0x01
#define ACPI_THERMAL_MODE_CRITICAL 0xff
#define ACPI_THERMAL_PATH_POWEROFF "/sbin/poweroff"
#define ACPI_THERMAL_MAX_ACTIVE 10
......@@ -86,9 +84,6 @@ static int acpi_thermal_resume(struct acpi_device *device);
static int acpi_thermal_state_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_temp_open_fs(struct inode *inode, struct file *file);
static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file);
static ssize_t acpi_thermal_write_trip_points(struct file *,
const char __user *, size_t,
loff_t *);
static int acpi_thermal_cooling_open_fs(struct inode *inode, struct file *file);
static ssize_t acpi_thermal_write_cooling_mode(struct file *,
const char __user *, size_t,
......@@ -167,7 +162,6 @@ struct acpi_thermal {
unsigned long temperature;
unsigned long last_temperature;
unsigned long polling_frequency;
u8 cooling_mode;
volatile u8 zombie;
struct acpi_thermal_flags flags;
struct acpi_thermal_state state;
......@@ -193,7 +187,6 @@ static const struct file_operations acpi_thermal_temp_fops = {
static const struct file_operations acpi_thermal_trip_fops = {
.open = acpi_thermal_trip_open_fs,
.read = seq_read,
.write = acpi_thermal_write_trip_points,
.llseek = seq_lseek,
.release = single_release,
};
......@@ -297,11 +290,6 @@ static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
if (ACPI_FAILURE(status))
return -ENODEV;
tz->cooling_mode = mode;
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Cooling mode [%s]\n",
mode ? "passive" : "active"));
return 0;
}
......@@ -889,67 +877,6 @@ static int acpi_thermal_trip_open_fs(struct inode *inode, struct file *file)
return single_open(file, acpi_thermal_trip_seq_show, PDE(inode)->data);
}
static ssize_t
acpi_thermal_write_trip_points(struct file *file,
const char __user * buffer,
size_t count, loff_t * ppos)
{
struct seq_file *m = file->private_data;
struct acpi_thermal *tz = m->private;
char *limit_string;
int num, critical, hot, passive;
int *active;
int i = 0;
limit_string = kzalloc(ACPI_THERMAL_MAX_LIMIT_STR_LEN, GFP_KERNEL);
if (!limit_string)
return -ENOMEM;
active = kmalloc(ACPI_THERMAL_MAX_ACTIVE * sizeof(int), GFP_KERNEL);
if (!active) {
kfree(limit_string);
return -ENOMEM;
}
if (!tz || (count > ACPI_THERMAL_MAX_LIMIT_STR_LEN - 1)) {
count = -EINVAL;
goto end;
}
if (copy_from_user(limit_string, buffer, count)) {
count = -EFAULT;
goto end;
}
limit_string[count] = '\0';
num = sscanf(limit_string, "%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d",
&critical, &hot, &passive,
&active[0], &active[1], &active[2], &active[3], &active[4],
&active[5], &active[6], &active[7], &active[8],
&active[9]);
if (!(num >= 5 && num < (ACPI_THERMAL_MAX_ACTIVE + 3))) {
count = -EINVAL;
goto end;
}
tz->trips.critical.temperature = CELSIUS_TO_KELVIN(critical);
tz->trips.hot.temperature = CELSIUS_TO_KELVIN(hot);
tz->trips.passive.temperature = CELSIUS_TO_KELVIN(passive);
for (i = 0; i < num - 3; i++) {
if (!(tz->trips.active[i].flags.valid))
break;
tz->trips.active[i].temperature = CELSIUS_TO_KELVIN(active[i]);
}
end:
kfree(active);
kfree(limit_string);
return count;
}
static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
{
struct acpi_thermal *tz = seq->private;
......@@ -958,15 +885,10 @@ static int acpi_thermal_cooling_seq_show(struct seq_file *seq, void *offset)
if (!tz)
goto end;
if (!tz->flags.cooling_mode) {
if (!tz->flags.cooling_mode)
seq_puts(seq, "<setting not supported>\n");
}
if (tz->cooling_mode == ACPI_THERMAL_MODE_CRITICAL)
seq_printf(seq, "cooling mode: critical\n");
else
seq_printf(seq, "cooling mode: %s\n",
tz->cooling_mode ? "passive" : "active");
seq_puts(seq, "0 - Active; 1 - Passive\n");
end:
return 0;
......@@ -1223,28 +1145,6 @@ static int acpi_thermal_get_info(struct acpi_thermal *tz)
result = acpi_thermal_set_cooling_mode(tz, ACPI_THERMAL_MODE_ACTIVE);
if (!result)
tz->flags.cooling_mode = 1;
else {
/* Oh,we have not _SCP method.
Generally show cooling_mode by _ACx, _PSV,spec 12.2 */
tz->flags.cooling_mode = 0;
if (tz->trips.active[0].flags.valid
&& tz->trips.passive.flags.valid) {
if (tz->trips.passive.temperature >
tz->trips.active[0].temperature)
tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
else
tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
} else if (!tz->trips.active[0].flags.valid
&& tz->trips.passive.flags.valid) {
tz->cooling_mode = ACPI_THERMAL_MODE_PASSIVE;
} else if (tz->trips.active[0].flags.valid
&& !tz->trips.passive.flags.valid) {
tz->cooling_mode = ACPI_THERMAL_MODE_ACTIVE;
} else {
/* _ACx and _PSV are optional, but _CRT is required */
tz->cooling_mode = ACPI_THERMAL_MODE_CRITICAL;
}
}
/* Get default polling frequency [_TZP] (optional) */
if (tzp)
......
......@@ -107,7 +107,6 @@ acpi_status acpi_ut_create_caches(void)
if (ACPI_FAILURE(status)) {
return (status);
}
#ifdef ACPI_DBG_TRACK_ALLOCATIONS
/* Memory allocation lists */
......
......@@ -45,7 +45,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utcache")
#ifdef ACPI_USE_LOCAL_CACHE
/*******************************************************************************
*
......@@ -64,7 +63,7 @@ ACPI_MODULE_NAME("utcache")
acpi_status
acpi_os_create_cache(char *cache_name,
u16 object_size,
u16 max_depth, struct acpi_memory_list **return_cache)
u16 max_depth, struct acpi_memory_list ** return_cache)
{
struct acpi_memory_list *cache;
......
......@@ -814,7 +814,9 @@ acpi_ut_copy_ielement_to_ielement(u8 object_type,
/*
* Create the object array
*/
target_object->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.count + 1) * sizeof(void *));
target_object->package.elements =
ACPI_ALLOCATE_ZEROED(((acpi_size) source_object->package.
count + 1) * sizeof(void *));
if (!target_object->package.elements) {
status = AE_NO_MEMORY;
goto error_exit;
......
......@@ -45,7 +45,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdebug")
#ifdef ACPI_DEBUG_OUTPUT
static acpi_thread_id acpi_gbl_prev_thread_id;
static char *acpi_gbl_fn_entry_str = "----Entry";
......@@ -181,7 +180,8 @@ acpi_ut_debug_print(u32 requested_debug_level,
if (ACPI_LV_THREADS & acpi_dbg_level) {
acpi_os_printf
("\n**** Context Switch from TID %lX to TID %lX ****\n\n",
(unsigned long)acpi_gbl_prev_thread_id, (unsigned long)thread_id);
(unsigned long)acpi_gbl_prev_thread_id,
(unsigned long)thread_id);
}
acpi_gbl_prev_thread_id = thread_id;
......
......@@ -170,6 +170,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
acpi_os_delete_mutex(object->mutex.os_mutex);
acpi_gbl_global_lock_mutex = NULL;
} else {
acpi_ex_unlink_mutex(object);
acpi_os_delete_mutex(object->mutex.os_mutex);
}
break;
......
......@@ -55,12 +55,10 @@ ACPI_EXPORT_SYMBOL(acpi_gbl_FADT)
* Static global variable initialization.
*
******************************************************************************/
/*
* We want the debug switches statically initialized so they
* are already set when the debugger is entered.
*/
/* Debug switch - level and trace mask */
u32 acpi_dbg_level = ACPI_DEBUG_DEFAULT;
......@@ -735,5 +733,5 @@ void acpi_ut_init_globals(void)
}
ACPI_EXPORT_SYMBOL(acpi_dbg_level)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
ACPI_EXPORT_SYMBOL(acpi_gpe_count)
ACPI_EXPORT_SYMBOL(acpi_dbg_layer)
ACPI_EXPORT_SYMBOL(acpi_gpe_count)
......@@ -802,9 +802,8 @@ acpi_ut_strtoul64(char *string, u32 base, acpi_integer * ret_integer)
valid_digits++;
if (sign_of0x
&& ((valid_digits > 16)
|| ((valid_digits > 8) && mode32))) {
if (sign_of0x && ((valid_digits > 16)
|| ((valid_digits > 8) && mode32))) {
/*
* This is to_integer operation case.
* No any restrictions for string-to-integer conversion,
......@@ -1049,6 +1048,7 @@ acpi_ut_exception(char *module_name,
acpi_os_vprintf(format, args);
acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
}
EXPORT_SYMBOL(acpi_ut_exception);
void ACPI_INTERNAL_VAR_XFACE
......
......@@ -244,7 +244,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX attempting to acquire Mutex [%s]\n",
(unsigned long) this_thread_id,
(unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex,
......@@ -252,7 +252,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
if (ACPI_SUCCESS(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX acquired Mutex [%s]\n",
(unsigned long) this_thread_id,
(unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
acpi_gbl_mutex_info[mutex_id].use_count++;
......@@ -260,7 +260,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
} else {
ACPI_EXCEPTION((AE_INFO, status,
"Thread %lX could not acquire Mutex [%X]",
(unsigned long) this_thread_id, mutex_id));
(unsigned long)this_thread_id, mutex_id));
}
return (status);
......@@ -287,7 +287,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
this_thread_id = acpi_os_get_thread_id();
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
"Thread %lX releasing Mutex [%s]\n",
(unsigned long) this_thread_id,
(unsigned long)this_thread_id,
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
......
......@@ -46,7 +46,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
#if defined(ACPI_DISASSEMBLER) || defined (ACPI_DEBUGGER)
/*
* Strings used to decode resource descriptors.
......
......@@ -337,7 +337,6 @@ acpi_status acpi_terminate(void)
}
ACPI_EXPORT_SYMBOL(acpi_terminate)
#ifdef ACPI_FUTURE_USAGE
/*******************************************************************************
*
......@@ -470,7 +469,6 @@ acpi_install_initialization_handler(acpi_init_handler handler, u32 function)
ACPI_EXPORT_SYMBOL(acpi_install_initialization_handler)
#endif /* ACPI_FUTURE_USAGE */
/*****************************************************************************
*
* FUNCTION: acpi_purge_cached_objects
......
......@@ -130,7 +130,7 @@ config SONY_LAPTOP
Read <file:Documentation/sony-laptop.txt> for more information.
config SONY_LAPTOP_OLD
config SONYPI_COMPAT
bool "Sonypi compatibility"
depends on SONY_LAPTOP
---help---
......
......@@ -30,7 +30,7 @@
* Eric Burghard - LED display support for W1N
* Josh Green - Light Sens support
* Thomas Tuttle - His first patch for led support was very helpful
*
* Sam Lin - GPS support
*/
#include <linux/autoconf.h>
......@@ -48,7 +48,7 @@
#include <acpi/acpi_bus.h>
#include <asm/uaccess.h>
#define ASUS_LAPTOP_VERSION "0.41"
#define ASUS_LAPTOP_VERSION "0.42"
#define ASUS_HOTK_NAME "Asus Laptop Support"
#define ASUS_HOTK_CLASS "hotkey"
......@@ -83,6 +83,7 @@
#define PLED_ON 0x20 //Phone LED
#define GLED_ON 0x40 //Gaming LED
#define LCD_ON 0x80 //LCD backlight
#define GPS_ON 0x100 //GPS
#define ASUS_LOG ASUS_HOTK_FILE ": "
#define ASUS_ERR KERN_ERR ASUS_LOG
......@@ -148,7 +149,7 @@ ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L4R M6R A3G
M6A M6V VX-1 V6J V6V W3Z */
"\\_SB.PCI0.P0P2.VGA.GETD", /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V
S5A M5A z33A W1Jc W2V */
S5A M5A z33A W1Jc W2V G1 */
"\\_SB.PCI0.P0P3.VGA.GETD", /* A6V A6Q */
"\\_SB.PCI0.P0PA.VGA.GETD", /* A6T, A6M */
"\\_SB.PCI0.PCI1.VGAC.NMAP", /* L3C */
......@@ -162,6 +163,12 @@ ASUS_HANDLE(display_get, "\\_SB.PCI0.P0P1.VGA.GETD", /* A6B, A6K A6R A7D F3JM L
ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
/* GPS */
/* R2H uses a different handle for GPS on/off */
ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
/*
* This is the main structure, we can use it to store anything interesting
* about the hotk device
......@@ -278,12 +285,28 @@ static int read_wireless_status(int mask)
return (hotk->status & mask) ? 1 : 0;
}
static int read_gps_status(void)
{
ulong status;
acpi_status rv = AE_OK;
rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
if (ACPI_FAILURE(rv))
printk(ASUS_WARNING "Error reading GPS status\n");
else
return status ? 1 : 0;
return (hotk->status & GPS_ON) ? 1 : 0;
}
/* Generic LED functions */
static int read_status(int mask)
{
/* There is a special method for both wireless devices */
if (mask == BT_ON || mask == WL_ON)
return read_wireless_status(mask);
else if (mask == GPS_ON)
return read_gps_status();
return (hotk->status & mask) ? 1 : 0;
}
......@@ -299,6 +322,10 @@ static void write_status(acpi_handle handle, int out, int mask)
case GLED_ON:
out = (out & 0x1) + 1;
break;
case GPS_ON:
handle = (out) ? gps_on_handle : gps_off_handle;
out = 0x02;
break;
default:
out &= 0x1;
break;
......@@ -667,6 +694,21 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
return rv;
}
/*
* GPS
*/
static ssize_t show_gps(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", read_status(GPS_ON));
}
static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
return store_status(buf, count, NULL, GPS_ON);
}
static void asus_hotk_notify(acpi_handle handle, u32 event, void *data)
{
/* TODO Find a better way to handle events count. */
......@@ -715,6 +757,7 @@ static ASUS_CREATE_DEVICE_ATTR(display);
static ASUS_CREATE_DEVICE_ATTR(ledd);
static ASUS_CREATE_DEVICE_ATTR(ls_switch);
static ASUS_CREATE_DEVICE_ATTR(ls_level);
static ASUS_CREATE_DEVICE_ATTR(gps);
static struct attribute *asuspf_attributes[] = {
&dev_attr_infos.attr,
......@@ -724,6 +767,7 @@ static struct attribute *asuspf_attributes[] = {
&dev_attr_ledd.attr,
&dev_attr_ls_switch.attr,
&dev_attr_ls_level.attr,
&dev_attr_gps.attr,
NULL
};
......@@ -763,6 +807,9 @@ static void asus_hotk_add_fs(void)
ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
}
if (gps_status_handle && gps_on_handle && gps_off_handle)
ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
}
static int asus_handle_init(char *name, acpi_handle * handle,
......@@ -890,9 +937,13 @@ static int asus_hotk_get_info(void)
/* There are a lot of models with "ALSL", but only a few have
a real light sensor, so we need to check it. */
if (ASUS_HANDLE_INIT(ls_switch))
if (!ASUS_HANDLE_INIT(ls_switch))
ASUS_HANDLE_INIT(ls_level);
ASUS_HANDLE_INIT(gps_on);
ASUS_HANDLE_INIT(gps_off);
ASUS_HANDLE_INIT(gps_status);
kfree(model);
return AE_OK;
......@@ -950,7 +1001,7 @@ static int asus_hotk_add(struct acpi_device *device)
* We install the handler, it will receive the hotk in parameter, so, we
* could add other data to the hotk struct
*/
status = acpi_install_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
status = acpi_install_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
asus_hotk_notify, hotk);
if (ACPI_FAILURE(status))
printk(ASUS_ERR "Error installing notify handler\n");
......@@ -981,6 +1032,9 @@ static int asus_hotk_add(struct acpi_device *device)
if (ls_level_handle)
set_light_sens_level(hotk->light_level);
/* GPS is on by default */
write_status(NULL, 1, GPS_ON);
end:
if (result) {
kfree(hotk->name);
......@@ -997,7 +1051,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
if (!device || !acpi_driver_data(device))
return -EINVAL;
status = acpi_remove_notify_handler(hotk->handle, ACPI_SYSTEM_NOTIFY,
status = acpi_remove_notify_handler(hotk->handle, ACPI_ALL_NOTIFY,
asus_hotk_notify);
if (ACPI_FAILURE(status))
printk(ASUS_ERR "Error removing notify handler\n");
......
......@@ -85,7 +85,7 @@ static int set_lcd_level(int level)
buf[0] = 0x80;
buf[1] = (u8) (level*31);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, buf, sizeof(buf), NULL, 0, 1);
}
static int get_lcd_level(void)
......@@ -93,7 +93,7 @@ static int get_lcd_level(void)
u8 wdata = 0, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
......@@ -105,7 +105,7 @@ static int get_auto_brightness(void)
u8 wdata = 4, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1);
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
......@@ -119,14 +119,14 @@ static int set_auto_brightness(int enable)
wdata[0] = 4;
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1);
result = ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 1, &rdata, 1, 1);
if (result < 0)
return result;
wdata[0] = 0x84;
wdata[1] = (rdata & 0xF7) | (enable ? 8 : 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0);
return ec_transaction(MSI_EC_COMMAND_LCD_LEVEL, wdata, 2, NULL, 0, 1);
}
static int get_wireless_state(int *wlan, int *bluetooth)
......@@ -134,7 +134,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
u8 wdata = 0, rdata;
int result;
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1, 1);
if (result < 0)
return -1;
......
......@@ -63,7 +63,7 @@
#include <asm/uaccess.h>
#include <linux/sonypi.h>
#include <linux/sony-laptop.h>
#ifdef CONFIG_SONY_LAPTOP_OLD
#ifdef CONFIG_SONYPI_COMPAT
#include <linux/poll.h>
#include <linux/miscdevice.h>
#endif
......@@ -114,7 +114,7 @@ MODULE_PARM_DESC(camera,
"set this to 1 to enable Motion Eye camera controls "
"(only use it if you have a C1VE or C1VN model)");
#ifdef CONFIG_SONY_LAPTOP_OLD
#ifdef CONFIG_SONYPI_COMPAT
static int minor = -1;
module_param(minor, int, 0);
MODULE_PARM_DESC(minor,
......@@ -1504,7 +1504,7 @@ static struct attribute_group spic_attribute_group = {
};
/******** SONYPI compatibility **********/
#ifdef CONFIG_SONY_LAPTOP_OLD
#ifdef CONFIG_SONYPI_COMPAT
/* battery / brightness / temperature addresses */
#define SONYPI_BAT_FLAGS 0x81
......@@ -1798,7 +1798,7 @@ static void sonypi_compat_exit(void)
static int sonypi_compat_init(void) { return 0; }
static void sonypi_compat_exit(void) { }
static void sonypi_compat_report_event(u8 event) { }
#endif /* CONFIG_SONY_LAPTOP_OLD */
#endif /* CONFIG_SONYPI_COMPAT */
/*
* ACPI callbacks
......
......@@ -290,12 +290,9 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state *walk_state);
struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id,
union acpi_parse_object
*origin,
union acpi_operand_object
*mth_desc,
struct acpi_thread_state
struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union acpi_parse_object
*origin, union acpi_operand_object
*mth_desc, struct acpi_thread_state
*thread);
acpi_status
......
......@@ -319,7 +319,7 @@ ACPI_EXTERN struct acpi_fixed_event_handler
acpi_gbl_fixed_event_handlers[ACPI_NUM_FIXED_EVENTS];
ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head;
ACPI_EXTERN struct acpi_gpe_block_info
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
*acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS];
/*****************************************************************************
*
......
......@@ -253,8 +253,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc,
struct acpi_thread_state *thread);
void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc);
/*
* exprep - ACPI AML execution - prep utilities
......@@ -446,10 +445,14 @@ acpi_ex_copy_integer_to_buffer_field(union acpi_operand_object *source_desc,
/*
* exutils - interpreter/scanner utilities
*/
acpi_status acpi_ex_enter_interpreter(void);
void acpi_ex_enter_interpreter(void);
void acpi_ex_exit_interpreter(void);
void acpi_ex_reacquire_interpreter(void);
void acpi_ex_relinquish_interpreter(void);
void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
u8 acpi_ex_acquire_global_lock(u32 rule);
......
......@@ -630,7 +630,7 @@ ACPI_PARSE_COMMON};
* and bytelists.
*/
struct acpi_parse_obj_named {
ACPI_PARSE_COMMON u8 * path;
ACPI_PARSE_COMMON u8 *path;
u8 *data; /* AML body or bytelist data */
u32 length; /* AML length */
u32 name; /* 4-byte name or zero if no name */
......
......@@ -100,10 +100,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_walk_callback user_function,
void *context, void **return_value);
struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type,
struct acpi_namespace_node
*parent,
struct acpi_namespace_node
struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct acpi_namespace_node
*parent, struct acpi_namespace_node
*child);
/*
......
......@@ -155,7 +155,7 @@ struct acpi_object_event {
struct acpi_object_mutex {
ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */
u16 acquisition_depth; /* Allow multiple Acquires, same thread */
acpi_thread_id owner_thread_id; /* Current owner of the mutex */
struct acpi_thread_state *owner_thread; /* Current owner of the mutex */
acpi_mutex os_mutex; /* Actual OS synchronization object */
union acpi_operand_object *prev; /* Link for list of acquired mutexes */
union acpi_operand_object *next; /* Link for list of acquired mutexes */
......@@ -216,7 +216,7 @@ struct acpi_object_processor {
/* The next two fields take advantage of the 3-byte space before NOTIFY_INFO */
u8 proc_id;
u8 length;
ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
ACPI_COMMON_NOTIFY_INFO acpi_io_address address;
};
struct acpi_object_thermal_zone {
......
......@@ -91,7 +91,8 @@ typedef int (*acpi_op_remove) (struct acpi_device * device, int type);
typedef int (*acpi_op_lock) (struct acpi_device * device, int type);
typedef int (*acpi_op_start) (struct acpi_device * device);
typedef int (*acpi_op_stop) (struct acpi_device * device, int type);
typedef int (*acpi_op_suspend) (struct acpi_device * device, pm_message_t state);
typedef int (*acpi_op_suspend) (struct acpi_device * device,
pm_message_t state);
typedef int (*acpi_op_resume) (struct acpi_device * device);
typedef int (*acpi_op_scan) (struct acpi_device * device);
typedef int (*acpi_op_bind) (struct acpi_device * device);
......@@ -296,7 +297,7 @@ struct acpi_device {
void *driver_data;
struct device dev;
struct acpi_bus_ops bus_ops; /* workaround for different code path for hotplug */
enum acpi_bus_removal_type removal_type; /* indicate for different removal type */
enum acpi_bus_removal_type removal_type; /* indicate for different removal type */
};
#define acpi_driver_data(d) ((d)->driver_data)
......@@ -338,7 +339,7 @@ int acpi_bus_add(struct acpi_device **child, struct acpi_device *parent,
acpi_handle handle, int type);
int acpi_bus_trim(struct acpi_device *start, int rmdevice);
int acpi_bus_start(struct acpi_device *device);
acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd);
acpi_status acpi_bus_get_ejd(acpi_handle handle, acpi_handle * ejd);
int acpi_match_ids(struct acpi_device *device, char *ids);
int acpi_create_dir(struct acpi_device *);
void acpi_remove_dir(struct acpi_device *);
......@@ -363,6 +364,6 @@ acpi_handle acpi_get_child(acpi_handle, acpi_integer);
acpi_handle acpi_get_pci_rootbridge_handle(unsigned int, unsigned int);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)((dev)->archdata.acpi_handle))
#endif /* CONFIG_ACPI */
#endif /* CONFIG_ACPI */
#endif /*__ACPI_BUS_H__*/
......@@ -113,7 +113,8 @@ extern int is_dock_device(acpi_handle handle);
extern int register_dock_notifier(struct notifier_block *nb);
extern void unregister_dock_notifier(struct notifier_block *nb);
extern int register_hotplug_dock_device(acpi_handle handle,
acpi_notify_handler handler, void *context);
acpi_notify_handler handler,
void *context);
extern void unregister_hotplug_dock_device(acpi_handle handle);
#else
static inline int is_dock_device(acpi_handle handle)
......@@ -128,7 +129,8 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
{
}
static inline int register_hotplug_dock_device(acpi_handle handle,
acpi_notify_handler handler, void *context)
acpi_notify_handler handler,
void *context)
{
return -ENODEV;
}
......
......@@ -8,7 +8,7 @@
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
#define MAX_PXM_DOMAINS (256) /* Old pxm spec is defined 8 bit */
#endif
extern int __cpuinitdata pxm_to_node_map[MAX_PXM_DOMAINS];
......
......@@ -143,7 +143,8 @@ void acpi_os_release_mutex(acpi_mutex handle);
*/
void *acpi_os_allocate(acpi_size size);
void __iomem *acpi_os_map_memory(acpi_physical_address where, acpi_native_uint length);
void __iomem *acpi_os_map_memory(acpi_physical_address where,
acpi_native_uint length);
void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
......
......@@ -344,7 +344,7 @@ typedef u32 acpi_integer;
/* 64-bit integers */
typedef unsigned long long acpi_integer;
typedef unsigned long long acpi_integer;
#define ACPI_INTEGER_MAX ACPI_UINT64_MAX
#define ACPI_INTEGER_BIT_SIZE 64
#define ACPI_MAX_DECIMAL_DIGITS 20 /* 2^64 = 18,446,744,073,709,551,616 */
......
......@@ -498,7 +498,8 @@ acpi_ut_display_init_pathname(u8 type,
acpi_status
acpi_ut_walk_aml_resources(u8 * aml,
acpi_size aml_length,
acpi_walk_aml_callback user_function, void **context);
acpi_walk_aml_callback user_function,
void **context);
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index);
......
......@@ -103,7 +103,10 @@
#define acpi_thread_id struct task_struct *
static inline acpi_thread_id acpi_os_get_thread_id(void) { return current; }
static inline acpi_thread_id acpi_os_get_thread_id(void)
{
return current;
}
/*
* The irqs_disabled() check is for resume from RAM.
......@@ -112,15 +115,19 @@ static inline acpi_thread_id acpi_os_get_thread_id(void) { return current; }
* to quiet __might_sleep() in kmalloc() and resume does not.
*/
#include <acpi/actypes.h>
static inline void *acpi_os_allocate(acpi_size size) {
return kmalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
static inline void *acpi_os_allocate(acpi_size size)
{
return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
static inline void *acpi_os_allocate_zeroed(acpi_size size) {
return kzalloc(size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
static inline void *acpi_os_allocate_zeroed(acpi_size size)
{
return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
static inline void *acpi_os_acquire_object(acpi_cache_t * cache) {
return kmem_cache_zalloc(cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL);
static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
{
return kmem_cache_zalloc(cache,
irqs_disabled()? GFP_ATOMIC : GFP_KERNEL);
}
#define ACPI_ALLOCATE(a) acpi_os_allocate(a)
......
......@@ -18,7 +18,7 @@
#define ACPI_PDC_REVISION_ID 0x1
#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
#define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
#define ACPI_PSD_REV0_ENTRIES 5
/*
......@@ -189,8 +189,9 @@ struct acpi_processor_errata {
} piix4;
};
extern int acpi_processor_preregister_performance(
struct acpi_processor_performance **performance);
extern int acpi_processor_preregister_performance(struct
acpi_processor_performance
**performance);
extern int acpi_processor_register_performance(struct acpi_processor_performance
*performance, unsigned int cpu);
......@@ -213,7 +214,8 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr);
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
unsigned int cpu);
int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx, struct acpi_power_register *reg);
struct acpi_processor_cx *cx,
struct acpi_power_register *reg);
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cstate);
#else
static inline void acpi_processor_power_init_bm_check(struct
......@@ -224,12 +226,14 @@ static inline void acpi_processor_power_init_bm_check(struct
return;
}
static inline int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx, struct acpi_power_register *reg)
struct acpi_processor_cx *cx,
struct acpi_power_register
*reg)
{
return -1;
}
static inline void acpi_processor_ffh_cstate_enter(
struct acpi_processor_cx *cstate)
static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
*cstate)
{
return;
}
......
......@@ -182,7 +182,8 @@ extern int ec_read(u8 addr, u8 *val);
extern int ec_write(u8 addr, u8 val);
extern int ec_transaction(u8 command,
const u8 *wdata, unsigned wdata_len,
u8 *rdata, unsigned rdata_len);
u8 *rdata, unsigned rdata_len,
int force_poll);
#endif /*CONFIG_ACPI_EC*/
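With the widened prototype, every ec_transaction() caller now states explicitly whether the transaction should force EC polling; the msi-laptop call sites above pass 1. A minimal sketch of a caller under that assumption (the helper name is hypothetical):

/* Hypothetical caller, for illustration only. The trailing argument is the
 * new force_poll flag; 1 requests polled mode, matching the msi-laptop
 * call sites in this series.
 */
static int example_ec_read_byte(u8 command, u8 query, u8 *value)
{
	return ec_transaction(command, &query, 1, value, 1, 1);
}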
......