36 files changed, 377 insertions, 211 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2311dad7a57..bc3478581f6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -229,6 +229,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			use by PCI
 			Format: <irq>,<irq>...
 
+	acpi_no_auto_serialize	[HW,ACPI]
+			Disable auto-serialization of AML methods.
+			AML control methods that contain the opcodes to create
+			named objects will be marked as "Serialized" by the
+			auto-serialization feature.
+			This feature is enabled by default.
+			This option allows the feature to be turned off.
+
 	acpi_no_auto_ssdt	[HW,ACPI] Disable automatic loading of SSDT
 
 	acpica_no_return_repair [HW, ACPI]
@@ -306,8 +314,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	acpi_sci=	[HW,ACPI] ACPI System Control Interrupt trigger mode
 			Format: { level | edge | high | low }
 
-	acpi_serialize	[HW,ACPI] force serialization of AML methods
-
 	acpi_skip_timer_override [HW,ACPI]
 			Recognize and ignore IRQ0/pin2 Interrupt Override.
 			For broken nForce2 BIOS resulting in XT-PIC timer.
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index b6ce00b2be9..5f96daf8566 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -232,7 +232,7 @@ defined in include/linux/pm.h:
     equal to zero); the initial value of it is 1 (i.e. runtime PM is
     initially disabled for all devices)
 
-  unsigned int runtime_error;
+  int runtime_error;
     - if set, there was a fatal error (one of the callbacks returned error code
       as described in Section 2), so the helper functions will not work until
       this flag is cleared; this is the error code returned by the failing
@@ -401,11 +401,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
   int pm_runtime_disable(struct device *dev);
     - increment the device's 'power.disable_depth' field (if the value of that
       field was previously zero, this prevents subsystem-level runtime PM
-      callbacks from being run for the device), make sure that all of the pending
-      runtime PM operations on the device are either completed or canceled;
-      returns 1 if there was a resume request pending and it was necessary to
-      execute the subsystem-level resume callback for the device to satisfy that
-      request, otherwise 0 is returned
+      callbacks from being run for the device), make sure that all of the
+      pending runtime PM operations on the device are either completed or
+      canceled; returns 1 if there was a resume request pending and it was
+      necessary to execute the subsystem-level resume callback for the device
+      to satisfy that request, otherwise 0 is returned
 
   int pm_runtime_barrier(struct device *dev);
     - check if there's a resume request pending for the device and resume it
@@ -667,11 +667,11 @@ driver/base/power/generic_ops.c:
 
   int pm_generic_runtime_suspend(struct device *dev);
     - invoke the ->runtime_suspend() callback provided by the driver of this
-      device and return its result, or return -EINVAL if not defined
+      device and return its result, or return 0 if not defined
 
   int pm_generic_runtime_resume(struct device *dev);
     - invoke the ->runtime_resume() callback provided by the driver of this
-      device and return its result, or return -EINVAL if not defined
+      device and return its result, or return 0 if not defined
 
   int pm_generic_suspend(struct device *dev);
     - if the device has not been suspended at run time, invoke the ->suspend()
@@ -727,15 +727,12 @@ driver/base/power/generic_ops.c:
   int pm_generic_restore_noirq(struct device *dev);
     - invoke the ->restore_noirq() callback provided by the device's driver
 
-These functions can be assigned to the ->runtime_idle(), ->runtime_suspend(),
+These functions are the defaults used by the PM core, if a subsystem doesn't
+provide its own callbacks for ->runtime_idle(), ->runtime_suspend(),
 ->runtime_resume(), ->suspend(), ->suspend_noirq(), ->resume(),
 ->resume_noirq(), ->freeze(), ->freeze_noirq(), ->thaw(), ->thaw_noirq(),
-->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() callback
-pointers in the subsystem-level dev_pm_ops structures.
-
-If a subsystem wishes to use all of them at the same time, it can simply assign
-the GENERIC_SUBSYS_PM_OPS macro, defined in include/linux/pm.h, to its
-dev_pm_ops structure pointer.
+->poweroff(), ->poweroff_noirq(), ->restore(), ->restore_noirq() in the
+subsystem-level dev_pm_ops structure.
 
 Device drivers that wish to use the same function as a system suspend, freeze,
 poweroff and runtime suspend callback, and similarly for system resume, thaw,
@@ -873,7 +870,7 @@ Here is a schematic pseudo-code example:
 		foo->is_suspended = 0;
 		pm_runtime_mark_last_busy(&foo->dev);
 		if (foo->num_pending_requests > 0)
-			foo_process_requests(foo);
+			foo_process_next_request(foo);
 		unlock(&foo->private_lock);
 		return 0;
 	}
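With GENERIC_SUBSYS_PM_OPS gone, nothing needs to be assigned explicitly to get
the generic behavior: a driver fills in only what it implements and, per the
documentation text above, the PM core falls back to the pm_generic_* defaults.
A minimal driver-side sketch (all foo_* names are hypothetical, for
illustration only):

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>

	static int foo_runtime_suspend(struct device *dev)
	{
		/* Quiesce the device; reached via pm_generic_runtime_suspend()
		 * when the subsystem supplies no ->runtime_suspend() itself. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* Bring the device back to full power. */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
		/* System sleep callbacks deliberately left unset: the
		 * pm_generic_* defaults described above take over. */
	};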
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 5b472c43c31..d3e2cc395d7 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -139,20 +139,21 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
 			   struct acpi_walk_state *walk_state);
 
 /*
- * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
+ * dsload - Parser/Interpreter interface
 */
 acpi_status
 acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
 
+/* dsload - pass 1 namespace load callbacks */
+
 acpi_status
 acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
 
 acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
 
-/*
- * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
- */
+/* dsload - pass 2 namespace load callbacks */
+
 acpi_status
 acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
@@ -200,7 +201,9 @@ void acpi_ds_method_data_init(struct acpi_walk_state *walk_state);
 /*
  * dsmethod - Parser/Interpreter interface - control method parsing
 */
-acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node);
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+			      union acpi_operand_object *obj_desc);
 
 acpi_status
 acpi_ds_call_control_method(struct acpi_thread_state *thread,
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 8f40bb972ae..49bbc71fad5 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -93,12 +93,13 @@ ACPI_INIT_GLOBAL(u8, acpi_gbl_enable_interpreter_slack, FALSE);
 
 /*
- * Automatically serialize ALL control methods? Default is FALSE, meaning
- * to use the Serialized/not_serialized method flags on a per method basis.
- * Only change this if the ASL code is poorly written and cannot handle
- * reentrancy even though methods are marked "NotSerialized".
+ * Automatically serialize all methods that create named objects? Default
+ * is TRUE, meaning that all non_serialized methods are scanned once at
+ * table load time to determine those that create named objects. Methods
+ * that create named objects are marked Serialized in order to prevent
+ * possible run-time problems if they are entered by more than one thread.
 */
-ACPI_INIT_GLOBAL(u8, acpi_gbl_all_methods_serialized, FALSE);
+ACPI_INIT_GLOBAL(u8, acpi_gbl_auto_serialize_methods, TRUE);
 
 /*
  * Create the predefined _OSI method in the namespace? Default is TRUE
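Because the global now defaults to TRUE, an ACPICA host that wants the old
per-method behavior must clear it before the tables are loaded; under Linux,
the acpi_no_auto_serialize boot parameter added above does exactly that. A
hedged sketch for a bare ACPICA embedder (the init function name is invented):

	#include "acpi.h"	/* ACPICA public header */

	acpi_status host_acpi_early_init(void)
	{
		/* Assumption: this must happen before acpi_load_tables(),
		 * since the named-object scan runs at table load time. */
		acpi_gbl_auto_serialize_methods = FALSE;

		return acpi_initialize_subsystem();
	}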
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index c54267748be..b01f71ce052 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -458,10 +458,6 @@ void acpi_ex_enter_interpreter(void);
 
 void acpi_ex_exit_interpreter(void);
 
-void acpi_ex_reacquire_interpreter(void);
-
-void acpi_ex_relinquish_interpreter(void);
-
 u8 acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
 
 void acpi_ex_acquire_global_lock(u32 rule);
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1a4d61805eb..22fb6449d3d 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -193,7 +193,8 @@ struct acpi_object_method {
 #define ACPI_METHOD_INTERNAL_ONLY       0x02	/* Method is implemented internally (_OSI) */
 #define ACPI_METHOD_SERIALIZED          0x04	/* Method is serialized */
 #define ACPI_METHOD_SERIALIZED_PENDING  0x08	/* Method is to be marked serialized */
-#define ACPI_METHOD_MODIFIED_NAMESPACE  0x10	/* Method modified the namespace */
+#define ACPI_METHOD_IGNORE_SYNC_LEVEL   0x10	/* Method was auto-serialized at table load time */
+#define ACPI_METHOD_MODIFIED_NAMESPACE  0x20	/* Method modified the namespace */
 
 /******************************************************************************
 *
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 5d2989a1b68..cf7346110bd 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -133,6 +133,9 @@ struct acpi_init_walk_info {
 	u32 table_index;
 	u32 object_count;
 	u32 method_count;
+	u32 serial_method_count;
+	u32 non_serial_method_count;
+	u32 serialized_method_count;
 	u32 device_count;
 	u32 op_region_count;
 	u32 field_count;
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index 96644d5ac0e..aee5e45f6d3 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -83,8 +83,8 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
 	    (struct acpi_init_walk_info *)context;
 	struct acpi_namespace_node *node =
 	    (struct acpi_namespace_node *)obj_handle;
-	acpi_object_type type;
 	acpi_status status;
+	union acpi_operand_object *obj_desc;
 
 	ACPI_FUNCTION_ENTRY();
 
@@ -100,9 +100,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
 
 	/* And even then, we are only interested in a few object types */
 
-	type = acpi_ns_get_type(obj_handle);
-
-	switch (type) {
+	switch (acpi_ns_get_type(obj_handle)) {
 	case ACPI_TYPE_REGION:
 
 		status = acpi_ds_initialize_region(obj_handle);
@@ -117,8 +115,44 @@ acpi_ds_init_one_object(acpi_handle obj_handle,
 		break;
 
 	case ACPI_TYPE_METHOD:
-
+		/*
+		 * Auto-serialization support. We will examine each method that is
+		 * not_serialized to determine if it creates any Named objects. If
+		 * it does, it will be marked serialized to prevent problems if
+		 * the method is entered by two or more threads and an attempt is
+		 * made to create the same named object twice -- which results in
+		 * an AE_ALREADY_EXISTS exception and method abort.
+		 */
 		info->method_count++;
+		obj_desc = acpi_ns_get_attached_object(node);
+		if (!obj_desc) {
+			break;
+		}
+
+		/* Ignore if already serialized */
+
+		if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
+			info->serial_method_count++;
+			break;
+		}
+
+		if (acpi_gbl_auto_serialize_methods) {
+
+			/* Parse/scan method and serialize it if necessary */
+
+			acpi_ds_auto_serialize_method(node, obj_desc);
+			if (obj_desc->method.
+			    info_flags & ACPI_METHOD_SERIALIZED) {
+
+				/* Method was just converted to Serialized */
+
+				info->serial_method_count++;
+				info->serialized_method_count++;
+				break;
+			}
+		}
+
+		info->non_serial_method_count++;
 		break;
 
 	case ACPI_TYPE_DEVICE:
@@ -170,7 +204,6 @@ acpi_ds_initialize_objects(u32 table_index,
 
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
 			  "**** Starting initialization of namespace objects ****\n"));
-	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, "Parsing all Control Methods:"));
 
 	/* Set all init info to zero */
 
@@ -205,14 +238,16 @@ acpi_ds_initialize_objects(u32 table_index,
 	}
 
 	ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT,
-			      "\nTable [%4.4s](id %4.4X) - %u Objects with %u Devices %u Methods %u Regions\n",
+			      "Table [%4.4s] (id %4.4X) - %4u Objects with %3u Devices, "
+			      "%3u Regions, %3u Methods (%u/%u/%u Serial/Non/Cvt)\n",
 			      table->signature, owner_id, info.object_count,
-			      info.device_count, info.method_count,
-			      info.op_region_count));
+			      info.device_count, info.op_region_count,
+			      info.method_count, info.serial_method_count,
+			      info.non_serial_method_count,
+			      info.serialized_method_count));
 
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%u Methods, %u Regions\n", info.method_count,
-			  info.op_region_count));
+	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "%u Methods, %u Regions\n",
+			  info.method_count, info.op_region_count));
 
 	return_ACPI_STATUS(AE_OK);
 }
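For reference, the reworked ACPI_DB_INIT summary line renders along these lines
(all counts invented for illustration; Serial/Non partition the methods that
have attached objects, and Cvt counts only those converted by this scan):

	Table [DSDT] (id 0001) - 1963 Objects with  74 Devices,  39 Regions, 547 Methods (120/427/3 Serial/Non/Cvt)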
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 2c6d42c2bc0..3c7f7378b94 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -49,16 +49,155 @@
 #ifdef ACPI_DISASSEMBLER
 #include "acdisasm.h"
 #endif
+#include "acparser.h"
+#include "amlcode.h"
 
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsmethod")
 
 /* Local prototypes */
 static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+			     union acpi_parse_object **out_op);
+
+static acpi_status
 acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
 
 /*******************************************************************************
  *
+ * FUNCTION:    acpi_ds_auto_serialize_method
+ *
+ * PARAMETERS:  node                    - Namespace Node of the method
+ *              obj_desc                - Method object attached to node
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Parse a control method AML to scan for control methods that
+ *              need serialization due to the creation of named objects.
+ *
+ * NOTE: It is a bit of overkill to mark all such methods serialized, since
+ * there is only a problem if the method actually blocks during execution.
+ * A blocking operation is, for example, a Sleep() operation, or any access
+ * to an operation region. However, it is probably not possible to easily
+ * detect whether a method will block or not, so we simply mark all suspicious
+ * methods as serialized.
+ *
+ * NOTE2: This code is essentially a generic routine for parsing a single
+ * control method.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
+			      union acpi_operand_object *obj_desc)
+{
+	acpi_status status;
+	union acpi_parse_object *op = NULL;
+	struct acpi_walk_state *walk_state;
+
+	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+			  "Method auto-serialization parse [%4.4s] %p\n",
+			  acpi_ut_get_node_name(node), node));
+
+	/* Create/Init a root op for the method parse tree */
+
+	op = acpi_ps_alloc_op(AML_METHOD_OP);
+	if (!op) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	acpi_ps_set_name(op, node->name.integer);
+	op->common.node = node;
+
+	/* Create and initialize a new walk state */
+
+	walk_state =
+	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
+	if (!walk_state) {
+		return_ACPI_STATUS(AE_NO_MEMORY);
+	}
+
+	status =
+	    acpi_ds_init_aml_walk(walk_state, op, node,
+				  obj_desc->method.aml_start,
+				  obj_desc->method.aml_length, NULL, 0);
+	if (ACPI_FAILURE(status)) {
+		acpi_ds_delete_walk_state(walk_state);
+		return_ACPI_STATUS(status);
+	}
+
+	walk_state->descending_callback = acpi_ds_detect_named_opcodes;
+
+	/* Parse the method, scan for creation of named objects */
+
+	status = acpi_ps_parse_aml(walk_state);
+	if (ACPI_FAILURE(status)) {
+		return_ACPI_STATUS(status);
+	}
+
+	acpi_ps_delete_parse_tree(op);
+	return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_detect_named_opcodes
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk
+ *              out_op          - Unused, required for parser interface
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Descending callback used during the loading of ACPI tables.
+ *              Currently used to detect methods that must be marked serialized
+ *              in order to avoid problems with the creation of named objects.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
+			     union acpi_parse_object **out_op)
+{
+
+	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);
+
+	/* We are only interested in opcodes that create a new name */
+
+	if (!
+	    (walk_state->op_info->
+	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
+		return (AE_OK);
+	}
+
+	/*
+	 * At this point, we know we have a Named object opcode.
+	 * Mark the method as serialized. Later code will create a mutex for
+	 * this method to enforce serialization.
+	 *
+	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
+	 * Sync Level mechanism for this method, even though it is now serialized.
+	 * Otherwise, there can be conflicts with existing ASL code that actually
+	 * uses sync levels.
+	 */
+	walk_state->method_desc->method.sync_level = 0;
+	walk_state->method_desc->method.info_flags |=
+	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
+			  walk_state->method_node->name.ascii,
+			  walk_state->method_node, walk_state->op_info->name,
+			  walk_state->opcode));
+
+	/* Abort the parse, no need to examine this method any further */
+
+	return (AE_CTRL_TERMINATE);
+}
+
+/*******************************************************************************
+ *
  * FUNCTION:    acpi_ds_method_error
  *
  * PARAMETERS:  status          - Execution status
@@ -74,7 +213,7 @@ acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);
  ******************************************************************************/
 
 acpi_status
-acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
+acpi_ds_method_error(acpi_status status, struct acpi_walk_state * walk_state)
 {
 	ACPI_FUNCTION_ENTRY();
 
@@ -217,13 +356,19 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
 		/*
 		 * The current_sync_level (per-thread) must be less than or equal to
 		 * the sync level of the method. This mechanism provides some
-		 * deadlock prevention
+		 * deadlock prevention.
+		 *
+		 * If the method was auto-serialized, we just ignore the sync level
+		 * mechanism, because auto-serialization of methods can interfere
+		 * with ASL code that actually uses sync levels.
 		 *
 		 * Top-level method invocation has no walk state at this point
 		 */
 		if (walk_state &&
-		    (walk_state->thread->current_sync_level >
-		     obj_desc->method.mutex->mutex.sync_level)) {
+		    (!(obj_desc->method.
+		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
+		    && (walk_state->thread->current_sync_level >
+			obj_desc->method.mutex->mutex.sync_level)) {
 			ACPI_ERROR((AE_INFO,
 				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
 				    acpi_ut_get_node_name(method_node),
@@ -668,7 +813,8 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
 				method_desc->method.info_flags &=
 				    ~ACPI_METHOD_SERIALIZED_PENDING;
 				method_desc->method.info_flags |=
-				    ACPI_METHOD_SERIALIZED;
+				    (ACPI_METHOD_SERIALIZED |
+				     ACPI_METHOD_IGNORE_SYNC_LEVEL);
 				method_desc->method.sync_level = 0;
 			}
 
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index bd7811c6416..15623da2620 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -73,8 +73,20 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
 {
 	switch (pass_number) {
+	case 0:
+
+		/* Parse only - caller will setup callbacks */
+
+		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
+		    ACPI_PARSE_DELETE_TREE | ACPI_PARSE_DISASSEMBLE;
+		walk_state->descending_callback = NULL;
+		walk_state->ascending_callback = NULL;
+		break;
+
 	case 1:
 
+		/* Load pass 1 */
+
 		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
 		    ACPI_PARSE_DELETE_TREE;
 		walk_state->descending_callback = acpi_ds_load1_begin_op;
@@ -83,6 +95,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
 
 	case 2:
 
+		/* Load pass 2 */
+
 		walk_state->parse_flags = ACPI_PARSE_LOAD_PASS1 |
 		    ACPI_PARSE_DELETE_TREE;
 		walk_state->descending_callback = acpi_ds_load2_begin_op;
@@ -91,6 +105,8 @@ acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number)
 
 	case 3:
 
+		/* Execution pass */
+
 #ifndef ACPI_NO_METHOD_EXECUTION
 		walk_state->parse_flags |= ACPI_PARSE_EXECUTE |
 		    ACPI_PARSE_DELETE_TREE;
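The new case 0 appears to be the hook that acpi_ds_auto_serialize_method()
relies on: pass_number 0 selects parse-only flags and leaves both callbacks
NULL, and the caller then installs the single callback it needs. Condensed
from the two functions above, just to show the wiring in one place:

	/* Pass 0: parse-only setup, no load callbacks installed */
	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);

	/* The caller then supplies its own descending callback */
	walk_state->descending_callback = acpi_ds_detect_named_opcodes;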
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index 841caed11c0..f7da64123ed 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -77,7 +77,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
 		/* We must wait, so unlock the interpreter */
 
-		acpi_ex_relinquish_interpreter();
+		acpi_ex_exit_interpreter();
 
 		status = acpi_os_wait_semaphore(semaphore, 1, timeout);
 
@@ -87,7 +87,7 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout)
 
 		/* Reacquire the interpreter */
 
-		acpi_ex_reacquire_interpreter();
+		acpi_ex_enter_interpreter();
 	}
 
 	return_ACPI_STATUS(status);
@@ -123,7 +123,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
 		/* We must wait, so unlock the interpreter */
 
-		acpi_ex_relinquish_interpreter();
+		acpi_ex_exit_interpreter();
 
 		status = acpi_os_acquire_mutex(mutex, timeout);
 
@@ -133,7 +133,7 @@ acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout)
 
 		/* Reacquire the interpreter */
 
-		acpi_ex_reacquire_interpreter();
+		acpi_ex_enter_interpreter();
 	}
 
 	return_ACPI_STATUS(status);
@@ -198,7 +198,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
 
 	/* Since this thread will sleep, we must release the interpreter */
 
-	acpi_ex_relinquish_interpreter();
+	acpi_ex_exit_interpreter();
 
 	/*
 	 * For compatibility with other ACPI implementations and to prevent
@@ -212,7 +212,7 @@ acpi_status acpi_ex_system_do_sleep(u64 how_long)
 
 	/* And now we must get the interpreter again */
 
-	acpi_ex_reacquire_interpreter();
+	acpi_ex_enter_interpreter();
 
 	return (AE_OK);
 }
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 5b16c5484be..d9d72dff2a7 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -100,37 +100,6 @@ void acpi_ex_enter_interpreter(void)
 
 /*******************************************************************************
 *
- * FUNCTION:    acpi_ex_reacquire_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Reacquire the interpreter execution region from within the
- *              interpreter code. Failure to enter the interpreter region is a
- *              fatal system error. Used in conjunction with
- *              relinquish_interpreter
- *
- ******************************************************************************/
-
-void acpi_ex_reacquire_interpreter(void)
-{
-	ACPI_FUNCTION_TRACE(ex_reacquire_interpreter);
-
-	/*
-	 * If the global serialized flag is set, do not release the interpreter,
-	 * since it was not actually released by acpi_ex_relinquish_interpreter.
-	 * This forces the interpreter to be single threaded.
-	 */
-	if (!acpi_gbl_all_methods_serialized) {
-		acpi_ex_enter_interpreter();
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ex_exit_interpreter
 *
 * PARAMETERS:  None
@@ -139,7 +108,16 @@ void acpi_ex_reacquire_interpreter(void)
 *
 * DESCRIPTION: Exit the interpreter execution region. This is the top level
 *              routine used to exit the interpreter when all processing has
- *              been completed.
+ *              been completed, or when the method blocks.
+ *
+ *              Cases where the interpreter is unlocked internally:
+ *              1) Method will be blocked on a Sleep() AML opcode
+ *              2) Method will be blocked on an Acquire() AML opcode
+ *              3) Method will be blocked on a Wait() AML opcode
+ *              4) Method will be blocked to acquire the global lock
+ *              5) Method will be blocked waiting to execute a serialized
+ *                 control method that is currently executing
+ *              6) About to invoke a user-installed opregion handler
 *
 ******************************************************************************/
 
@@ -160,44 +138,6 @@ void acpi_ex_exit_interpreter(void)
 
 /*******************************************************************************
 *
- * FUNCTION:    acpi_ex_relinquish_interpreter
- *
- * PARAMETERS:  None
- *
- * RETURN:      None
- *
- * DESCRIPTION: Exit the interpreter execution region, from within the
- *              interpreter - before attempting an operation that will possibly
- *              block the running thread.
- *
- * Cases where the interpreter is unlocked internally
- * 1) Method to be blocked on a Sleep() AML opcode
- * 2) Method to be blocked on an Acquire() AML opcode
- * 3) Method to be blocked on a Wait() AML opcode
- * 4) Method to be blocked to acquire the global lock
- * 5) Method to be blocked waiting to execute a serialized control method
- *    that is currently executing
- * 6) About to invoke a user-installed opregion handler
- *
- ******************************************************************************/
-
-void acpi_ex_relinquish_interpreter(void)
-{
-	ACPI_FUNCTION_TRACE(ex_relinquish_interpreter);
-
-	/*
-	 * If the global serialized flag is set, do not release the interpreter.
-	 * This forces the interpreter to be single threaded.
-	 */
-	if (!acpi_gbl_all_methods_serialized) {
-		acpi_ex_exit_interpreter();
-	}
-
-	return_VOID;
-}
-
-/*******************************************************************************
- *
  * FUNCTION:    acpi_ex_truncate_for32bit_table
 *
 * PARAMETERS:  obj_desc        - Object to be truncated
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index 5b74677bf74..a3fb7e4c080 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -111,9 +111,8 @@ acpi_status acpi_ns_initialize_objects(void)
 			  info.object_count));
 
 	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%u Control Methods found\n", info.method_count));
-	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-			  "%u Op Regions found\n", info.op_region_count));
+			  "%u Control Methods found\n%u Op Regions found\n",
+			  info.method_count, info.op_region_count));
 
 	return_ACPI_STATUS(AE_OK);
 }
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 7ae521ce8d3..7c9d0181f34 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -128,12 +128,12 @@ unlock:
 	 * parse trees.
 	 */
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "**** Begin Table Method Parsing and Object Initialization\n"));
+			  "**** Begin Table Object Initialization\n"));
 
 	status = acpi_ds_initialize_objects(table_index, node);
 
 	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-			  "**** Completed Table Method Parsing and Object Initialization\n"));
+			  "**** Completed Table Object Initialization\n"));
 
 	return_ACPI_STATUS(status);
 }
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 646d1a3f6e2..b058e2390fd 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -480,6 +480,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 				status = AE_OK;
 			}
 
+			if (status == AE_CTRL_TERMINATE) {
+				return_ACPI_STATUS(status);
+			}
+
 			status =
 			    acpi_ps_complete_op(walk_state, &op,
 						status);
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index af1f46cd37a..a6885077d59 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -219,7 +219,10 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
 
 	status = walk_state->descending_callback(walk_state, op);
 	if (ACPI_FAILURE(status)) {
-		ACPI_EXCEPTION((AE_INFO, status, "During name lookup/catalog"));
+		if (status != AE_CTRL_TERMINATE) {
+			ACPI_EXCEPTION((AE_INFO, status,
+					"During name lookup/catalog"));
+		}
 		return_ACPI_STATUS(status);
 	}
 
@@ -230,7 +233,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
 	status = acpi_ps_next_parse_state(walk_state, *op, status);
 	if (ACPI_FAILURE(status)) {
 		if (status == AE_CTRL_PENDING) {
-			return_ACPI_STATUS(AE_CTRL_PARSE_PENDING);
+			status = AE_CTRL_PARSE_PENDING;
 		}
 		return_ACPI_STATUS(status);
 	}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 27f84af4e33..f7fd72ac69c 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1537,17 +1537,21 @@ static int __init osi_setup(char *str)
 
 __setup("acpi_osi=", osi_setup);
 
-/* enable serialization to combat AE_ALREADY_EXISTS errors */
-static int __init acpi_serialize_setup(char *str)
+/*
+ * Disable the auto-serialization of named object creation methods.
+ *
+ * This feature is enabled by default. It marks the AML control methods
+ * that contain the opcodes to create named objects as "Serialized".
+ */
+static int __init acpi_no_auto_serialize_setup(char *str)
 {
-	printk(KERN_INFO PREFIX "serialize enabled\n");
-
-	acpi_gbl_all_methods_serialized = TRUE;
+	acpi_gbl_auto_serialize_methods = FALSE;
+	pr_info("ACPI: auto-serialization disabled\n");
 
 	return 1;
 }
 
-__setup("acpi_serialize", acpi_serialize_setup);
+__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
 
 /* Check of resource interference between native drivers and ACPI
  * OperationRegions (SystemIO and System Memory only).
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 86d73d5d503..71e2065639a 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -70,28 +70,6 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
 	return 0;
 }
 
-static int map_gic_id(struct acpi_subtable_header *entry,
-		      int device_declaration, u32 acpi_id, int *apic_id)
-{
-	struct acpi_madt_generic_interrupt *gic =
-	    (struct acpi_madt_generic_interrupt *)entry;
-
-	if (!(gic->flags & ACPI_MADT_ENABLED))
-		return -ENODEV;
-
-	/*
-	 * In the GIC interrupt model, logical processors are
-	 * required to have a Processor Device object in the DSDT,
-	 * so we should check device_declaration here
-	 */
-	if (device_declaration && (gic->uid == acpi_id)) {
-		*apic_id = gic->gic_id;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
 static int map_madt_entry(int type, u32 acpi_id)
 {
 	unsigned long madt_end, entry;
@@ -127,9 +105,6 @@ static int map_madt_entry(int type, u32 acpi_id)
 		} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
 			if (!map_lsapic_id(header, type, acpi_id, &apic_id))
 				break;
-		} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-			if (!map_gic_id(header, type, acpi_id, &apic_id))
-				break;
 		}
 		entry += header->length;
 	}
@@ -160,8 +135,6 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 		map_lapic_id(header, acpi_id, &apic_id);
 	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
 		map_lsapic_id(header, type, acpi_id, &apic_id);
-	} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
-		map_gic_id(header, type, acpi_id, &apic_id);
 	}
 
 exit:
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index a2e55bfdf57..96a92db83ca 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -285,7 +285,7 @@ int pm_generic_restore(struct device *dev)
 EXPORT_SYMBOL_GPL(pm_generic_restore);
 
 /**
- * pm_generic_complete - Generic routine competing a device power transition.
+ * pm_generic_complete - Generic routine completing a device power transition.
 * @dev: Device to handle.
 *
 * Complete a device power transition during a system-wide power transition.
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index a05b876f375..bc447b9003c 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -270,7 +270,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 	pr_debug("Old CPU frequency %d kHz, new %d kHz\n",
 	       freqs.old, freqs.new);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	/* Disable IRQs */
 	/* local_irq_save(flags); */
@@ -285,7 +285,7 @@ static int nforce2_target(struct cpufreq_policy *policy,
 	/* Enable IRQs */
 	/* local_irq_restore(flags); */
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 3aa7a7a226b..abda6609d3e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -331,16 +331,15 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, unsigned int state)
 {
 	for_each_cpu(freqs->cpu, policy->cpus)
 		__cpufreq_notify_transition(policy, freqs, state);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
 
 /* Do post notifications when there are chances that transition has failed */
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed)
 {
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
@@ -351,7 +350,41 @@ void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
 	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
 }
-EXPORT_SYMBOL_GPL(cpufreq_notify_post_transition);
+
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs)
+{
+wait:
+	wait_event(policy->transition_wait, !policy->transition_ongoing);
+
+	spin_lock(&policy->transition_lock);
+
+	if (unlikely(policy->transition_ongoing)) {
+		spin_unlock(&policy->transition_lock);
+		goto wait;
+	}
+
+	policy->transition_ongoing = true;
+
+	spin_unlock(&policy->transition_lock);
+
+	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
+
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs, int transition_failed)
+{
+	if (unlikely(WARN_ON(!policy->transition_ongoing)))
+		return;
+
+	cpufreq_notify_post_transition(policy, freqs, transition_failed);
+
+	policy->transition_ongoing = false;
+
+	wake_up(&policy->transition_wait);
+}
+EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
 
 
 /*********************************************************************
@@ -985,6 +1018,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
 
 	INIT_LIST_HEAD(&policy->policy_list);
 	init_rwsem(&policy->rwsem);
+	spin_lock_init(&policy->transition_lock);
+	init_waitqueue_head(&policy->transition_wait);
 
 	return policy;
 
@@ -1470,8 +1505,8 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
 	policy = per_cpu(cpufreq_cpu_data, cpu);
 	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 }
 
 /**
@@ -1652,14 +1687,13 @@ void cpufreq_resume(void)
 	cpufreq_suspended = false;
 
 	list_for_each_entry(policy, &cpufreq_policy_list, policy_list) {
-		if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
+		if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
+			pr_err("%s: Failed to resume driver: %p\n", __func__,
+				policy);
+		else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
 		    || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
 			pr_err("%s: Failed to start governor for policy: %p\n",
 				__func__, policy);
-		else if (cpufreq_driver->resume
-		    && cpufreq_driver->resume(policy))
-			pr_err("%s: Failed to resume driver: %p\n", __func__,
-				policy);
 
 		/*
 		 * schedule call cpufreq_update_policy() for boot CPU, i.e. last
@@ -1832,8 +1866,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
 					__func__, policy->cpu, freqs.old, freqs.new);
 
-			cpufreq_notify_transition(policy, &freqs,
-					CPUFREQ_PRECHANGE);
+			cpufreq_freq_transition_begin(policy, &freqs);
 		}
 
 		retval = cpufreq_driver->target_index(policy, index);
@@ -1842,7 +1875,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 				__func__, retval);
 
 		if (notify)
-			cpufreq_notify_post_transition(policy, &freqs, retval);
+			cpufreq_freq_transition_end(policy, &freqs, retval);
 	}
 
 out:
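Every driver conversion below follows the same pattern: notify begin before
touching the hardware, then end with the driver's error code so a failed
transition is rolled back for the notifier listeners. Sketched for a made-up
->target_index() driver (all foo_* names are hypothetical):

	static int foo_target_index(struct cpufreq_policy *policy,
				    unsigned int index)
	{
		struct cpufreq_freqs freqs;
		int ret;

		freqs.old = policy->cur;
		freqs.new = foo_freq_table[index].frequency;

		/* Serializes against concurrent transitions on this policy
		 * and sends the CPUFREQ_PRECHANGE notifications. */
		cpufreq_freq_transition_begin(policy, &freqs);

		ret = foo_write_pll(freqs.new);	/* hypothetical HW poke */

		/* Sends CPUFREQ_POSTCHANGE; a nonzero third argument makes
		 * the core replay PRE/POSTCHANGE with old and new swapped. */
		cpufreq_freq_transition_end(policy, &freqs, ret);

		return ret;
	}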
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 7f776aa91e2..a6b8214d7b7 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -219,7 +219,7 @@ static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
 	freqs.old = policy->cur;
 	freqs.new = freq_table[index].frequency;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	/* Set the target frequency in all C0_3_PSTATE register */
 	for_each_cpu(i, policy->cpus) {
@@ -258,7 +258,7 @@ static void exynos_cpufreq_work(struct work_struct *work)
 		dev_crit(dvfs_info->dev, "New frequency out of range\n");
 		freqs.new = freqs.old;
 	}
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	cpufreq_cpu_put(policy);
 	mutex_unlock(&cpufreq_lock);
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index d83e8266a58..1d723dc8880 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -265,7 +265,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
 
 	freqs.new = new_khz;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 	local_irq_save(flags);
 
 	if (new_khz != stock_freq) {
@@ -314,7 +314,7 @@ static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
 
 	gx_params->pci_suscfg = suscfg;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	pr_debug("suspend modulation w/ duration of ON:%d us, OFF:%d us\n",
 		gx_params->on_duration * 32, gx_params->off_duration * 32);
diff --git a/drivers/cpufreq/integrator-cpufreq.c b/drivers/cpufreq/integrator-cpufreq.c
index 0e27844e8c2..e5122f1bfe7 100644
--- a/drivers/cpufreq/integrator-cpufreq.c
+++ b/drivers/cpufreq/integrator-cpufreq.c
@@ -122,7 +122,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 		return 0;
 	}
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	cm_osc = __raw_readl(cm_base + INTEGRATOR_HDR_OSC_OFFSET);
 
@@ -143,7 +143,7 @@ static int integrator_set_target(struct cpufreq_policy *policy,
 	 */
 	set_cpus_allowed(current, cpus_allowed);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index bcb9a6d0ae1..099967302bf 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -778,7 +778,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
 
 	pr_info("intel_pstate CPU %d exiting\n", cpu_num);
 
-	del_timer(&all_cpu_data[cpu_num]->timer);
+	del_timer_sync(&all_cpu_data[cpu_num]->timer);
 	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
 	kfree(all_cpu_data[cpu_num]);
 	all_cpu_data[cpu_num] = NULL;
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 7b94da3d2d1..5c440f87ba8 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -269,7 +269,7 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
 	freqs.old = calc_speed(longhaul_get_cpu_mult());
 	freqs.new = speed;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
 			fsb, mult/10, mult%10, print_speed(speed/1000));
@@ -386,7 +386,7 @@ retry_loop:
 		}
 	}
 	/* Report true CPU frequency */
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	if (!bm_timeout)
 		printk(KERN_INFO PFX "Warning: Timeout while waiting for "
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1c0f1067af7..728a2d87949 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -215,7 +215,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	input_buffer = 0x1 | (((target_freq * 100)
 			       / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
@@ -231,7 +231,7 @@ static int pcc_cpufreq_target(struct cpufreq_policy *policy,
 	status = ioread16(&pcch_hdr->status);
 	iowrite16(0, &pcch_hdr->status);
 
-	cpufreq_notify_post_transition(policy, &freqs, status != CMD_COMPLETE);
+	cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE);
 	spin_unlock(&pcc_lock);
 
 	if (status != CMD_COMPLETE) {
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index ce27e6c26c9..62c6f2e5afc 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -148,11 +148,11 @@ static int powernow_k6_target(struct cpufreq_policy *policy,
 	freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
 	freqs.new = busfreq * clock_ratio[best_i].driver_data;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	powernow_k6_set_cpu_multiplier(best_i);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 0e68e027562..f911645c3f6 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -269,7 +269,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 
 	freqs.new = powernow_table[index].frequency;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 
 	/* Now do the magic poking into the MSRs. */
 
@@ -290,7 +290,7 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 	if (have_a0 == 1)
 		local_irq_enable();
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	return 0;
 }
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 27eb2be44de..770a9e1b346 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -963,9 +963,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	policy = cpufreq_cpu_get(smp_processor_id());
 	cpufreq_cpu_put(policy);
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 	res = transition_fid_vid(data, fid, vid);
-	cpufreq_notify_post_transition(policy, &freqs, res);
+	cpufreq_freq_transition_end(policy, &freqs, res);
 
 	return res;
 }
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 25069741b50..a3dc192d21f 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -217,7 +217,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
 	s3c_cpufreq_updateclk(clk_pclk, cpu_new.freq.pclk);
 
 	/* start the frequency change */
-	cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs.freqs);
 
 	/* If hclk is staying the same, then we do not need to
 	 * re-write the IO or the refresh timings whilst we are changing
@@ -261,7 +261,7 @@ static int s3c_cpufreq_settarget(struct cpufreq_policy *policy,
 	local_irq_restore(flags);
 
 	/* notify everyone we've done this */
-	cpufreq_notify_transition(policy, &freqs.freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs.freqs, 0);
 
 	s3c_freq_dbg("%s: finished\n", __func__);
 	return 0;
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 696170ebd3a..86628e22b2a 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -68,10 +68,10 @@ static int sh_cpufreq_target(struct cpufreq_policy *policy,
 	freqs.new	= (freq + 500) / 1000;
 	freqs.flags	= 0;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 	clk_set_rate(cpuclk, freq);
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
+	cpufreq_freq_transition_end(policy, &freqs, 0);
 
 	dev_dbg(dev, "set frequency %lu Hz\n", freq);
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 36cc330b874..13be802b617 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -44,9 +44,9 @@ static int ucv2_target(struct cpufreq_policy *policy,
 	freqs.old = policy->cur;
 	freqs.new = target_freq;
 
-	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
+	cpufreq_freq_transition_begin(policy, &freqs);
 	ret = clk_set_rate(policy->mclk, target_freq * 1000);
-	cpufreq_notify_post_transition(policy, &freqs, ret);
+	cpufreq_freq_transition_end(policy, &freqs, ret);
 
 	return ret;
 }
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index bacddd102ae..01712cbfd92 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -385,7 +385,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
 	 * device is active because it itself may be in use */
 	if (!dev->active) {
 		if (request_irq(*irq, pnp_test_handler,
-				IRQF_DISABLED | IRQF_PROBE_SHARED, "pnp", NULL))
+				IRQF_PROBE_SHARED, "pnp", NULL))
 			return 0;
 		free_irq(*irq, NULL);
 	}
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index b0b01b13ea9..44f5e974960 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -71,7 +71,7 @@ extern u32 acpi_dbg_layer;
 
 /* ACPICA runtime options */
 
-extern u8 acpi_gbl_all_methods_serialized;
+extern u8 acpi_gbl_auto_serialize_methods;
 extern u8 acpi_gbl_copy_dsdt_locally;
 extern u8 acpi_gbl_create_osi_method;
 extern u8 acpi_gbl_disable_auto_repair;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 2d2e62c8666..c48e595f623 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -16,6 +16,7 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/notifier.h>
+#include <linux/spinlock.h>
 #include <linux/sysfs.h>
 
 /*********************************************************************
@@ -104,6 +105,11 @@ struct cpufreq_policy {
 	 *     __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
 	 */
 	struct rw_semaphore	rwsem;
+
+	/* Synchronization for frequency transitions */
+	bool			transition_ongoing; /* Tracks transition status */
+	spinlock_t		transition_lock;
+	wait_queue_head_t	transition_wait;
 };
 
 /* Only for ACPI */
@@ -333,9 +339,9 @@ static inline void cpufreq_resume(void) {}
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
 
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
-		struct cpufreq_freqs *freqs, unsigned int state);
-void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
+void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
+		struct cpufreq_freqs *freqs);
+void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 		struct cpufreq_freqs *freqs, int transition_failed);
 
 #else /* CONFIG_CPU_FREQ */
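The begin/end pair does not have to live in one function. For hardware that
completes the switch asynchronously, begin() is issued in ->target() and end()
from the completion path, as the exynos5440 conversion above illustrates. A
hypothetical skeleton (all foo_* names invented; exynos5440 similarly keeps a
single file-scope cpufreq_freqs for the one in-flight transition):

	static struct cpufreq_freqs foo_freqs;	/* one in-flight transition */

	static int foo_target(struct cpufreq_policy *policy, unsigned int index)
	{
		foo_freqs.old = policy->cur;
		foo_freqs.new = foo_table[index].frequency;

		cpufreq_freq_transition_begin(policy, &foo_freqs);
		foo_kick_hardware(foo_freqs.new);	/* IRQ arrives later */
		return 0;
	}

	static void foo_transition_work(struct work_struct *work)
	{
		struct cpufreq_policy *policy = cpufreq_cpu_get(0);

		/* transition_ongoing stays true until this runs, so any new
		 * foo_target() call blocks in begin() until we are done. */
		cpufreq_freq_transition_end(policy, &foo_freqs, 0);
		cpufreq_cpu_put(policy);
	}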