Diffstat (limited to 'drivers/acpi/osl.c')
| -rw-r--r-- | drivers/acpi/osl.c | 476 |
1 file changed, 392 insertions, 84 deletions
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 9eaf708f588..bad25b070fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -39,7 +39,6 @@
 #include <linux/workqueue.h>
 #include <linux/nmi.h>
 #include <linux/acpi.h>
-#include <linux/acpi_io.h>
 #include <linux/efi.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
@@ -49,18 +48,15 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
+#include "internal.h"
 #define _COMPONENT		ACPI_OS_SERVICES
 ACPI_MODULE_NAME("osl");
-#define PREFIX		"ACPI: "
+
 struct acpi_os_dpc {
 	acpi_osd_exec_callback function;
 	void *context;
 	struct work_struct work;
-	int wait;
 };
 #ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -79,13 +75,14 @@ extern char line_buf[80];
 static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
+static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
+				      u32 val_b);
 static acpi_osd_handler acpi_irq_handler;
 static void *acpi_irq_context;
 static struct workqueue_struct *kacpid_wq;
 static struct workqueue_struct *kacpi_notify_wq;
-struct workqueue_struct *kacpi_hotplug_wq;
-EXPORT_SYMBOL(kacpi_hotplug_wq);
+static struct workqueue_struct *kacpi_hotplug_wq;
 /*
  * This list of permanent mappings is for memory that may be accessed from
@@ -141,7 +138,8 @@ static struct osi_linux {
 	unsigned int	enable:1;
 	unsigned int	dmi:1;
 	unsigned int	cmdline:1;
-} osi_linux = {0, 0, 0};
+	unsigned int	default_disabling:1;
+} osi_linux = {0, 0, 0, 0};
 static u32 acpi_osi_handler(acpi_string interface, u32 supported)
 {
@@ -237,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
 static unsigned long acpi_rsdp;
 static int __init setup_acpi_rsdp(char *arg)
 {
-	acpi_rsdp = simple_strtoul(arg, NULL, 16);
+	if (kstrtoul(arg, 16, &acpi_rsdp))
+		return -EINVAL;
 	return 0;
 }
 early_param("acpi_rsdp", setup_acpi_rsdp);
@@ -250,7 +249,7 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
 		return acpi_rsdp;
 #endif
-	if (efi_enabled) {
+	if (efi_enabled(EFI_CONFIG_TABLES)) {
 		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
 			return efi.acpi20;
 		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
@@ -357,7 +356,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
 }
 void __iomem *__init_refok
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map;
 	void __iomem *virt;
@@ -403,10 +402,17 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 	list_add_tail_rcu(&map->list, &acpi_ioremaps);
-      out:
+out:
 	mutex_unlock(&acpi_ioremap_lock);
 	return map->virt + (phys - map->phys);
 }
+EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
+
+void *__init_refok
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+{
+	return (void *)acpi_os_map_iomem(phys, size);
+}
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
@@ -424,7 +430,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
 	}
 }
-void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
+void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
 {
 	struct acpi_ioremap *map;
@@ -445,6 +451,12 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 	acpi_os_map_cleanup(map);
 }
+EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
+
+void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
+{
+	return acpi_os_unmap_iomem((void __iomem *)virt, size);
+}
 EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
 void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
@@ -466,7 +478,7 @@ int acpi_os_map_generic_address(struct acpi_generic_address *gas)
 	if (!addr || !gas->bit_width)
 		return -EINVAL;
-	virt = acpi_os_map_memory(addr, gas->bit_width / 8);
+	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
 	if (!virt)
 		return -EIO;
@@ -534,6 +546,161 @@ acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
 	return AE_OK;
 }
+#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+#include <linux/earlycpio.h>
+#include <linux/memblock.h>
+
+static u64 acpi_tables_addr;
+static int all_tables_size;
+
+/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+{
+	u8 sum = 0;
+	u8 *end = buffer + length;
+
+	while (buffer < end)
+		sum = (u8) (sum + *(buffer++));
+	return sum;
+}
+
+/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
+static const char * const table_sigs[] = {
+	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
+	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
+	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
+	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
+	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
+	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
+	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
+	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
+	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
+
+#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
+
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
+
+void __init acpi_initrd_override(void *data, size_t size)
+{
+	int sig, no, table_nr = 0, total_offset = 0;
+	long offset = 0;
+	struct acpi_table_header *table;
+	char cpio_path[32] = "kernel/firmware/acpi/";
+	struct cpio_data file;
+
+	if (data == NULL || size == 0)
+		return;
+
+	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
+		file = find_cpio_data(cpio_path, data, size, &offset);
+		if (!file.data)
+			break;
+
+		data += offset;
+		size -= offset;
+
+		if (file.size < sizeof(struct acpi_table_header)) {
+			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+
+		table = file.data;
+
+		for (sig = 0; table_sigs[sig]; sig++)
+			if (!memcmp(table->signature, table_sigs[sig], 4))
+				break;
+
+		if (!table_sigs[sig]) {
+			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+		if (file.size != table->length) {
+			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+		if (acpi_table_checksum(file.data, table->length)) {
+			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+				cpio_path, file.name);
+			continue;
+		}
+
+		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
+			table->signature, cpio_path, file.name, table->length);
+
+		all_tables_size += table->length;
+		acpi_initrd_files[table_nr].data = file.data;
+		acpi_initrd_files[table_nr].size = file.size;
+		table_nr++;
+	}
+	if (table_nr == 0)
+		return;
+
+	acpi_tables_addr =
+		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
+				       all_tables_size, PAGE_SIZE);
+	if (!acpi_tables_addr) {
+		WARN_ON(1);
+		return;
+	}
+	/*
+	 * Only calling e820_add_reserve does not work and the
+	 * tables are invalid (memory got used) later.
+	 * memblock_reserve works as expected and the tables won't get modified.
+	 * But it's not enough on X86 because ioremap will
+	 * complain later (used by acpi_os_map_memory) that the pages
+	 * that should get mapped are not marked "reserved".
+	 * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
+	 * works fine.
+	 */
+	memblock_reserve(acpi_tables_addr, all_tables_size);
+	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+
+	/*
+	 * early_ioremap only can remap 256k one time. If we map all
+	 * tables one time, we will hit the limit. Need to map chunks
+	 * one by one during copying the same as that in relocate_initrd().
+	 */
+	for (no = 0; no < table_nr; no++) {
+		unsigned char *src_p = acpi_initrd_files[no].data;
+		phys_addr_t size = acpi_initrd_files[no].size;
+		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+		phys_addr_t slop, clen;
+		char *dest_p;
+
+		total_offset += size;
+
+		while (size) {
+			slop = dest_addr & ~PAGE_MASK;
+			clen = size;
+			if (clen > MAP_CHUNK_SIZE - slop)
+				clen = MAP_CHUNK_SIZE - slop;
+			dest_p = early_ioremap(dest_addr & PAGE_MASK,
+						 clen + slop);
+			memcpy(dest_p + slop, src_p, clen);
+			early_iounmap(dest_p, clen + slop);
+			src_p += clen;
+			dest_addr += clen;
+			size -= clen;
+		}
+	}
+}
+#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
+
+static void acpi_table_taint(struct acpi_table_header *table)
+{
+	pr_warn(PREFIX
+		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
+		table->signature, table->oem_table_id);
+	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
+}
+
+
 acpi_status
 acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
@@ -547,24 +714,73 @@ acpi_os_table_override(struct acpi_table_header * existing_table,
 	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
 		*new_table = (struct acpi_table_header *)AmlCode;
 #endif
-	if (*new_table != NULL) {
-		printk(KERN_WARNING PREFIX "Override [%4.4s-%8.8s], "
-		       "this is unsafe: tainting kernel\n",
-		       existing_table->signature,
-		       existing_table->oem_table_id);
-		add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
-	}
+	if (*new_table != NULL)
+		acpi_table_taint(existing_table);
 	return AE_OK;
 }
 acpi_status
 acpi_os_physical_table_override(struct acpi_table_header *existing_table,
-				acpi_physical_address * new_address,
-				u32 *new_table_length)
+				acpi_physical_address *address,
+				u32 *table_length)
 {
-	return AE_SUPPORT;
-}
+#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
+	*table_length = 0;
+	*address = 0;
+	return AE_OK;
+#else
+	int table_offset = 0;
+	struct acpi_table_header *table;
+
+	*table_length = 0;
+	*address = 0;
+
+	if (!acpi_tables_addr)
+		return AE_OK;
+
+	do {
+		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
+			WARN_ON(1);
+			return AE_OK;
+		}
+
+		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
+					   ACPI_HEADER_SIZE);
+
+		if (table_offset + table->length > all_tables_size) {
+			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+			WARN_ON(1);
+			return AE_OK;
+		}
+		table_offset += table->length;
+
+		if (memcmp(existing_table->signature, table->signature, 4)) {
+			acpi_os_unmap_memory(table,
+				     ACPI_HEADER_SIZE);
+			continue;
+		}
+
+		/* Only override tables with matching oem id */
+		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
+			   ACPI_OEM_TABLE_ID_SIZE)) {
+			acpi_os_unmap_memory(table,
+				     ACPI_HEADER_SIZE);
+			continue;
+		}
+
+		table_offset -= table->length;
+		*table_length = table->length;
+		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
+		*address = acpi_tables_addr + table_offset;
+		break;
+	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);
+
+	if (*address != 0)
+		acpi_table_taint(existing_table);
+	return AE_OK;
+#endif
+}
 static irqreturn_t acpi_irq(int irq, void *dev_id)
 {
@@ -607,7 +823,7 @@ acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
 	acpi_irq_handler = handler;
 	acpi_irq_context = context;
-	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
+	if (request_irq(irq, acpi_irq, IRQF_SHARED | IRQF_NO_SUSPEND, "acpi", acpi_irq)) {
 		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
 		acpi_irq_handler = NULL;
 		return AE_NOT_ACQUIRED;
@@ -633,7 +849,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
 void acpi_os_sleep(u64 ms)
 {
-	schedule_timeout_interruptible(msecs_to_jiffies(ms));
+	msleep(ms);
 }
 void acpi_os_stall(u32 us)
@@ -656,19 +872,9 @@ void acpi_os_stall(u32 us)
  */
 u64 acpi_os_get_timer(void)
 {
-	static u64 t;
-
-#ifdef	CONFIG_HPET
-	/* TBD: use HPET if available */
-#endif
-
-#ifdef	CONFIG_X86_PM_TIMER
-	/* TBD: default to PM timer if HPET was not available */
-#endif
-	if (!t)
-		printk(KERN_ERR PREFIX "acpi_os_get_timer() TBD\n");
-
-	return ++t;
+	u64 time_ns = ktime_to_ns(ktime_get());
+	do_div(time_ns, 100);
+	return time_ns;
 }
 acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
@@ -890,9 +1096,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
 {
 	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
-	if (dpc->wait)
-		acpi_os_wait_events_complete();
-
 	dpc->function(dpc->context);
 	kfree(dpc);
 }
@@ -912,8 +1115,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
  *
 ******************************************************************************/
-static acpi_status __acpi_os_execute(acpi_execute_type type,
-	acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+			    acpi_osd_exec_callback function, void *context)
 {
 	acpi_status status = AE_OK;
 	struct acpi_os_dpc *dpc;
@@ -932,7 +1135,7 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	 * having a static work_struct.
 	 */
-	dpc = kmalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
+	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
 	if (!dpc)
 		return AE_NO_MEMORY;
@@ -940,21 +1143,17 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	dpc->context = context;
 	/*
-	 * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
-	 * because the hotplug code may call driver .remove() functions,
-	 * which invoke flush_scheduled_work/acpi_os_wait_events_complete
-	 * to flush these workqueues.
+	 * To prevent lockdep from complaining unnecessarily, make sure that
+	 * there is a different static lockdep key for each workqueue by using
+	 * INIT_WORK() for each of them separately.
 	 */
-	queue = hp ? kacpi_hotplug_wq :
-		(type == OSL_NOTIFY_HANDLER ? kacpi_notify_wq : kacpid_wq);
-	dpc->wait = hp ? 1 : 0;
-
-	if (queue == kacpi_hotplug_wq)
-		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	else if (queue == kacpi_notify_wq)
+	if (type == OSL_NOTIFY_HANDLER) {
+		queue = kacpi_notify_wq;
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
-	else
+	} else {
+		queue = kacpid_wq;
 		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
+	}
 	/*
	 * On some machines, a software-initiated SMI causes corruption unless
@@ -973,37 +1172,70 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
 	}
 	return status;
 }
+EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_execute(acpi_execute_type type,
-			    acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
 {
-	return __acpi_os_execute(type, function, context, 0);
+	flush_workqueue(kacpid_wq);
+	flush_workqueue(kacpi_notify_wq);
 }
-EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
-	void *context)
+struct acpi_hp_work {
+	struct work_struct work;
+	struct acpi_device *adev;
+	u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
 {
-	return __acpi_os_execute(0, function, context, 1);
+	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+	acpi_os_wait_events_complete();
+	acpi_device_hotplug(hpw->adev, hpw->src);
+	kfree(hpw);
 }
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
 {
-	flush_workqueue(kacpid_wq);
-	flush_workqueue(kacpi_notify_wq);
+	struct acpi_hp_work *hpw;
+
+	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+		  adev, src));
+
+	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+	if (!hpw)
+		return AE_NO_MEMORY;
+
+	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+	hpw->adev = adev;
+	hpw->src = src;
+	/*
+	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+	 * the hotplug code may call driver .remove() functions, which may
+	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+	 * these workqueues.
+	 */
+	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+		kfree(hpw);
+		return AE_ERROR;
+	}
+	return AE_OK;
 }
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
+bool acpi_queue_hotplug_work(struct work_struct *work)
+{
+	return queue_work(kacpi_hotplug_wq, work);
+}
 acpi_status
 acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
 {
 	struct semaphore *sem = NULL;
-	sem = acpi_os_allocate(sizeof(struct semaphore));
+	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
 	if (!sem)
 		return AE_NO_MEMORY;
-	memset(sem, 0, sizeof(struct semaphore));
 	sema_init(sem, initial_units);
@@ -1061,7 +1293,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
 		jiffies = MAX_SCHEDULE_TIMEOUT;
 	else
 		jiffies = msecs_to_jiffies(timeout);
-	
+
 	ret = down_timeout(sem, jiffies);
 	if (ret)
 		status = AE_TIME;
@@ -1152,7 +1384,7 @@ static int __init acpi_os_name_setup(char *str)
 	if (!str || !*str)
 		return 0;
-	for (; count-- && str && *str; str++) {
+	for (; count-- && *str; str++) {
 		if (isalnum(*str) || *str == ' ' || *str == ':')
 			*p++ = *str;
 		else if (*str == '\'' || *str == '"')
@@ -1176,8 +1408,8 @@ struct osi_setup_entry {
 	bool enable;
 };
-static struct osi_setup_entry __initdata
-	osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+static struct osi_setup_entry
-		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
 	{"Module Device", true},
 	{"Processor Device", true},
 	{"3.0 _SCP Extensions", true},
@@ -1201,6 +1433,17 @@ void __init acpi_osi_setup(char *str)
 	if (*str == '!') {
 		str++;
+		if (*str == '\0') {
+			osi_linux.default_disabling = 1;
+			return;
+		} else if (*str == '*') {
+			acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
+			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+				osi = &osi_setup_entries[i];
+				osi->enable = false;
+			}
+			return;
+		}
 		enable = false;
 	}
@@ -1266,6 +1509,13 @@ static void __init acpi_osi_setup_late(void)
 	int i;
 	acpi_status status;
+	if (osi_linux.default_disabling) {
+		status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+
+		if (ACPI_SUCCESS(status))
+			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
+	}
+
 	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
 		osi = &osi_setup_entries[i];
 		str = osi->string;
@@ -1300,17 +1550,21 @@ static int __init osi_setup(char *str)
 __setup("acpi_osi=", osi_setup);
-/* enable serialization to combat AE_ALREADY_EXISTS errors */
-static int __init acpi_serialize_setup(char *str)
+/*
+ * Disable the auto-serialization of named objects creation methods.
+ *
+ * This feature is enabled by default. It marks the AML control methods
+ * that contain the opcodes to create named objects as "Serialized".
+ */
+static int __init acpi_no_auto_serialize_setup(char *str)
 {
-	printk(KERN_INFO PREFIX "serialize enabled\n");
-
-	acpi_gbl_all_methods_serialized = TRUE;
+	acpi_gbl_auto_serialize_methods = FALSE;
+	pr_info("ACPI: auto-serialization disabled\n");
 	return 1;
 }
-__setup("acpi_serialize", acpi_serialize_setup);
+__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
 /* Check of resource interference between native drivers and ACPI
  * OperationRegions (SystemIO and System Memory only).
@@ -1370,7 +1624,7 @@ int acpi_check_resource_conflict(const struct resource *res)
 	else
 		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
-	length = res->end - res->start + 1;
+	length = resource_size(res);
 	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
 		warn = 1;
 	clash = acpi_check_address_range(space_id, res->start, length, warn);
@@ -1530,12 +1784,43 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
 }
 #endif
+static int __init acpi_no_static_ssdt_setup(char *s)
+{
+	acpi_gbl_disable_ssdt_table_install = TRUE;
+	pr_info("ACPI: static SSDT installation disabled\n");
+
+	return 0;
+}
+
+early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
+
+static int __init acpi_disable_return_repair(char *s)
+{
+	printk(KERN_NOTICE PREFIX
+	       "ACPI: Predefined validation mechanism disabled\n");
+	acpi_gbl_disable_auto_repair = TRUE;
+
+	return 1;
+}
+
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
+
 acpi_status __init acpi_os_initialize(void)
 {
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
+		/*
+		 * Use acpi_os_map_generic_address to pre-map the reset
+		 * register if it's in system memory.
+		 */
+		int rv;
+
+		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
+		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+	}
 	return AE_OK;
 }
@@ -1544,7 +1829,7 @@ acpi_status __init acpi_os_initialize1(void)
 {
 	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
 	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
-	kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
 	BUG_ON(!kacpid_wq);
 	BUG_ON(!kacpi_notify_wq);
 	BUG_ON(!kacpi_hotplug_wq);
@@ -1564,6 +1849,8 @@ acpi_status acpi_os_terminate(void)
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
+		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
 	destroy_workqueue(kacpid_wq);
 	destroy_workqueue(kacpi_notify_wq);
@@ -1592,3 +1879,24 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
 {
 	__acpi_os_prepare_sleep = func;
 }
+
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
+					   u32 val_b)
+{
+	int rc = 0;
+	if (__acpi_os_prepare_extended_sleep)
+		rc = __acpi_os_prepare_extended_sleep(sleep_state,
+						      val_a, val_b);
+	if (rc < 0)
+		return AE_ERROR;
+	else if (rc > 0)
+		return AE_CTRL_SKIP;
+
+	return AE_OK;
+}
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+				        u32 val_a, u32 val_b))
+{
+	__acpi_os_prepare_extended_sleep = func;
+}
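
Usage note (illustrative sketch, not part of the diff above): the mapping hunks split the old acpi_os_map_memory() into an __iomem-typed acpi_os_map_iomem()/acpi_os_unmap_iomem() pair and keep the old names as plain-pointer wrappers for ACPICA. A minimal caller might look like the following; the function name my_read_mmio_reg and the fixed 8-byte length are assumptions made up for the example:

	#include <linux/acpi.h>
	#include <linux/io.h>

	/* Map a physical MMIO register region through the ACPI ioremap tracking list. */
	static u32 my_read_mmio_reg(acpi_physical_address phys)
	{
		void __iomem *regs;
		u32 val;

		regs = acpi_os_map_iomem(phys, 8);	/* returns NULL on failure */
		if (!regs)
			return 0;

		val = readl(regs);			/* __iomem pointer, so readl()/writel() apply */
		acpi_os_unmap_iomem(regs, 8);		/* drops the reference; unmapped once unused */
		return val;
	}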
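The final hunk wires up the extended (SLEEP_CONTROL/SLEEP_STATUS register) counterpart of the prepare-sleep hook: a negative return from the registered callback is mapped to AE_ERROR and a positive one to AE_CTRL_SKIP, i.e. "handled, skip the native register write". A sketch of registering such a hook; my_prepare_extended_sleep and my_sleep_hook_init are hypothetical names used only for illustration:

	#include <linux/acpi.h>
	#include <linux/init.h>

	/* Return 0 to let ACPICA write SLEEP_CONTROL itself, >0 to skip it, <0 on error. */
	static int my_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
	{
		if (sleep_state != ACPI_STATE_S3)
			return 0;	/* nothing special to do, take the native path */

		/* e.g. hand val_a/val_b to a hypervisor or firmware mailbox here */
		return 1;		/* handled: acpi_os_prepare_extended_sleep() returns AE_CTRL_SKIP */
	}

	static int __init my_sleep_hook_init(void)
	{
		acpi_os_set_prepare_extended_sleep(my_prepare_extended_sleep);
		return 0;
	}
	late_initcall(my_sleep_hook_init);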
