path: root/drivers/acpi/osl.c
Diffstat (limited to 'drivers/acpi/osl.c')
-rw-r--r--  drivers/acpi/osl.c | 233
1 file changed, 143 insertions(+), 90 deletions(-)
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index e5f416c7f66..bad25b070fe 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -39,7 +39,6 @@
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
-#include <linux/acpi_io.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
@@ -49,19 +48,15 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include <acpi/acpi.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/processor.h>
#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
-#define PREFIX "ACPI: "
+
struct acpi_os_dpc {
acpi_osd_exec_callback function;
void *context;
struct work_struct work;
- int wait;
};
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -240,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
- acpi_rsdp = simple_strtoul(arg, NULL, 16);
+ if (kstrtoul(arg, 16, &acpi_rsdp))
+ return -EINVAL;
return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
@@ -360,7 +356,7 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
}
void __iomem *__init_refok
-acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;
@@ -406,10 +402,17 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
list_add_tail_rcu(&map->list, &acpi_ioremaps);
- out:
+out:
mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
+EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
+
+void *__init_refok
+acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
+{
+ return (void *)acpi_os_map_iomem(phys, size);
+}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
@@ -427,7 +430,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
}
}
-void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
+void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
struct acpi_ioremap *map;
@@ -448,6 +451,12 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
acpi_os_map_cleanup(map);
}
+EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
+
+void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
+{
+ return acpi_os_unmap_iomem((void __iomem *)virt, size);
+}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
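The mapping hunks above split the interface in two: acpi_os_map_iomem()/acpi_os_unmap_iomem() carry the __iomem annotation for MMIO users, while acpi_os_map_memory()/acpi_os_unmap_memory() remain as thin plain-pointer wrappers. A hypothetical caller sketch; the physical address and length below are placeholders, not values from this patch:

	acpi_physical_address phys = 0xfed40000;	/* placeholder address */
	void __iomem *reg = acpi_os_map_iomem(phys, 4);

	if (reg) {
		u32 v = readl(reg);	/* typed MMIO access, sparse-clean */

		pr_debug("reg contents: %#x\n", v);
		acpi_os_unmap_iomem(reg, 4);
	}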
@@ -469,7 +478,7 @@ int acpi_os_map_generic_address(struct acpi_generic_address *gas)
if (!addr || !gas->bit_width)
return -EINVAL;
- virt = acpi_os_map_memory(addr, gas->bit_width / 8);
+ virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
if (!virt)
return -EIO;
@@ -545,7 +554,7 @@ static u64 acpi_tables_addr;
static int all_tables_size;
/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
-u8 __init acpi_table_checksum(u8 *buffer, u32 length)
+static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
u8 sum = 0;
u8 *end = buffer + length;
@@ -569,8 +578,10 @@ static const char * const table_sigs[] = {
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
-/* Must not increase 10 or needs code modification below */
-#define ACPI_OVERRIDE_TABLES 10
+#define ACPI_OVERRIDE_TABLES 64
+static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];
+
+#define MAP_CHUNK_SIZE (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
@@ -579,8 +590,6 @@ void __init acpi_initrd_override(void *data, size_t size)
struct acpi_table_header *table;
char cpio_path[32] = "kernel/firmware/acpi/";
struct cpio_data file;
- struct cpio_data early_initrd_files[ACPI_OVERRIDE_TABLES];
- char *p;
if (data == NULL || size == 0)
return;
@@ -625,8 +634,8 @@ void __init acpi_initrd_override(void *data, size_t size)
table->signature, cpio_path, file.name, table->length);
all_tables_size += table->length;
- early_initrd_files[table_nr].data = file.data;
- early_initrd_files[table_nr].size = file.size;
+ acpi_initrd_files[table_nr].data = file.data;
+ acpi_initrd_files[table_nr].size = file.size;
table_nr++;
}
if (table_nr == 0)
@@ -652,14 +661,34 @@ void __init acpi_initrd_override(void *data, size_t size)
memblock_reserve(acpi_tables_addr, all_tables_size);
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
- p = early_ioremap(acpi_tables_addr, all_tables_size);
-
+ /*
+ * early_ioremap() can only remap 256 KiB at a time, so mapping all
+ * tables at once would exceed that limit.  Map and copy one chunk at
+ * a time instead, as relocate_initrd() does.
+ */
for (no = 0; no < table_nr; no++) {
- memcpy(p + total_offset, early_initrd_files[no].data,
- early_initrd_files[no].size);
- total_offset += early_initrd_files[no].size;
+ unsigned char *src_p = acpi_initrd_files[no].data;
+ phys_addr_t size = acpi_initrd_files[no].size;
+ phys_addr_t dest_addr = acpi_tables_addr + total_offset;
+ phys_addr_t slop, clen;
+ char *dest_p;
+
+ total_offset += size;
+
+ while (size) {
+ slop = dest_addr & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAP_CHUNK_SIZE - slop)
+ clen = MAP_CHUNK_SIZE - slop;
+ dest_p = early_ioremap(dest_addr & PAGE_MASK,
+ clen + slop);
+ memcpy(dest_p + slop, src_p, clen);
+ early_iounmap(dest_p, clen + slop);
+ src_p += clen;
+ dest_addr += clen;
+ size -= clen;
+ }
}
- early_iounmap(p, all_tables_size);
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
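A worked illustration of the chunked copy above, with illustrative numbers not taken from the patch: on x86, NR_FIX_BTMAPS is typically 64 and PAGE_SHIFT is 12, so MAP_CHUNK_SIZE is 64 << 12 = 256 KiB. For a destination such as dest_addr = 0x12345678 the first iteration computes:

	slop = dest_addr & ~PAGE_MASK;			/* 0x678 */
	clen = min(size, MAP_CHUNK_SIZE - slop);	/* clen + slop <= 256 KiB */
	dest_p = early_ioremap(dest_addr & PAGE_MASK,	/* window at 0x12345000 */
			       clen + slop);

so every early_ioremap() window stays within a single 256 KiB fixmap range regardless of how the copy destination is aligned.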
@@ -820,7 +849,7 @@ acpi_status acpi_os_remove_interrupt_handler(u32 irq, acpi_osd_handler handler)
void acpi_os_sleep(u64 ms)
{
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ msleep(ms);
}
void acpi_os_stall(u32 us)
@@ -1067,9 +1096,6 @@ static void acpi_os_execute_deferred(struct work_struct *work)
{
struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);
- if (dpc->wait)
- acpi_os_wait_events_complete();
-
dpc->function(dpc->context);
kfree(dpc);
}
@@ -1089,8 +1115,8 @@ static void acpi_os_execute_deferred(struct work_struct *work)
*
******************************************************************************/
-static acpi_status __acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context, int hp)
+acpi_status acpi_os_execute(acpi_execute_type type,
+ acpi_osd_exec_callback function, void *context)
{
acpi_status status = AE_OK;
struct acpi_os_dpc *dpc;
@@ -1117,20 +1143,11 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
dpc->context = context;
/*
- * We can't run hotplug code in keventd_wq/kacpid_wq/kacpid_notify_wq
- * because the hotplug code may call driver .remove() functions,
- * which invoke flush_scheduled_work/acpi_os_wait_events_complete
- * to flush these workqueues.
- *
* To prevent lockdep from complaining unnecessarily, make sure that
* there is a different static lockdep key for each workqueue by using
* INIT_WORK() for each of them separately.
*/
- if (hp) {
- queue = kacpi_hotplug_wq;
- dpc->wait = 1;
- INIT_WORK(&dpc->work, acpi_os_execute_deferred);
- } else if (type == OSL_NOTIFY_HANDLER) {
+ if (type == OSL_NOTIFY_HANDLER) {
queue = kacpi_notify_wq;
INIT_WORK(&dpc->work, acpi_os_execute_deferred);
} else {
@@ -1155,38 +1172,70 @@ static acpi_status __acpi_os_execute(acpi_execute_type type,
}
return status;
}
+EXPORT_SYMBOL(acpi_os_execute);
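The lockdep comment retained above is why INIT_WORK() appears once per branch instead of once before the queue selection: every textual INIT_WORK() expansion declares its own static lock_class_key, so one call site per workqueue keeps the dependency chains of kacpid_wq and kacpi_notify_wq separate. A compressed sketch of that pattern (same shape as the code above, shown only for illustration):

	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);	/* key A */
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);	/* key B */
	}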
-acpi_status acpi_os_execute(acpi_execute_type type,
- acpi_osd_exec_callback function, void *context)
+void acpi_os_wait_events_complete(void)
{
- return __acpi_os_execute(type, function, context, 0);
+ flush_workqueue(kacpid_wq);
+ flush_workqueue(kacpi_notify_wq);
}
-EXPORT_SYMBOL(acpi_os_execute);
-acpi_status acpi_os_hotplug_execute(acpi_osd_exec_callback function,
- void *context)
+struct acpi_hp_work {
+ struct work_struct work;
+ struct acpi_device *adev;
+ u32 src;
+};
+
+static void acpi_hotplug_work_fn(struct work_struct *work)
{
- return __acpi_os_execute(0, function, context, 1);
+ struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);
+
+ acpi_os_wait_events_complete();
+ acpi_device_hotplug(hpw->adev, hpw->src);
+ kfree(hpw);
}
-EXPORT_SYMBOL(acpi_os_hotplug_execute);
-void acpi_os_wait_events_complete(void)
+acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
- flush_workqueue(kacpid_wq);
- flush_workqueue(kacpi_notify_wq);
+ struct acpi_hp_work *hpw;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+ "Scheduling hotplug event (%p, %u) for deferred execution.\n",
+ adev, src));
+
+ hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
+ if (!hpw)
+ return AE_NO_MEMORY;
+
+ INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
+ hpw->adev = adev;
+ hpw->src = src;
+ /*
+ * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
+ * the hotplug code may call driver .remove() functions, which may
+ * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
+ * these workqueues.
+ */
+ if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
+ kfree(hpw);
+ return AE_ERROR;
+ }
+ return AE_OK;
}
-EXPORT_SYMBOL(acpi_os_wait_events_complete);
+bool acpi_queue_hotplug_work(struct work_struct *work)
+{
+ return queue_work(kacpi_hotplug_wq, work);
+}
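A hypothetical caller sketch for the two new hotplug entry points; the function and work-item names below are invented for illustration:

	static void example_hotplug_fn(struct work_struct *work)
	{
		/* Runs on kacpi_hotplug_wq, which acpi_os_initialize1()
		 * now creates as an ordered workqueue, so items execute
		 * one at a time. */
	}

	static DECLARE_WORK(example_hotplug_work, example_hotplug_fn);

	static void example_kick_hotplug(void)
	{
		if (!acpi_queue_hotplug_work(&example_hotplug_work))
			pr_debug("hotplug work already queued\n");
	}

Code that needs the full device hotplug path instead calls acpi_hotplug_schedule(adev, src), which wraps the request in an acpi_hp_work item and defers it to the same workqueue.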
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
struct semaphore *sem = NULL;
- sem = acpi_os_allocate(sizeof(struct semaphore));
+ sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
if (!sem)
return AE_NO_MEMORY;
- memset(sem, 0, sizeof(struct semaphore));
sema_init(sem, initial_units);
@@ -1244,7 +1293,7 @@ acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
jiffies = MAX_SCHEDULE_TIMEOUT;
else
jiffies = msecs_to_jiffies(timeout);
-
+
ret = down_timeout(sem, jiffies);
if (ret)
status = AE_TIME;
@@ -1335,7 +1384,7 @@ static int __init acpi_os_name_setup(char *str)
if (!str || !*str)
return 0;
- for (; count-- && str && *str; str++) {
+ for (; count-- && *str; str++) {
if (isalnum(*str) || *str == ' ' || *str == ':')
*p++ = *str;
else if (*str == '\'' || *str == '"')
@@ -1501,17 +1550,21 @@ static int __init osi_setup(char *str)
__setup("acpi_osi=", osi_setup);
-/* enable serialization to combat AE_ALREADY_EXISTS errors */
-static int __init acpi_serialize_setup(char *str)
+/*
+ * Disable automatic serialization of control methods that create
+ * named objects.
+ *
+ * This feature is enabled by default: AML control methods that contain
+ * opcodes creating named objects are marked "Serialized".
+ */
+static int __init acpi_no_auto_serialize_setup(char *str)
{
- printk(KERN_INFO PREFIX "serialize enabled\n");
-
- acpi_gbl_all_methods_serialized = TRUE;
+ acpi_gbl_auto_serialize_methods = FALSE;
+ pr_info("ACPI: auto-serialization disabled\n");
return 1;
}
-__setup("acpi_serialize", acpi_serialize_setup);
+__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check of resource interference between native drivers and ACPI
* OperationRegions (SystemIO and System Memory only).
@@ -1731,16 +1784,26 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
}
#endif
-static int __init acpi_no_auto_ssdt_setup(char *s)
+static int __init acpi_no_static_ssdt_setup(char *s)
{
- printk(KERN_NOTICE PREFIX "SSDT auto-load disabled\n");
+ acpi_gbl_disable_ssdt_table_install = TRUE;
+ pr_info("ACPI: static SSDT installation disabled\n");
+
+ return 0;
+}
+
+early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);
- acpi_gbl_disable_ssdt_table_load = TRUE;
+static int __init acpi_disable_return_repair(char *s)
+{
+ printk(KERN_NOTICE PREFIX
+        "Predefined validation mechanism disabled\n");
+ acpi_gbl_disable_auto_repair = TRUE;
- return 1;
+ return 1;
}
-__setup("acpi_no_auto_ssdt", acpi_no_auto_ssdt_setup);
+__setup("acpica_no_return_repair", acpi_disable_return_repair);
acpi_status __init acpi_os_initialize(void)
{
@@ -1748,6 +1811,16 @@ acpi_status __init acpi_os_initialize(void)
acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+ if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
+ /*
+ * Use acpi_os_map_generic_address to pre-map the reset
+ * register if it's in system memory.
+ */
+ int rv;
+
+ rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
+ pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+ }
return AE_OK;
}
@@ -1756,7 +1829,7 @@ acpi_status __init acpi_os_initialize1(void)
{
kacpid_wq = alloc_workqueue("kacpid", 0, 1);
kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
- kacpi_hotplug_wq = alloc_workqueue("kacpi_hotplug", 0, 1);
+ kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
BUG_ON(!kacpid_wq);
BUG_ON(!kacpi_notify_wq);
BUG_ON(!kacpi_hotplug_wq);
@@ -1776,6 +1849,8 @@ acpi_status acpi_os_terminate(void)
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+ if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
+ acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
destroy_workqueue(kacpid_wq);
destroy_workqueue(kacpi_notify_wq);
@@ -1825,25 +1900,3 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
{
__acpi_os_prepare_extended_sleep = func;
}
-
-
-void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
- void (*func)(struct work_struct *work))
-{
- struct acpi_hp_work *hp_work;
- int ret;
-
- hp_work = kmalloc(sizeof(*hp_work), GFP_KERNEL);
- if (!hp_work)
- return;
-
- hp_work->handle = handle;
- hp_work->type = type;
- hp_work->context = context;
-
- INIT_WORK(&hp_work->work, func);
- ret = queue_work(kacpi_hotplug_wq, &hp_work->work);
- if (!ret)
- kfree(hp_work);
-}
-EXPORT_SYMBOL_GPL(alloc_acpi_hp_work);