Diffstat (limited to 'drivers/acpi/ec.c')
 -rw-r--r--  drivers/acpi/ec.c | 742
1 file changed, 405 insertions, 337 deletions
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index fd1801bdee6..a66ab658abb 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -1,11 +1,14 @@ /* - * ec.c - ACPI Embedded Controller Driver (v2.1) + * ec.c - ACPI Embedded Controller Driver (v2.2) * - * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de> - * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> - * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2001-2014 Intel Corporation + * Author: 2014 Lv Zheng <lv.zheng@intel.com> + * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com> + * 2006 Denis Sadykov <denis.m.sadykov@intel.com> + * 2004 Luming Yu <luming.yu@intel.com> + * 2001, 2002 Andy Grover <andrew.grover@intel.com> + * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> + * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * @@ -28,31 +31,31 @@ /* Uncomment next line to get verbose printout */ /* #define DEBUG */ +#define pr_fmt(fmt) "ACPI : EC: " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/delay.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/spinlock.h> -#include <asm/io.h> -#include <acpi/acpi_bus.h> -#include <acpi/acpi_drivers.h> +#include <linux/slab.h> +#include <linux/acpi.h> #include <linux/dmi.h> +#include <asm/io.h> + +#include "internal.h" #define ACPI_EC_CLASS "embedded_controller" #define ACPI_EC_DEVICE_NAME "Embedded Controller" #define ACPI_EC_FILE_INFO "info" -#define PREFIX "ACPI: EC: " - /* EC status register */ #define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ #define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ +#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */ #define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ #define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ @@ -67,22 +70,34 @@ enum ec_command { #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. 
to get global lock */ -#define ACPI_EC_CDELAY 10 /* Wait 10us before polling EC */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ - -#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts - per one transaction */ +#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query + * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ EC_FLAGS_GPE_STORM, /* GPE storm detected */ - EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and + EC_FLAGS_HANDLERS_INSTALLED, /* Handlers for GPE and * OpReg are installed */ + EC_FLAGS_BLOCKED, /* Transactions are blocked */ }; -/* If we find an EC via the ECDT, we need to keep a ptr to its context */ -/* External interfaces use first EC only, so remember */ -typedef int (*acpi_ec_query_func) (void *data); +#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */ +#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */ + +/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ +static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; +module_param(ec_delay, uint, 0644); +MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); + +/* + * If the number of false interrupts per one transaction exceeds + * this threshold, will think there is a GPE storm happened and + * will disable the GPE for normal transaction. + */ +static unsigned int ec_storm_threshold __read_mostly = 8; +module_param(ec_storm_threshold, uint, 0644); +MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); struct acpi_ec_query_handler { struct list_head node; @@ -101,26 +116,16 @@ struct transaction { u8 ri; u8 wlen; u8 rlen; - bool done; + u8 flags; }; -static struct acpi_ec { - acpi_handle handle; - unsigned long gpe; - unsigned long command_addr; - unsigned long data_addr; - unsigned long global_lock; - unsigned long flags; - struct mutex lock; - wait_queue_head_t wait; - struct list_head list; - struct transaction *curr; - spinlock_t curr_lock; -} *boot_ec, *first_ec; +struct acpi_ec *boot_ec, *first_ec; +EXPORT_SYMBOL(first_ec); static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- Transaction Management @@ -129,86 +134,113 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ static inline u8 acpi_ec_read_status(struct acpi_ec *ec) { u8 x = inb(ec->command_addr); - pr_debug(PREFIX "---> status = 0x%2.2x\n", x); + pr_debug("EC_SC(R) = 0x%2.2x " + "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n", + x, + !!(x & ACPI_EC_FLAG_SCI), + !!(x & ACPI_EC_FLAG_BURST), + !!(x & ACPI_EC_FLAG_CMD), + !!(x & ACPI_EC_FLAG_IBF), + !!(x & ACPI_EC_FLAG_OBF)); return x; } static inline u8 acpi_ec_read_data(struct acpi_ec *ec) { u8 x = inb(ec->data_addr); - pr_debug(PREFIX "---> data = 0x%2.2x\n", x); + pr_debug("EC_DATA(R) = 0x%2.2x\n", x); return x; } static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) { - pr_debug(PREFIX "<--- command = 0x%2.2x\n", command); + pr_debug("EC_SC(W) = 0x%2.2x\n", command); outb(command, ec->command_addr); } static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) { - pr_debug(PREFIX "<--- data = 0x%2.2x\n", data); + pr_debug("EC_DATA(W) = 0x%2.2x\n", 
data); outb(data, ec->data_addr); } -static int ec_transaction_done(struct acpi_ec *ec) +static int ec_transaction_completed(struct acpi_ec *ec) { unsigned long flags; int ret = 0; - spin_lock_irqsave(&ec->curr_lock, flags); - if (!ec->curr || ec->curr->done) + spin_lock_irqsave(&ec->lock, flags); + if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ret = 1; - spin_unlock_irqrestore(&ec->curr_lock, flags); + spin_unlock_irqrestore(&ec->lock, flags); return ret; } -static void start_transaction(struct acpi_ec *ec) +static bool advance_transaction(struct acpi_ec *ec) { - ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; - ec->curr->done = false; - acpi_ec_write_cmd(ec, ec->curr->command); -} + struct transaction *t; + u8 status; + bool wakeup = false; -static void advance_transaction(struct acpi_ec *ec, u8 status) -{ - unsigned long flags; - spin_lock_irqsave(&ec->curr_lock, flags); - if (!ec->curr) - goto unlock; - if (ec->curr->wlen > ec->curr->wi) { - if ((status & ACPI_EC_FLAG_IBF) == 0) - acpi_ec_write_data(ec, - ec->curr->wdata[ec->curr->wi++]); - else - goto err; - } else if (ec->curr->rlen > ec->curr->ri) { - if ((status & ACPI_EC_FLAG_OBF) == 1) { - ec->curr->rdata[ec->curr->ri++] = acpi_ec_read_data(ec); - if (ec->curr->rlen == ec->curr->ri) - ec->curr->done = true; + pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); + status = acpi_ec_read_status(ec); + t = ec->curr; + if (!t) + goto err; + if (t->flags & ACPI_EC_COMMAND_POLL) { + if (t->wlen > t->wi) { + if ((status & ACPI_EC_FLAG_IBF) == 0) + acpi_ec_write_data(ec, t->wdata[t->wi++]); + else + goto err; + } else if (t->rlen > t->ri) { + if ((status & ACPI_EC_FLAG_OBF) == 1) { + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + } else + goto err; + } else if (t->wlen == t->wi && + (status & ACPI_EC_FLAG_IBF) == 0) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; + wakeup = true; + } + return wakeup; + } else { + if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; } else goto err; - } else if (ec->curr->wlen == ec->curr->wi && - (status & ACPI_EC_FLAG_IBF) == 0) - ec->curr->done = true; - goto unlock; + return wakeup; + } err: - /* false interrupt, state didn't change */ - if (in_interrupt()) - ++ec->curr->irq_count; -unlock: - spin_unlock_irqrestore(&ec->curr_lock, flags); + /* + * If SCI bit is set, then don't think it's a false IRQ + * otherwise will take a not handled IRQ as a false one. 
+ */ + if (!(status & ACPI_EC_FLAG_SCI)) { + if (in_interrupt() && t) + ++t->irq_count; + } + return wakeup; } -static void acpi_ec_gpe_query(void *ec_cxt); +static void start_transaction(struct acpi_ec *ec) +{ + ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; + ec->curr->flags = 0; + (void)advance_transaction(ec); +} -static int ec_check_sci(struct acpi_ec *ec, u8 state) +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); + +static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) { if (state & ACPI_EC_FLAG_SCI) { if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) - return acpi_os_execute(OSL_EC_BURST_HANDLER, - acpi_ec_gpe_query, ec); + return acpi_ec_sync_query(ec, NULL); } return 0; } @@ -216,30 +248,30 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec *ec) { unsigned long flags; - int repeat = 2; /* number of command restarts */ + int repeat = 5; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + - msecs_to_jiffies(ACPI_EC_DELAY); + msecs_to_jiffies(ec_delay); do { /* don't sleep with disabled interrupts */ if (EC_FLAGS_MSI || irqs_disabled()) { udelay(ACPI_EC_MSI_UDELAY); - if (ec_transaction_done(ec)) + if (ec_transaction_completed(ec)) return 0; } else { if (wait_event_timeout(ec->wait, - ec_transaction_done(ec), + ec_transaction_completed(ec), msecs_to_jiffies(1))) return 0; } - advance_transaction(ec, acpi_ec_read_status(ec)); + spin_lock_irqsave(&ec->lock, flags); + (void)advance_transaction(ec); + spin_unlock_irqrestore(&ec->lock, flags); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) - break; - pr_debug(PREFIX "controller reset, restart transaction\n"); - spin_lock_irqsave(&ec->curr_lock, flags); + pr_debug("controller reset, restart transaction\n"); + spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); - spin_unlock_irqrestore(&ec->curr_lock, flags); + spin_unlock_irqrestore(&ec->lock, flags); } return -ETIME; } @@ -249,56 +281,23 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, { unsigned long tmp; int ret = 0; - pr_debug(PREFIX "transaction start\n"); - /* disable GPE during transaction if storm is detected */ - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { - acpi_disable_gpe(NULL, ec->gpe); - } if (EC_FLAGS_MSI) udelay(ACPI_EC_MSI_UDELAY); /* start transaction */ - spin_lock_irqsave(&ec->curr_lock, tmp); + spin_lock_irqsave(&ec->lock, tmp); /* following two actions should be kept atomic */ ec->curr = t; start_transaction(ec); if (ec->curr->command == ACPI_EC_COMMAND_QUERY) clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); - spin_unlock_irqrestore(&ec->curr_lock, tmp); + spin_unlock_irqrestore(&ec->lock, tmp); ret = ec_poll(ec); - pr_debug(PREFIX "transaction end\n"); - spin_lock_irqsave(&ec->curr_lock, tmp); + spin_lock_irqsave(&ec->lock, tmp); ec->curr = NULL; - spin_unlock_irqrestore(&ec->curr_lock, tmp); - if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { - /* check if we received SCI during transaction */ - ec_check_sci(ec, acpi_ec_read_status(ec)); - /* it is safe to enable GPE outside of transaction */ - acpi_enable_gpe(NULL, ec->gpe); - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { - pr_info(PREFIX "GPE storm detected, " - "transactions will use polling mode\n"); - set_bit(EC_FLAGS_GPE_STORM, &ec->flags); - } + spin_unlock_irqrestore(&ec->lock, tmp); return ret; } -static int ec_check_ibf0(struct acpi_ec *ec) -{ - u8 status = acpi_ec_read_status(ec); - return (status & ACPI_EC_FLAG_IBF) == 0; -} - 
-static int ec_wait_ibf0(struct acpi_ec *ec) -{ - unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY); - /* interrupt wait manually if GPE mode is not active */ - while (time_before(jiffies, delay)) - if (wait_event_timeout(ec->wait, ec_check_ibf0(ec), - msecs_to_jiffies(1))) - return 0; - return -ETIME; -} - static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) { int status; @@ -307,7 +306,11 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) return -EINVAL; if (t->rdata) memset(t->rdata, 0, t->rlen); - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); + if (test_bit(EC_FLAGS_BLOCKED, &ec->flags)) { + status = -EINVAL; + goto unlock; + } if (ec->global_lock) { status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); if (ACPI_FAILURE(status)) { @@ -315,18 +318,33 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) goto unlock; } } - if (ec_wait_ibf0(ec)) { - pr_err(PREFIX "input buffer is not empty, " - "aborting transaction\n"); - status = -ETIME; - goto end; + pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", + t->command, t->wdata ? t->wdata[0] : 0); + /* disable GPE during transaction if storm is detected */ + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { + /* It has to be disabled, so that it doesn't trigger. */ + acpi_disable_gpe(NULL, ec->gpe); } + status = acpi_ec_transaction_unlocked(ec, t); -end: + + /* check if we received SCI during transaction */ + ec_check_sci_sync(ec, acpi_ec_read_status(ec)); + if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { + msleep(1); + /* It is safe to enable the GPE outside of the transaction. */ + acpi_enable_gpe(NULL, ec->gpe); + } else if (t->irq_count > ec_storm_threshold) { + pr_info("GPE storm detected(%d GPEs), " + "transactions will use polling mode\n", + t->irq_count); + set_bit(EC_FLAGS_GPE_STORM, &ec->flags); + } + pr_debug("transaction end\n"); if (ec->global_lock) acpi_release_global_lock(glk); unlock: - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); return status; } @@ -373,28 +391,7 @@ static int acpi_ec_write(struct acpi_ec *ec, u8 address, u8 data) return acpi_ec_transaction(ec, &t); } -/* - * Externally callable EC access functions. For now, assume 1 EC only - */ -int ec_burst_enable(void) -{ - if (!first_ec) - return -ENODEV; - return acpi_ec_burst_enable(first_ec); -} - -EXPORT_SYMBOL(ec_burst_enable); - -int ec_burst_disable(void) -{ - if (!first_ec) - return -ENODEV; - return acpi_ec_burst_disable(first_ec); -} - -EXPORT_SYMBOL(ec_burst_disable); - -int ec_read(u8 addr, u8 * val) +int ec_read(u8 addr, u8 *val) { int err; u8 temp_data; @@ -429,8 +426,7 @@ EXPORT_SYMBOL(ec_write); int ec_transaction(u8 command, const u8 * wdata, unsigned wdata_len, - u8 * rdata, unsigned rdata_len, - int force_poll) + u8 * rdata, unsigned rdata_len) { struct transaction t = {.command = command, .wdata = wdata, .rdata = rdata, @@ -443,7 +439,78 @@ int ec_transaction(u8 command, EXPORT_SYMBOL(ec_transaction); -static int acpi_ec_query(struct acpi_ec *ec, u8 * data) +/* Get the handle to the EC device */ +acpi_handle ec_get_handle(void) +{ + if (!first_ec) + return NULL; + return first_ec->handle; +} + +EXPORT_SYMBOL(ec_get_handle); + +/* + * Process _Q events that might have accumulated in the EC. + * Run with locked ec mutex. 
+ */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_sync_query(ec, &value); + if (status || !value) + break; + } + + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + +void acpi_ec_block_transactions(void) +{ + struct acpi_ec *ec = first_ec; + + if (!ec) + return; + + mutex_lock(&ec->mutex); + /* Prevent transactions from being carried out */ + set_bit(EC_FLAGS_BLOCKED, &ec->flags); + mutex_unlock(&ec->mutex); +} + +void acpi_ec_unblock_transactions(void) +{ + struct acpi_ec *ec = first_ec; + + if (!ec) + return; + + mutex_lock(&ec->mutex); + /* Allow transactions to be carried out again */ + clear_bit(EC_FLAGS_BLOCKED, &ec->flags); + + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); + + mutex_unlock(&ec->mutex); +} + +void acpi_ec_unblock_transactions_early(void) +{ + /* + * Allow transactions to happen again (this function is called from + * atomic context during wakeup, so we don't need to acquire the mutex). + */ + if (first_ec) + clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); +} + +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) { int result; u8 d; @@ -452,20 +519,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data) .wlen = 0, .rlen = 1}; if (!ec || !data) return -EINVAL; - /* * Query the EC to find out which _Qxx method we need to evaluate. * Note that successful completion of the query causes the ACPI_EC_SCI * bit to be cleared (and thus clearing the interrupt source). */ - - result = acpi_ec_transaction(ec, &t); + result = acpi_ec_transaction_unlocked(ec, &t); if (result) return result; - if (!d) return -ENODATA; - *data = d; return 0; } @@ -486,9 +549,9 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit, handler->handle = handle; handler->func = func; handler->data = data; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_add(&handler->node, &ec->list); - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); return 0; } @@ -497,56 +560,95 @@ EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) { struct acpi_ec_query_handler *handler, *tmp; - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { if (query_bit == handler->query_bit) { list_del(&handler->node); kfree(handler); } } - mutex_unlock(&ec->lock); + mutex_unlock(&ec->mutex); } EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); -static void acpi_ec_gpe_query(void *ec_cxt) +static void acpi_ec_run(void *cxt) +{ + struct acpi_ec_query_handler *handler = cxt; + if (!handler) + return; + pr_debug("start query execution\n"); + if (handler->func) + handler->func(handler->data); + else if (handler->handle) + acpi_evaluate_object(handler->handle, NULL, NULL, NULL); + pr_debug("stop query execution\n"); + kfree(handler); +} + +static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data) { - struct acpi_ec *ec = ec_cxt; u8 value = 0; - struct acpi_ec_query_handler *handler, copy; + int status; + struct acpi_ec_query_handler *handler, *copy; + + status = acpi_ec_query_unlocked(ec, &value); + if (data) + *data = value; + if (status) + return status; - if (!ec || acpi_ec_query(ec, &value)) - return; - mutex_lock(&ec->lock); list_for_each_entry(handler, &ec->list, node) { if (value == handler->query_bit) { /* have custom handler for this bit */ - memcpy(©, handler, 
sizeof(copy)); - mutex_unlock(&ec->lock); - if (copy.func) { - copy.func(copy.data); - } else if (copy.handle) { - acpi_evaluate_object(copy.handle, NULL, NULL, NULL); - } - return; + copy = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!copy) + return -ENOMEM; + memcpy(copy, handler, sizeof(*copy)); + pr_debug("push query execution (0x%2x) on queue\n", + value); + return acpi_os_execute((copy->func) ? + OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, + acpi_ec_run, copy); } } - mutex_unlock(&ec->lock); + return 0; } -static u32 acpi_ec_gpe_handler(void *data) +static void acpi_ec_gpe_query(void *ec_cxt) { - struct acpi_ec *ec = data; - u8 status; + struct acpi_ec *ec = ec_cxt; + if (!ec) + return; + mutex_lock(&ec->mutex); + acpi_ec_sync_query(ec, NULL); + mutex_unlock(&ec->mutex); +} - pr_debug(PREFIX "~~~> interrupt\n"); - status = acpi_ec_read_status(ec); +static int ec_check_sci(struct acpi_ec *ec, u8 state) +{ + if (state & ACPI_EC_FLAG_SCI) { + if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { + pr_debug("push gpe query to the queue\n"); + return acpi_os_execute(OSL_NOTIFY_HANDLER, + acpi_ec_gpe_query, ec); + } + } + return 0; +} - advance_transaction(ec, status); - if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0) +static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, + u32 gpe_number, void *data) +{ + unsigned long flags; + struct acpi_ec *ec = data; + + spin_lock_irqsave(&ec->lock, flags); + if (advance_transaction(ec)) wake_up(&ec->wait); - ec_check_sci(ec, status); - return ACPI_INTERRUPT_HANDLED; + spin_unlock_irqrestore(&ec->lock, flags); + ec_check_sci(ec, acpi_ec_read_status(ec)); + return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; } /* -------------------------------------------------------------------------- @@ -555,12 +657,12 @@ static u32 acpi_ec_gpe_handler(void *data) static acpi_status acpi_ec_space_handler(u32 function, acpi_physical_address address, - u32 bits, acpi_integer *value, + u32 bits, u64 *value64, void *handler_context, void *region_context) { struct acpi_ec *ec = handler_context; - int result = 0, i; - u8 temp = 0; + int result = 0, i, bytes = bits / 8; + u8 *value = (u8 *)value64; if ((address > 0xFF) || !value || !handler_context) return AE_BAD_PARAMETER; @@ -568,32 +670,15 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, if (function != ACPI_READ && function != ACPI_WRITE) return AE_BAD_PARAMETER; - if (bits != 8 && acpi_strict) - return AE_BAD_PARAMETER; - - if (EC_FLAGS_MSI) + if (EC_FLAGS_MSI || bits > 8) acpi_ec_burst_enable(ec); - if (function == ACPI_READ) { - result = acpi_ec_read(ec, address, &temp); - *value = temp; - } else { - temp = 0xff & (*value); - result = acpi_ec_write(ec, address, temp); - } - - for (i = 8; unlikely(bits - i > 0); i += 8) { - ++address; - if (function == ACPI_READ) { - result = acpi_ec_read(ec, address, &temp); - (*value) |= ((acpi_integer)temp) << i; - } else { - temp = 0xff & ((*value) >> i); - result = acpi_ec_write(ec, address, temp); - } - } + for (i = 0; i < bytes; ++i, ++address, ++value) + result = (function == ACPI_READ) ? 
+ acpi_ec_read(ec, address, value) : + acpi_ec_write(ec, address, *value); - if (EC_FLAGS_MSI) + if (EC_FLAGS_MSI || bits > 8) acpi_ec_burst_disable(ec); switch (result) { @@ -612,72 +697,6 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address, } /* -------------------------------------------------------------------------- - FS Interface (/proc) - -------------------------------------------------------------------------- */ - -static struct proc_dir_entry *acpi_ec_dir; - -static int acpi_ec_read_info(struct seq_file *seq, void *offset) -{ - struct acpi_ec *ec = seq->private; - - if (!ec) - goto end; - - seq_printf(seq, "gpe:\t\t\t0x%02x\n", (u32) ec->gpe); - seq_printf(seq, "ports:\t\t\t0x%02x, 0x%02x\n", - (unsigned)ec->command_addr, (unsigned)ec->data_addr); - seq_printf(seq, "use global lock:\t%s\n", - ec->global_lock ? "yes" : "no"); - end: - return 0; -} - -static int acpi_ec_info_open_fs(struct inode *inode, struct file *file) -{ - return single_open(file, acpi_ec_read_info, PDE(inode)->data); -} - -static const struct file_operations acpi_ec_info_ops = { - .open = acpi_ec_info_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; - -static int acpi_ec_add_fs(struct acpi_device *device) -{ - struct proc_dir_entry *entry = NULL; - - if (!acpi_device_dir(device)) { - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), - acpi_ec_dir); - if (!acpi_device_dir(device)) - return -ENODEV; - } - - entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO, - acpi_device_dir(device), - &acpi_ec_info_ops, acpi_driver_data(device)); - if (!entry) - return -ENODEV; - return 0; -} - -static int acpi_ec_remove_fs(struct acpi_device *device) -{ - - if (acpi_device_dir(device)) { - remove_proc_entry(ACPI_EC_FILE_INFO, acpi_device_dir(device)); - remove_proc_entry(acpi_device_bid(device), acpi_ec_dir); - acpi_device_dir(device) = NULL; - } - - return 0; -} - -/* -------------------------------------------------------------------------- Driver Interface -------------------------------------------------------------------------- */ static acpi_status @@ -689,10 +708,10 @@ static struct acpi_ec *make_acpi_ec(void) if (!ec) return NULL; ec->flags = 1 << EC_FLAGS_QUERY_PENDING; - mutex_init(&ec->lock); + mutex_init(&ec->mutex); init_waitqueue_head(&ec->wait); INIT_LIST_HEAD(&ec->list); - spin_lock_init(&ec->curr_lock); + spin_lock_init(&ec->lock); return ec; } @@ -754,7 +773,7 @@ static int ec_install_handlers(struct acpi_ec *ec) &acpi_ec_gpe_handler, ec); if (ACPI_FAILURE(status)) return -ENODEV; - acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME); + acpi_enable_gpe(NULL, ec->gpe); status = acpi_install_address_space_handler(ec->handle, ACPI_ADR_SPACE_EC, @@ -767,9 +786,10 @@ static int ec_install_handlers(struct acpi_ec *ec) * The AE_NOT_FOUND error will be ignored and OS * continue to initialize EC. */ - printk(KERN_ERR "Fail in evaluating the _REG object" + pr_err("Fail in evaluating the _REG object" " of EC device. 
Broken bios is suspected.\n"); } else { + acpi_disable_gpe(NULL, ec->gpe); acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler); return -ENODEV; @@ -782,12 +802,13 @@ static int ec_install_handlers(struct acpi_ec *ec) static void ec_remove_handlers(struct acpi_ec *ec) { + acpi_disable_gpe(NULL, ec->gpe); if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle, ACPI_ADR_SPACE_EC, &acpi_ec_space_handler))) - pr_err(PREFIX "failed to remove space handler\n"); + pr_err("failed to remove space handler\n"); if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler))) - pr_err(PREFIX "failed to remove gpe handler\n"); + pr_err("failed to remove gpe handler\n"); clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags); } @@ -816,8 +837,6 @@ static int acpi_ec_add(struct acpi_device *device) return -EINVAL; } - ec->handle = device->handle; - /* Find and register all query methods */ acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1, acpi_ec_register_query_methods, NULL, ec, NULL); @@ -825,18 +844,30 @@ static int acpi_ec_add(struct acpi_device *device) if (!first_ec) first_ec = ec; device->driver_data = ec; - acpi_ec_add_fs(device); - pr_info(PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", + + ret = !!request_region(ec->data_addr, 1, "EC data"); + WARN(!ret, "Could not request EC data io port 0x%lx", ec->data_addr); + ret = !!request_region(ec->command_addr, 1, "EC cmd"); + WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr); + + pr_info("GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n", ec->gpe, ec->command_addr, ec->data_addr); ret = ec_install_handlers(ec); /* EC is fully operational, allow queries */ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + + /* Clear stale _Q events if hardware might require that */ + if (EC_FLAGS_CLEAR_ON_RESUME) { + mutex_lock(&ec->mutex); + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); + } return ret; } -static int acpi_ec_remove(struct acpi_device *device, int type) +static int acpi_ec_remove(struct acpi_device *device) { struct acpi_ec *ec; struct acpi_ec_query_handler *handler, *tmp; @@ -846,13 +877,14 @@ static int acpi_ec_remove(struct acpi_device *device, int type) ec = acpi_driver_data(device); ec_remove_handlers(ec); - mutex_lock(&ec->lock); + mutex_lock(&ec->mutex); list_for_each_entry_safe(handler, tmp, &ec->list, node) { list_del(&handler->node); kfree(handler); } - mutex_unlock(&ec->lock); - acpi_ec_remove_fs(device); + mutex_unlock(&ec->mutex); + release_region(ec->data_addr, 1); + release_region(ec->command_addr, 1); device->driver_data = NULL; if (ec == first_ec) first_ec = NULL; @@ -916,13 +948,48 @@ static int ec_validate_ecdt(const struct dmi_system_id *id) /* MSI EC needs special treatment, enable it */ static int ec_flag_msi(const struct dmi_system_id *id) { - printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n"); + pr_debug("Detected MSI hardware, enabling workarounds.\n"); EC_FLAGS_MSI = 1; EC_FLAGS_VALIDATE_ECDT = 1; return 0; } -static struct dmi_system_id __initdata ec_dmi_table[] = { +/* + * Clevo M720 notebook actually works ok with IRQ mode, if we lifted + * the GPE storm threshold back to 20 + */ +static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) +{ + pr_debug("Setting the EC GPE storm threshold to 20\n"); + ec_storm_threshold = 20; + return 0; +} + +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. 
These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. + */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + return 0; +} + +static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_skip_dsdt_scan, "Compal JFL92", { DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), @@ -937,12 +1004,40 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { ec_flag_msi, "MSI hardware", { DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL}, { + ec_flag_msi, "MSI hardware", { + DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-STAR")}, NULL}, + { + ec_flag_msi, "Quanta hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW8/SW8/DW8"),}, NULL}, + { + ec_flag_msi, "Quanta hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, + { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, + { + ec_validate_ecdt, "ASUS hardware", { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, + { + ec_enlarge_storm_threshold, "CLEVO hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, + { + ec_skip_dsdt_scan, "HP Folio 13", { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, + { + ec_validate_ecdt, "ASUS hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, {}, }; - int __init acpi_ec_ecdt_probe(void) { acpi_status status; @@ -959,7 +1054,7 @@ int __init acpi_ec_ecdt_probe(void) status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); if (ACPI_SUCCESS(status)) { - pr_info(PREFIX "EC description table is found, configuring boot EC\n"); + pr_info("EC description table is found, configuring boot EC\n"); boot_ec->command_addr = ecdt_ptr->control.address; boot_ec->data_addr = ecdt_ptr->data.address; boot_ec->gpe = ecdt_ptr->gpe; @@ -968,19 +1063,20 @@ int __init acpi_ec_ecdt_probe(void) /* Don't trust ECDT, which comes from ASUSTek */ if (!EC_FLAGS_VALIDATE_ECDT) goto install; - saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); + saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) return -ENOMEM; - memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); /* fall through */ } - if (EC_FLAGS_SKIP_DSDT_SCAN) + if (EC_FLAGS_SKIP_DSDT_SCAN) { + kfree(saved_ec); return -ENODEV; + } /* This workaround is needed only on some broken machines, * which require early EC, but fail to provide ECDT */ - printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); + pr_debug("Look up EC in DSDT\n"); status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device, boot_ec, NULL); /* Check that acpi_get_devices actually 
find something */ @@ -992,7 +1088,7 @@ int __init acpi_ec_ecdt_probe(void) saved_ec->data_addr != boot_ec->data_addr || saved_ec->gpe != boot_ec->gpe || saved_ec->handle != boot_ec->handle) - pr_info(PREFIX "ASUSTek keeps feeding us with broken " + pr_info("ASUSTek keeps feeding us with broken " "ECDT tables, which are very hard to workaround. " "Trying to use DSDT EC info instead. Please send " "output of acpidump to linux-acpi@vger.kernel.org\n"); @@ -1003,10 +1099,8 @@ int __init acpi_ec_ecdt_probe(void) * which needs it, has fake EC._INI method, so use it as flag. * Keep boot_ec struct as it will be needed soon. */ - acpi_handle dummy; if (!dmi_name_in_vendors("ASUS") || - ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", - &dummy))) + !acpi_has_method(boot_ec->handle, "_INI")) return -ENODEV; } install: @@ -1016,26 +1110,11 @@ install: } error: kfree(boot_ec); + kfree(saved_ec); boot_ec = NULL; return -ENODEV; } -static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state) -{ - struct acpi_ec *ec = acpi_driver_data(device); - /* Stop using GPE */ - acpi_disable_gpe(NULL, ec->gpe); - return 0; -} - -static int acpi_ec_resume(struct acpi_device *device) -{ - struct acpi_ec *ec = acpi_driver_data(device); - /* Enable use of GPE back */ - acpi_enable_gpe(NULL, ec->gpe); - return 0; -} - static struct acpi_driver acpi_ec_driver = { .name = "ec", .class = ACPI_EC_CLASS, @@ -1043,8 +1122,6 @@ static struct acpi_driver acpi_ec_driver = { .ops = { .add = acpi_ec_add, .remove = acpi_ec_remove, - .suspend = acpi_ec_suspend, - .resume = acpi_ec_resume, }, }; @@ -1052,16 +1129,10 @@ int __init acpi_ec_init(void) { int result = 0; - acpi_ec_dir = proc_mkdir(ACPI_EC_CLASS, acpi_root_dir); - if (!acpi_ec_dir) - return -ENODEV; - /* Now register the driver for the EC */ result = acpi_bus_register_driver(&acpi_ec_driver); - if (result < 0) { - remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir); + if (result < 0) return -ENODEV; - } return result; } @@ -1072,9 +1143,6 @@ static void __exit acpi_ec_exit(void) { acpi_bus_unregister_driver(&acpi_ec_driver); - - remove_proc_entry(ACPI_EC_CLASS, acpi_root_dir); - return; } #endif /* 0 */ |
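
The heart of this change is the reworked transaction state machine: advance_transaction() now keys off the ACPI_EC_COMMAND_POLL/ACPI_EC_COMMAND_COMPLETE flags and drives the standard EC host-interface handshake (write the command byte once IBF clears, feed wdata bytes while IBF stays clear, then collect rdata bytes as OBF becomes set). The sketch below shows that same handshake from user space, purely as an illustration: it assumes the conventional port-I/O EC at data port 0x62 and command/status port 0x66 (the real addresses come from the ECDT or DSDT), must run as root, and races with the kernel driver, so it is not something to run on a machine you care about.

/*
 * Minimal user-space sketch of the RD_EC handshake that
 * advance_transaction() implements inside the driver.
 * Assumptions: x86, conventional EC ports 0x62/0x66, root privileges.
 * Build: gcc -O2 -o ec-peek ec-peek.c
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/io.h>

#define EC_DATA      0x62            /* EC_DATA register */
#define EC_SC        0x66            /* EC_SC (status/command) register */
#define EC_FLAG_OBF  0x01            /* output buffer full */
#define EC_FLAG_IBF  0x02            /* input buffer full */
#define EC_CMD_READ  0x80            /* RD_EC, per the ACPI spec */

static int ec_wait(uint8_t mask, uint8_t value)
{
	/* crude bounded poll instead of the driver's wait queue + ec_delay timeout */
	for (int i = 0; i < 100000; i++)
		if ((inb(EC_SC) & mask) == value)
			return 0;
	return -1;
}

int main(void)
{
	uint8_t addr = 0x00;             /* hypothetical EC RAM offset */

	if (ioperm(EC_DATA, 1, 1) || ioperm(EC_SC, 1, 1)) {
		perror("ioperm");
		return 1;
	}
	if (ec_wait(EC_FLAG_IBF, 0))     /* wait for the input buffer to empty */
		return 1;
	outb(EC_CMD_READ, EC_SC);        /* command phase */
	if (ec_wait(EC_FLAG_IBF, 0))
		return 1;
	outb(addr, EC_DATA);             /* write phase: one wdata byte */
	if (ec_wait(EC_FLAG_OBF, EC_FLAG_OBF))
		return 1;
	printf("EC[0x%02x] = 0x%02x\n", addr, inb(EC_DATA));  /* read phase */
	return 0;
}

On the driver side the timing is now tunable: because ec.c is built into the acpi namespace, the new knobs appear as acpi.ec_delay and acpi.ec_storm_threshold on the kernel command line, and (with 0644 permissions) under /sys/module/acpi/parameters/ at runtime.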
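
For code that lives inside the kernel, the diff also widens the external interface: first_ec is exported, ec_get_handle() is added, and the ec_read()/ec_write() helpers remain the intended way for other drivers (hwmon and platform laptop drivers, typically) to touch EC address space. A minimal module sketch using those helpers follows; the 0x00 offset is an assumption chosen purely for illustration, since real EC RAM layouts are board specific.

/*
 * Hypothetical module sketch using the ec_read() helper that
 * drivers/acpi/ec.c exports.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/acpi.h>

static int __init ec_peek_init(void)
{
	u8 val;
	int err;

	err = ec_read(0x00, &val);      /* goes through acpi_ec_transaction() */
	if (err)
		return err;
	pr_info("ec_peek: EC[0x00] = 0x%02x\n", val);
	return 0;
}

static void __exit ec_peek_exit(void)
{
}

module_init(ec_peek_init);
module_exit(ec_peek_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Illustrative EC RAM read via ec_read()");

Note that with the EC_FLAGS_BLOCKED logic added here, such calls fail with -EINVAL while transactions are blocked (acpi_ec_block_transactions() is meant for the suspend/resume path, as the "called from atomic context during wakeup" comment indicates), rather than poking the hardware at a time the EC cannot service requests.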
