Diffstat (limited to 'drivers/acpi/ec.c')
| -rw-r--r-- | drivers/acpi/ec.c | 241 |
1 file changed, 157 insertions(+), 84 deletions(-)
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc10..a66ab658abb 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,11 +1,14 @@
 /*
- * ec.c - ACPI Embedded Controller Driver (v2.1)
+ * ec.c - ACPI Embedded Controller Driver (v2.2)
  *
- * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
- * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- * Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
- * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2001-2014 Intel Corporation
+ *   Author: 2014       Lv Zheng <lv.zheng@intel.com>
+ *           2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ *           2006       Denis Sadykov <denis.m.sadykov@intel.com>
+ *           2004       Luming Yu <luming.yu@intel.com>
+ *           2001, 2002 Andy Grover <andrew.grover@intel.com>
+ *           2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -52,6 +55,7 @@
 /* EC status register */
 #define ACPI_EC_FLAG_OBF	0x01	/* Output buffer full */
 #define ACPI_EC_FLAG_IBF	0x02	/* Input buffer full */
+#define ACPI_EC_FLAG_CMD	0x08	/* Input buffer contains a command */
 #define ACPI_EC_FLAG_BURST	0x10	/* burst mode */
 #define ACPI_EC_FLAG_SCI	0x20	/* EC-SCI occurred */
 
@@ -67,6 +71,8 @@ enum ec_command {
 #define ACPI_EC_DELAY		500	/* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK	1000	/* Wait 1ms max. to get global lock */
 #define ACPI_EC_MSI_UDELAY	550	/* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX	100	/* Maximum number of events to query
+					 * when trying to clear the EC */
 
 enum {
 	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
@@ -76,6 +82,9 @@ enum {
 	EC_FLAGS_BLOCKED,		/* Transactions are blocked */
 };
 
+#define ACPI_EC_COMMAND_POLL		0x01 /* Available for command byte */
+#define ACPI_EC_COMMAND_COMPLETE	0x02 /* Completed last byte */
+
 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
@@ -107,7 +116,7 @@ struct transaction {
 	u8 ri;
 	u8 wlen;
 	u8 rlen;
-	bool done;
+	u8 flags;
 };
 
 struct acpi_ec *boot_ec, *first_ec;
@@ -116,6 +125,7 @@ EXPORT_SYMBOL(first_ec);
 static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
 static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
 static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 
 /* --------------------------------------------------------------------------
                              Transaction Management
@@ -124,92 +134,113 @@ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
 	u8 x = inb(ec->command_addr);
-	pr_debug("---> status = 0x%2.2x\n", x);
+	pr_debug("EC_SC(R) = 0x%2.2x "
+		 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
+		 x,
+		 !!(x & ACPI_EC_FLAG_SCI),
+		 !!(x & ACPI_EC_FLAG_BURST),
+		 !!(x & ACPI_EC_FLAG_CMD),
+		 !!(x & ACPI_EC_FLAG_IBF),
+		 !!(x & ACPI_EC_FLAG_OBF));
 	return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
 	u8 x = inb(ec->data_addr);
-	pr_debug("---> data = 0x%2.2x\n", x);
+	pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
 	return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-	pr_debug("<--- command = 0x%2.2x\n", command);
+	pr_debug("EC_SC(W) = 0x%2.2x\n", command);
 	outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-	pr_debug("<--- data = 0x%2.2x\n", data);
+	pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
 	outb(data, ec->data_addr);
 }
 
-static int ec_transaction_done(struct acpi_ec *ec)
+static int ec_transaction_completed(struct acpi_ec *ec)
 {
 	unsigned long flags;
 	int ret = 0;
 	spin_lock_irqsave(&ec->lock, flags);
-	if (!ec->curr || ec->curr->done)
+	if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
 		ret = 1;
 	spin_unlock_irqrestore(&ec->lock, flags);
 	return ret;
 }
 
-static void start_transaction(struct acpi_ec *ec)
-{
-	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
-	ec->curr->done = false;
-	acpi_ec_write_cmd(ec, ec->curr->command);
-}
-
-static void advance_transaction(struct acpi_ec *ec, u8 status)
+static bool advance_transaction(struct acpi_ec *ec)
 {
-	unsigned long flags;
 	struct transaction *t;
+	u8 status;
+	bool wakeup = false;
 
-	spin_lock_irqsave(&ec->lock, flags);
+	pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
+	status = acpi_ec_read_status(ec);
 	t = ec->curr;
 	if (!t)
-		goto unlock;
-	if (t->wlen > t->wi) {
-		if ((status & ACPI_EC_FLAG_IBF) == 0)
-			acpi_ec_write_data(ec,
-				t->wdata[t->wi++]);
-		else
-			goto err;
-	} else if (t->rlen > t->ri) {
-		if ((status & ACPI_EC_FLAG_OBF) == 1) {
-			t->rdata[t->ri++] = acpi_ec_read_data(ec);
-			if (t->rlen == t->ri)
-				t->done = true;
+		goto err;
+	if (t->flags & ACPI_EC_COMMAND_POLL) {
+		if (t->wlen > t->wi) {
+			if ((status & ACPI_EC_FLAG_IBF) == 0)
+				acpi_ec_write_data(ec, t->wdata[t->wi++]);
+			else
+				goto err;
+		} else if (t->rlen > t->ri) {
+			if ((status & ACPI_EC_FLAG_OBF) == 1) {
+				t->rdata[t->ri++] = acpi_ec_read_data(ec);
+				if (t->rlen == t->ri) {
+					t->flags |= ACPI_EC_COMMAND_COMPLETE;
+					wakeup = true;
+				}
+			} else
+				goto err;
+		} else if (t->wlen == t->wi &&
+			   (status & ACPI_EC_FLAG_IBF) == 0) {
+			t->flags |= ACPI_EC_COMMAND_COMPLETE;
+			wakeup = true;
+		}
+		return wakeup;
+	} else {
+		if ((status & ACPI_EC_FLAG_IBF) == 0) {
+			acpi_ec_write_cmd(ec, t->command);
+			t->flags |= ACPI_EC_COMMAND_POLL;
 		} else
 			goto err;
-	} else if (t->wlen == t->wi &&
-		   (status & ACPI_EC_FLAG_IBF) == 0)
-		t->done = true;
-	goto unlock;
+		return wakeup;
+	}
 err:
 	/*
 	 * If SCI bit is set, then don't think it's a false IRQ
 	 * otherwise will take a not handled IRQ as a false one.
 	 */
-	if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
-		++t->irq_count;
+	if (!(status & ACPI_EC_FLAG_SCI)) {
+		if (in_interrupt() && t)
+			++t->irq_count;
+	}
+	return wakeup;
+}
 
-unlock:
-	spin_unlock_irqrestore(&ec->lock, flags);
+static void start_transaction(struct acpi_ec *ec)
+{
+	ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+	ec->curr->flags = 0;
+	(void)advance_transaction(ec);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
 	if (state & ACPI_EC_FLAG_SCI) {
 		if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-			return acpi_ec_sync_query(ec);
+			return acpi_ec_sync_query(ec, NULL);
 	}
 	return 0;
 }
@@ -225,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
 			/* don't sleep with disabled interrupts */
 			if (EC_FLAGS_MSI || irqs_disabled()) {
 				udelay(ACPI_EC_MSI_UDELAY);
-				if (ec_transaction_done(ec))
+				if (ec_transaction_completed(ec))
 					return 0;
 			} else {
 				if (wait_event_timeout(ec->wait,
-						ec_transaction_done(ec),
+						ec_transaction_completed(ec),
 						msecs_to_jiffies(1)))
 					return 0;
 			}
-			advance_transaction(ec, acpi_ec_read_status(ec));
+			spin_lock_irqsave(&ec->lock, flags);
+			(void)advance_transaction(ec);
+			spin_unlock_irqrestore(&ec->lock, flags);
 		} while (time_before(jiffies, delay));
 		pr_debug("controller reset, restart transaction\n");
 		spin_lock_irqsave(&ec->lock, flags);
@@ -265,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 	return ret;
 }
 
-static int ec_check_ibf0(struct acpi_ec *ec)
-{
-	u8 status = acpi_ec_read_status(ec);
-	return (status & ACPI_EC_FLAG_IBF) == 0;
-}
-
-static int ec_wait_ibf0(struct acpi_ec *ec)
-{
-	unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
-	/* interrupt wait manually if GPE mode is not active */
-	while (time_before(jiffies, delay))
-		if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
-					msecs_to_jiffies(1)))
-			return 0;
-	return -ETIME;
-}
-
 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 {
 	int status;
@@ -302,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 			goto unlock;
 		}
 	}
-	if (ec_wait_ibf0(ec)) {
-		pr_err("input buffer is not empty, "
-				"aborting transaction\n");
-		status = -ETIME;
-		goto end;
-	}
 	pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
 			t->command, t->wdata ? t->wdata[0] : 0);
 	/* disable GPE during transaction if storm is detected */
@@ -331,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 		set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
 	}
 	pr_debug("transaction end\n");
-end:
 	if (ec->global_lock)
 		acpi_release_global_lock(glk);
 unlock:
@@ -440,6 +449,27 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
+/*
+ * Process _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+	int i, status;
+	u8 value = 0;
+
+	for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+		status = acpi_ec_sync_query(ec, &value);
+		if (status || !value)
+			break;
+	}
+
+	if (unlikely(i == ACPI_EC_CLEAR_MAX))
+		pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+	else
+		pr_info("%d stale EC events cleared\n", i);
+}
+
 void acpi_ec_block_transactions(void)
 {
 	struct acpi_ec *ec = first_ec;
@@ -463,6 +493,10 @@ void acpi_ec_unblock_transactions(void)
 	mutex_lock(&ec->mutex);
 	/* Allow transactions to be carried out again */
 	clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+	if (EC_FLAGS_CLEAR_ON_RESUME)
+		acpi_ec_clear(ec);
+
 	mutex_unlock(&ec->mutex);
 }
 
@@ -552,13 +586,18 @@ static void acpi_ec_run(void *cxt)
 	kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
 	u8 value = 0;
 	int status;
 	struct acpi_ec_query_handler *handler, *copy;
-	if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+	status = acpi_ec_query_unlocked(ec, &value);
+	if (data)
+		*data = value;
+	if (status)
 		return status;
+
 	list_for_each_entry(handler, &ec->list, node) {
 		if (value == handler->query_bit) {
 			/* have custom handler for this bit */
@@ -582,7 +621,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
 	if (!ec)
 		return;
 	mutex_lock(&ec->mutex);
-	acpi_ec_sync_query(ec);
+	acpi_ec_sync_query(ec, NULL);
 	mutex_unlock(&ec->mutex);
 }
 
@@ -601,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
 	u32 gpe_number, void *data)
 {
+	unsigned long flags;
 	struct acpi_ec *ec = data;
-	u8 status = acpi_ec_read_status(ec);
 
-	pr_debug("~~~> interrupt, status:0x%02x\n", status);
-
-	advance_transaction(ec, status);
-	if (ec_transaction_done(ec) &&
-	    (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+	spin_lock_irqsave(&ec->lock, flags);
+	if (advance_transaction(ec))
 		wake_up(&ec->wait);
-		ec_check_sci(ec, acpi_ec_read_status(ec));
-	}
+	spin_unlock_irqrestore(&ec->lock, flags);
+	ec_check_sci(ec, acpi_ec_read_status(ec));
 	return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
@@ -821,6 +857,13 @@ static int acpi_ec_add(struct acpi_device *device)
 
 	/* EC is fully operational, allow queries */
 	clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+	/* Clear stale _Q events if hardware might require that */
+	if (EC_FLAGS_CLEAR_ON_RESUME) {
+		mutex_lock(&ec->mutex);
+		acpi_ec_clear(ec);
+		mutex_unlock(&ec->mutex);
+	}
 	return ret;
 }
 
@@ -922,6 +965,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
 	return 0;
 }
 
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. These ECs stop reporting GPEs until they are manually polled, if too
+ * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx,
+ * however it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+	pr_debug("Detected system needing EC poll on resume.\n");
+	EC_FLAGS_CLEAR_ON_RESUME = 1;
+	return 0;
+}
+
 static struct dmi_system_id ec_dmi_table[] __initdata = {
 	{
 	ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1032,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
 	ec_validate_ecdt, "ASUS hardware", {
 	DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
 	DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+	{
+	ec_clear_on_resume, "Samsung hardware", {
+	DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
 	{},
 };
 
@@ -999,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
 		/* fall through */
 	}
 
-	if (EC_FLAGS_SKIP_DSDT_SCAN)
+	if (EC_FLAGS_SKIP_DSDT_SCAN) {
+		kfree(saved_ec);
 		return -ENODEV;
+	}
 
 	/* This workaround is needed only on some broken machines,
 	 * which require early EC, but fail to provide ECDT */
@@ -1038,6 +1110,7 @@ install:
 	}
 error:
 	kfree(boot_ec);
+	kfree(saved_ec);
 	boot_ec = NULL;
 	return -ENODEV;
 }
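
Illustration (editor's sketch, not part of the patch): the core of this diff is the change in advance_transaction() from a boolean done field to the two flag bits ACPI_EC_COMMAND_POLL and ACPI_EC_COMMAND_COMPLETE, so that writing the command byte and moving the data bytes are handled by one state machine under ec->lock, and the GPE handler only wakes the waiter when that machine reports completion. The standalone C program below mimics that flag logic for a single read transaction; the struct txn type, the FLAG_*/CMD_* constants, and the simulated status bytes are invented for the example and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_OBF	0x01	/* output buffer full */
#define FLAG_IBF	0x02	/* input buffer full */

#define CMD_POLL	0x01	/* command byte has been written */
#define CMD_COMPLETE	0x02	/* last byte has been transferred */

struct txn {
	unsigned char command;
	const unsigned char *wdata;
	unsigned char *rdata;
	unsigned char wlen, rlen, wi, ri, flags;
};

/* One step of the state machine, driven by the current status byte. */
static bool advance(struct txn *t, unsigned char status, unsigned char data_in)
{
	if (!(t->flags & CMD_POLL)) {
		/* Phase 1: wait for IBF=0, then submit the command byte. */
		if (!(status & FLAG_IBF)) {
			printf("write cmd 0x%02x\n", t->command);
			t->flags |= CMD_POLL;
		}
		return false;
	}
	/* Phase 2: move data bytes until the transaction is complete. */
	if (t->wi < t->wlen) {
		if (!(status & FLAG_IBF))
			printf("write data 0x%02x\n", t->wdata[t->wi++]);
	} else if (t->ri < t->rlen) {
		if (status & FLAG_OBF) {
			t->rdata[t->ri++] = data_in;
			if (t->ri == t->rlen)
				t->flags |= CMD_COMPLETE;
		}
	} else if (!(status & FLAG_IBF)) {
		t->flags |= CMD_COMPLETE;	/* write-only transaction done */
	}
	return t->flags & CMD_COMPLETE;		/* caller would wake the waiter */
}

int main(void)
{
	const unsigned char addr = 0x50;
	unsigned char out = 0;
	struct txn t = { .command = 0x80 /* RD_EC */, .wdata = &addr,
			 .wlen = 1, .rdata = &out, .rlen = 1 };

	/* Simulated status sequence: idle, idle, then OBF with a result byte. */
	advance(&t, 0x00, 0);		/* submits the command byte */
	advance(&t, 0x00, 0);		/* submits the address byte */
	if (advance(&t, FLAG_OBF, 0x42))
		printf("read 0x%02x from EC address 0x%02x\n", out, addr);
	return 0;
}

Each call to advance() corresponds to one invocation of advance_transaction() from either the GPE handler or ec_poll(); only the final call returns true, which is the point at which the real driver calls wake_up(&ec->wait).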
