/*
- * ec.c - ACPI Embedded Controller Driver (v2.0)
+ * ec.c - ACPI Embedded Controller Driver (v2.1)
*
- * Copyright (C) 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
* Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
* Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-/* Uncomment next line to get verbose print outs*/
+/* Uncomment next line to get verbose printouts */
/* #define DEBUG */
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/spinlock.h>
#include <asm/io.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
ACPI_EC_COMMAND_QUERY = 0x84,
};
-/* EC events */
-enum ec_event {
- ACPI_EC_EVENT_OBF_1 = 1, /* Output buffer full */
- ACPI_EC_EVENT_IBF_0, /* Input buffer empty */
-};
-
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_UDELAY 100 /* Wait 100us before polling EC again */
+#define ACPI_EC_STORM_THRESHOLD 20 /* number of false interrupts
+ per transaction */
+
enum {
- EC_FLAGS_WAIT_GPE = 0, /* Don't check status until GPE arrives */
EC_FLAGS_QUERY_PENDING, /* Query is pending */
- EC_FLAGS_GPE_MODE, /* Expect GPE to be sent for status change */
+ EC_FLAGS_GPE_MODE, /* Expect GPE to be sent
+ * for status change */
EC_FLAGS_NO_GPE, /* Don't use GPE mode */
- EC_FLAGS_RESCHEDULE_POLL /* Re-schedule poll */
+ EC_FLAGS_GPE_STORM, /* GPE storm detected */
+ EC_FLAGS_HANDLERS_INSTALLED /* Handlers for GPE and
+ * OpReg are installed */
};
/* If we find an EC via the ECDT, we need to keep a ptr to its context */
u8 query_bit;
};
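+/*
+ * State of the transaction currently in flight: the bytes still to be
+ * written/read and the number of false interrupts seen while it ran.
+ */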
+struct transaction_data {
+ const u8 *wdata;
+ u8 *rdata;
+ unsigned short irq_count;
+ u8 wlen;
+ u8 rlen;
+};
+
static struct acpi_ec {
acpi_handle handle;
unsigned long gpe;
struct mutex lock;
wait_queue_head_t wait;
struct list_head list;
- struct delayed_work work;
- atomic_t irq_count;
- u8 handlers_installed;
+ struct transaction_data *t;
+ spinlock_t t_lock;
} *boot_ec, *first_ec;
/*
{
u8 x = inb(ec->data_addr);
pr_debug(PREFIX "---> data = 0x%2.2x\n", x);
- return inb(ec->data_addr);
+ return x;
}
static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
outb(data, ec->data_addr);
}
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
+static int ec_transaction_done(struct acpi_ec *ec)
{
- if (test_bit(EC_FLAGS_WAIT_GPE, &ec->flags))
- return 0;
- if (event == ACPI_EC_EVENT_OBF_1) {
- if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_OBF)
- return 1;
- } else if (event == ACPI_EC_EVENT_IBF_0) {
- if (!(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
- return 1;
- }
-
- return 0;
+ unsigned long flags;
+ int ret = 0;
+ spin_lock_irqsave(&ec->t_lock, flags);
+ if (!ec->t || (!ec->t->wlen && !ec->t->rlen))
+ ret = 1;
+ spin_unlock_irqrestore(&ec->t_lock, flags);
+ return ret;
}
-static void ec_schedule_ec_poll(struct acpi_ec *ec)
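+/*
+ * Advance the current transaction by one byte: write the next byte once
+ * IBF is clear, read the next byte once OBF is set, otherwise count the
+ * event as a false interrupt. Called from both GPE and poll context.
+ */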
+static void gpe_transaction(struct acpi_ec *ec, u8 status)
{
- if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
- schedule_delayed_work(&ec->work,
- msecs_to_jiffies(ACPI_EC_DELAY));
+ unsigned long flags;
+ spin_lock_irqsave(&ec->t_lock, flags);
+ if (!ec->t)
+ goto unlock;
+ if (ec->t->wlen > 0) {
+ if ((status & ACPI_EC_FLAG_IBF) == 0) {
+ acpi_ec_write_data(ec, *(ec->t->wdata++));
+ --ec->t->wlen;
+ } else
+ /* false interrupt, state didn't change */
+ ++ec->t->irq_count;
+
+ } else if (ec->t->rlen > 0) {
+ if (status & ACPI_EC_FLAG_OBF) {
+ *(ec->t->rdata++) = acpi_ec_read_data(ec);
+ --ec->t->rlen;
+ } else
+ /* false interrupt, state didn't change */
+ ++ec->t->irq_count;
+ }
+unlock:
+ spin_unlock_irqrestore(&ec->t_lock, flags);
}
-static void ec_switch_to_poll_mode(struct acpi_ec *ec)
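+/*
+ * Wait for the GPE handler to finish the transaction; on timeout fall
+ * back to poll mode and return 1 so the caller runs ec_poll().
+ */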
+static int acpi_ec_wait(struct acpi_ec *ec)
{
+ if (wait_event_timeout(ec->wait, ec_transaction_done(ec),
+ msecs_to_jiffies(ACPI_EC_DELAY)))
+ return 0;
+ /* missing GPEs, switch back to poll mode */
+ if (printk_ratelimit())
+ pr_info(PREFIX "missing confirmations, "
+ "switch off interrupt mode.\n");
set_bit(EC_FLAGS_NO_GPE, &ec->flags);
clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
- set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+ return 1;
}
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
+static void acpi_ec_gpe_query(void *ec_cxt);
+
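+/* if the SCI bit is set, queue a query unless one is already pending */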
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
{
- atomic_set(&ec->irq_count, 0);
- if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
- likely(!force_poll)) {
- if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
- msecs_to_jiffies(ACPI_EC_DELAY)))
- return 0;
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- if (acpi_ec_check_status(ec, event)) {
- /* missing GPEs, switch back to poll mode */
- if (printk_ratelimit())
- pr_info(PREFIX "missing confirmations, "
- "switch off interrupt mode.\n");
- ec_switch_to_poll_mode(ec);
- ec_schedule_ec_poll(ec);
- return 0;
- }
- } else {
- unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- while (time_before(jiffies, delay)) {
- if (acpi_ec_check_status(ec, event))
- return 0;
- msleep(1);
- }
- if (acpi_ec_check_status(ec,event))
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
+ return acpi_os_execute(OSL_EC_BURST_HANDLER,
+ acpi_ec_gpe_query, ec);
+ }
+ return 0;
+}
+
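+/*
+ * Poll-mode fallback: drive the transaction from the status register
+ * until it completes or ACPI_EC_DELAY expires.
+ */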
+static int ec_poll(struct acpi_ec *ec)
+{
+ unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
+ msleep(1);
+ while (time_before(jiffies, delay)) {
+ gpe_transaction(ec, acpi_ec_read_status(ec));
+ msleep(1);
+ if (ec_transaction_done(ec))
return 0;
}
- pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
- acpi_ec_read_status(ec),
- (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
return -ETIME;
}
u8 * rdata, unsigned rdata_len,
int force_poll)
{
- int result = 0;
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
+ unsigned long tmp;
+ struct transaction_data t = {.wdata = wdata, .rdata = rdata,
+ .wlen = wdata_len, .rlen = rdata_len,
+ .irq_count = 0};
+ int ret = 0;
pr_debug(PREFIX "transaction start\n");
- acpi_ec_write_cmd(ec, command);
- for (; wdata_len > 0; --wdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
- if (result) {
- pr_err(PREFIX
- "write_cmd timeout, command = %d\n", command);
- goto end;
- }
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- acpi_ec_write_data(ec, *(wdata++));
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+ acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
}
-
- if (!rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
- if (result) {
- pr_err(PREFIX
- "finish-write timeout, command = %d\n", command);
- goto end;
- }
- } else if (command == ACPI_EC_COMMAND_QUERY)
+ /* start transaction */
+ spin_lock_irqsave(&ec->t_lock, tmp);
+ /* the following two actions must be kept atomic */
+ ec->t = &t;
+ acpi_ec_write_cmd(ec, command);
+ if (command == ACPI_EC_COMMAND_QUERY)
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
-
- for (; rdata_len > 0; --rdata_len) {
- result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, force_poll);
- if (result) {
- pr_err(PREFIX "read timeout, command = %d\n", command);
- goto end;
- }
- /* Don't expect GPE after last read */
- if (rdata_len > 1)
- set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- *(rdata++) = acpi_ec_read_data(ec);
- }
- end:
+ spin_unlock_irqrestore(&ec->t_lock, tmp);
+ /* if poll mode was requested or GPE mode failed, do a poll loop */
+ if (force_poll ||
+ !test_bit(EC_FLAGS_GPE_MODE, &ec->flags) ||
+ acpi_ec_wait(ec))
+ ret = ec_poll(ec);
pr_debug(PREFIX "transaction end\n");
- return result;
+ spin_lock_irqsave(&ec->t_lock, tmp);
+ ec->t = NULL;
+ spin_unlock_irqrestore(&ec->t_lock, tmp);
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ /* check if we received SCI during transaction */
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+ } else if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ t.irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_debug(PREFIX "GPE storm detected\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+ return ret;
+}
+
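+/* wait_event_timeout() condition: true once the EC input buffer is empty */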
+static int ec_check_ibf0(struct acpi_ec *ec)
+{
+ u8 status = acpi_ec_read_status(ec);
+ return (status & ACPI_EC_FLAG_IBF) == 0;
}
static int acpi_ec_transaction(struct acpi_ec *ec, u8 command,
{
int status;
u32 glk;
-
if (!ec || (wdata_len && !wdata) || (rdata_len && !rdata))
return -EINVAL;
-
if (rdata)
memset(rdata, 0, rdata_len);
-
mutex_lock(&ec->lock);
if (ec->global_lock) {
status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
if (ACPI_FAILURE(status)) {
- mutex_unlock(&ec->lock);
- return -ENODEV;
+ status = -ENODEV;
+ goto unlock;
}
}
-
- status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
- if (status) {
+ if (!wait_event_timeout(ec->wait, ec_check_ibf0(ec),
+ msecs_to_jiffies(ACPI_EC_DELAY))) {
pr_err(PREFIX "input buffer is not empty, "
"aborting transaction\n");
+ status = -ETIME;
goto end;
}
-
status = acpi_ec_transaction_unlocked(ec, command,
wdata, wdata_len,
rdata, rdata_len,
force_poll);
-
- end:
-
+end:
if (ec->global_lock)
acpi_release_global_lock(glk);
+unlock:
mutex_unlock(&ec->lock);
-
return status;
}
int acpi_ec_burst_disable(struct acpi_ec *ec)
{
- return acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE, NULL, 0, NULL, 0, 0);
+ return (acpi_ec_read_status(ec) & ACPI_EC_FLAG_BURST) ?
+ acpi_ec_transaction(ec, ACPI_EC_BURST_DISABLE,
+ NULL, 0, NULL, 0, 0) : 0;
}
static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data)
static u32 acpi_ec_gpe_handler(void *data)
{
- acpi_status status = AE_OK;
struct acpi_ec *ec = data;
- u8 state = acpi_ec_read_status(ec);
+ u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
- atomic_inc(&ec->irq_count);
- if (atomic_read(&ec->irq_count) > 5) {
- pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
- ec_switch_to_poll_mode(ec);
- goto end;
- }
- clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
- if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
+ status = acpi_ec_read_status(ec);
+
+ gpe_transaction(ec, status);
+ if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
wake_up(&ec->wait);
- if (state & ACPI_EC_FLAG_SCI) {
- if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- status = acpi_os_execute(OSL_EC_BURST_HANDLER,
- acpi_ec_gpe_query, ec);
- } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
- !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
- in_interrupt()) {
+ ec_check_sci(ec, status);
+ if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+ !test_bit(EC_FLAGS_NO_GPE, &ec->flags)) {
/* this is non-query, must be confirmation */
if (printk_ratelimit())
pr_info(PREFIX "non-query interrupt received,"
" switching to interrupt mode\n");
set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
}
-end:
- ec_schedule_ec_poll(ec);
- return ACPI_SUCCESS(status) ?
- ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
-}
-
-static void do_ec_poll(struct work_struct *work)
-{
- struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
- atomic_set(&ec->irq_count, 0);
- (void)acpi_ec_gpe_handler(ec);
+ return ACPI_INTERRUPT_HANDLED;
}
/* --------------------------------------------------------------------------
mutex_init(&ec->lock);
init_waitqueue_head(&ec->wait);
INIT_LIST_HEAD(&ec->list);
- INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll);
- atomic_set(&ec->irq_count, 0);
+ spin_lock_init(&ec->t_lock);
return ec;
}
return AE_CTRL_TERMINATE;
}
-static void ec_poll_stop(struct acpi_ec *ec)
-{
- clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
- cancel_delayed_work(&ec->work);
-}
-
static void ec_remove_handlers(struct acpi_ec *ec)
{
- ec_poll_stop(ec);
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
pr_err(PREFIX "failed to remove space handler\n");
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
&acpi_ec_gpe_handler)))
pr_err(PREFIX "failed to remove gpe handler\n");
- ec->handlers_installed = 0;
+ clear_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
}
static int acpi_ec_add(struct acpi_device *device)
static int ec_install_handlers(struct acpi_ec *ec)
{
acpi_status status;
- if (ec->handlers_installed)
+ if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
return 0;
status = acpi_install_gpe_handler(NULL, ec->gpe,
- ACPI_GPE_EDGE_TRIGGERED,
- &acpi_ec_gpe_handler, ec);
+ ACPI_GPE_EDGE_TRIGGERED,
+ &acpi_ec_gpe_handler, ec);
if (ACPI_FAILURE(status))
return -ENODEV;
-
acpi_set_gpe_type(NULL, ec->gpe, ACPI_GPE_TYPE_RUNTIME);
acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
-
status = acpi_install_address_space_handler(ec->handle,
ACPI_ADR_SPACE_EC,
&acpi_ec_space_handler,
return -ENODEV;
}
- ec->handlers_installed = 1;
+ set_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags);
return 0;
}
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
- ec_schedule_ec_poll(ec);
return ret;
}
int __init acpi_boot_ec_enable(void)
{
- if (!boot_ec || boot_ec->handlers_installed)
+ if (!boot_ec || test_bit(EC_FLAGS_HANDLERS_INSTALLED, &boot_ec->flags))
return 0;
if (!ec_install_handlers(boot_ec)) {
first_ec = boot_ec;