Diffstat (limited to 'drivers/char/ipmi/ipmi_msghandler.c')
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 497
1 file changed, 282 insertions(+), 215 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 58c0e6387cf..e6db9381b2c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/errno.h>
-#include <asm/system.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
@@ -46,6 +45,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
#define PFX "IPMI message handler: "
@@ -53,6 +53,9 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
static int initialized;
@@ -71,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
*/
#define MAX_MSG_TIMEOUT 60000
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIME periods between event requests). Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+
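To make the cadence concrete, here is a worked expansion of the derived macros under two example HZ values (HZ is a build-time kernel config, so the jiffies figure varies by build):

/*
 * With HZ = 1000: IPMI_TIMEOUT_JIFFIES = (1000 * 1000) / 1000 = 1000
 * With HZ = 250:  IPMI_TIMEOUT_JIFFIES = (1000 * 250)  / 1000 = 250
 *
 * Either way the period is ~1 s of wall-clock time, and
 * IPMI_REQUEST_EV_TIME = 1000 / 1000 = 1, so events are requested on
 * every timer tick, i.e. about once a second.
 */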
/*
* The main "user" data structure.
*/
struct ipmi_user {
struct list_head link;
- /* Set to "0" when the user is destroyed. */
- int valid;
+ /* Set to false when the user is destroyed. */
+ bool valid;
struct kref refcount;
@@ -90,7 +107,7 @@ struct ipmi_user {
ipmi_smi_t intf;
/* Does this interface receive IPMI events? */
- int gets_events;
+ bool gets_events;
};
struct cmd_rcvr {
@@ -355,12 +372,15 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages that were delayed for some reason (out of memory,
- * for instance), will go in here to be processed later in a
- * periodic timer interrupt.
+ * Messages queued for delivery. If delivery fails (out of memory,
+ * for instance), they will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet is for handling received
+ * messages directly from the lower-layer handler.
*/
spinlock_t waiting_msgs_lock;
struct list_head waiting_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
/*
* The list of command receivers that are registered for commands
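The recv_tasklet member added in this hunk uses the classic tasklet API (the unsigned-long-data form current in this kernel generation). A minimal sketch of the idiom, using hypothetical names (my_ctx, my_tasklet_fn, my_producer) rather than this driver's:

#include <linux/interrupt.h>

struct my_ctx {
	struct tasklet_struct tasklet;
	/* ...plus whatever queue the tasklet drains... */
};

static struct my_ctx ctx;

/* Runs in softirq context: must not sleep, should stay short. */
static void my_tasklet_fn(unsigned long data)
{
	struct my_ctx *c = (struct my_ctx *)data;

	/* drain c's queue here */
}

static void my_init(void)
{
	tasklet_init(&ctx.tasklet, my_tasklet_fn, (unsigned long)&ctx);
}

/* Producer side, callable even from hard-IRQ context. */
static void my_producer(void)
{
	/* queue work, then: */
	tasklet_schedule(&ctx.tasklet);
}

static void my_exit(void)
{
	tasklet_kill(&ctx.tasklet);	/* waits out a running instance */
}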
@@ -378,6 +398,9 @@ struct ipmi_smi {
unsigned int waiting_events_count; /* How many events in queue? */
char delivering_events;
char event_msg_printed;
+ atomic_t event_waiters;
+ unsigned int ticks_to_req_ev;
+ int last_needs_timer;
/*
* The event receiver for my BMC, only really used at panic
@@ -390,7 +413,7 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- int maintenance_mode_enable;
+ bool maintenance_mode_enable;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -446,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
-
#define ipmi_inc_stat(intf, stat) \
atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
@@ -493,6 +515,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
+ tasklet_kill(&intf->recv_tasklet);
+
free_smi_msg_list(&intf->waiting_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -765,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
*seq = i;
*seqid = intf->seq_table[i].seqid;
intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ need_waiter(intf);
} else {
rv = -EAGAIN;
}
@@ -934,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
- new_user->gets_events = 0;
+ new_user->gets_events = false;
if (!try_module_get(intf->handlers->owner)) {
rv = -ENODEV;
@@ -955,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
*/
mutex_unlock(&ipmi_interfaces_mutex);
- new_user->valid = 1;
+ new_user->valid = true;
spin_lock_irqsave(&intf->seq_lock, flags);
list_add_rcu(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ if (handler->ipmi_watchdog_pretimeout) {
+ /* User wants pretimeouts, so make sure to watch for them. */
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ }
*user = new_user;
return 0;
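The atomic_inc_return(...) == 1 test above is the "first waiter arms the timer" idiom this patch applies in several places (ipmi_create_user, ipmi_set_gets_events, ipmi_register_for_cmd). A condensed sketch with hypothetical names, MY_PERIOD standing in for IPMI_TIMEOUT_JIFFIES:

static atomic_t waiters = ATOMIC_INIT(0);
static struct timer_list my_timer;

/* Acquire: only the 0 -> 1 transition arms the timer. */
static void get_waiter(void)
{
	if (atomic_inc_return(&waiters) == 1)
		mod_timer(&my_timer, jiffies + MY_PERIOD);
}

/* Release: no del_timer() here; the timer function reads the count
 * and simply declines to re-arm itself once it reaches zero. */
static void put_waiter(void)
{
	atomic_dec(&waiters);
}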
@@ -1012,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- user->valid = 0;
+ user->valid = false;
+
+ if (user->handler->ipmi_watchdog_pretimeout)
+ atomic_dec(&intf->event_waiters);
+
+ if (user->gets_events)
+ atomic_dec(&intf->event_waiters);
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1148,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode = mode;
intf->maintenance_mode_enable
= (intf->auto_maintenance_timeout > 0);
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode = mode;
- intf->maintenance_mode_enable = 0;
+ intf->maintenance_mode_enable = false;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode = mode;
- intf->maintenance_mode_enable = 1;
+ intf->maintenance_mode_enable = true;
break;
default:
rv = -EINVAL;
goto out_unlock;
}
+ intf->maintenance_mode = mode;
maintenance_mode_update(intf);
}
@@ -1177,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
{
unsigned long flags;
ipmi_smi_t intf = user->intf;
@@ -1187,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
INIT_LIST_HEAD(&msgs);
spin_lock_irqsave(&intf->events_lock, flags);
+ if (user->gets_events == val)
+ goto out;
+
user->gets_events = val;
+ if (val) {
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ } else {
+ atomic_dec(&intf->event_waiters);
+ }
+
if (intf->delivering_events)
/*
* Another thread is delivering events for this, so
@@ -1282,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
goto out_unlock;
}
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
@@ -1323,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
+ atomic_dec(&intf->event_waiters);
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
@@ -1528,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
= IPMI_MAINTENANCE_MODE_TIMEOUT;
if (!intf->maintenance_mode
&& !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = 1;
+ intf->maintenance_mode_enable = true;
maintenance_mode_update(intf);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -1841,7 +1889,7 @@ int ipmi_request_settime(ipmi_user_t user,
int retries,
unsigned int retry_time_ms)
{
- unsigned char saddr, lun;
+ unsigned char saddr = 0, lun = 0;
int rv;
if (!user)
@@ -1873,7 +1921,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_recv_msg *supplied_recv,
int priority)
{
- unsigned char saddr, lun;
+ unsigned char saddr = 0, lun = 0;
int rv;
if (!user)
@@ -1910,7 +1958,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_ipmb_proc_show, PDE(inode)->data);
+ return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_ipmb_proc_ops = {
@@ -1931,7 +1979,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
static int smi_version_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_version_proc_show, PDE(inode)->data);
+ return single_open(file, smi_version_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_version_proc_ops = {
@@ -2006,7 +2054,7 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
static int smi_stats_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, smi_stats_proc_show, PDE(inode)->data);
+ return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
}
static const struct file_operations smi_stats_proc_ops = {
@@ -2030,12 +2078,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+ entry->name = kstrdup(name, GFP_KERNEL);
if (!entry->name) {
kfree(entry);
return -ENOMEM;
}
- strcpy(entry->name, name);
file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
if (!file) {
@@ -2786,12 +2833,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
-void ipmi_poll_interface(ipmi_user_t user)
+static void ipmi_poll(ipmi_smi_t intf)
{
- ipmi_smi_t intf = user->intf;
-
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+ ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
@@ -2860,7 +2912,13 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#endif
spin_lock_init(&intf->waiting_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_msgs);
+ tasklet_init(&intf->recv_tasklet,
+ smi_recv_tasklet,
+ (unsigned long) intf);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->events_lock);
+ atomic_set(&intf->event_waiters, 0);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3622,11 +3680,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
}
/*
- * Handle a new message. Return 1 if the message should be requeued,
+ * Handle a received message. Return 1 if the message should be requeued,
* 0 if the message should be freed, or -1 if the message should not
* be freed or requeued.
*/
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
int requeue;
@@ -3773,7 +3831,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
- /* It's an asyncronous event. */
+ /* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -3784,12 +3842,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
return requeue;
}
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_msgs)) {
+ smi_msg = list_entry(intf->waiting_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ if (rv == 0) {
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ } else if (rv < 0) {
+ /* Fatal error on the message; leave it off the list but don't free it. */
+ } else {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
+ list_add(&smi_msg->link, &intf->waiting_msgs);
+ break;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ /*
+ * If the pretimeout count is non-zero, decrement it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ ipmi_user_t user;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ rcu_read_unlock();
+ }
+}
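The loop above is the pop-under-lock, process-unlocked pattern: handle_one_recv_msg never runs with waiting_msgs_lock held (important, since this now executes in softirq context, or lock-free when run_to_completion is set). Its skeleton, with hypothetical names and the driver-specific noise stripped:

spin_lock_irqsave(&q->lock, flags);
while (!list_empty(&q->list)) {
	item = list_first_entry(&q->list, struct item, link);
	list_del(&item->link);

	spin_unlock_irqrestore(&q->lock, flags);
	rv = process(item);			/* may take other locks */
	spin_lock_irqsave(&q->lock, flags);

	if (rv == 0) {
		free_item(item);		/* handled */
	} else if (rv > 0) {
		/* Not handled; put it back at the head and stop, so
		 * message order is preserved. */
		list_add(&item->link, &q->list);
		break;
	}
	/* rv < 0: fatal; neither requeued nor freed. */
}
spin_unlock_irqrestore(&q->lock, flags);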
+
+static void smi_recv_tasklet(unsigned long val)
+{
+ handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int rv;
int run_to_completion;
@@ -3843,31 +3961,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
run_to_completion = intf->run_to_completion;
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (!list_empty(&intf->waiting_msgs)) {
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- goto out;
- }
+ list_add_tail(&msg->link, &intf->waiting_msgs);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- rv = handle_new_recv_msg(intf, msg);
- if (rv > 0) {
- /*
- * Could not handle the message now, just add it to a
- * list to handle later.
- */
- run_to_completion = intf->run_to_completion;
- if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- } else if (rv == 0) {
- ipmi_free_smi_msg(msg);
- }
-
+ tasklet_schedule(&intf->recv_tasklet);
out:
return;
}
@@ -3875,16 +3973,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
- ipmi_user_t user;
-
- rcu_read_lock();
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (!user->handler->ipmi_watchdog_pretimeout)
- continue;
-
- user->handler->ipmi_watchdog_pretimeout(user->handler_data);
- }
- rcu_read_unlock();
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -3918,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct list_head *timeouts, long timeout_period,
- int slot, unsigned long *flags)
+ int slot, unsigned long *flags,
+ unsigned int *waiting_msgs)
{
struct ipmi_recv_msg *msg;
struct ipmi_smi_handlers *handlers;
@@ -3930,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
return;
ent->timeout -= timeout_period;
- if (ent->timeout > 0)
+ if (ent->timeout > 0) {
+ (*waiting_msgs)++;
return;
+ }
if (ent->retries_left == 0) {
/* The message has used all its retries. */
@@ -3948,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct ipmi_smi_msg *smi_msg;
/* More retries, send again. */
+ (*waiting_msgs)++;
+
/*
* Start with the max timer, set to normal timer after
* the message is sent.
@@ -3993,133 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
}
}
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
{
- ipmi_smi_t intf;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
- struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i;
+ unsigned int waiting_msgs = 0;
- rcu_read_lock();
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* See if any waiting messages need to be processed. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2,
- &intf->waiting_msgs, link) {
- if (!handle_new_recv_msg(intf, smi_msg)) {
- list_del(&smi_msg->link);
- ipmi_free_smi_msg(smi_msg);
- } else {
- /*
- * To preserve message order, quit if we
- * can't handle a message.
- */
- break;
- }
- }
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-
- /*
- * Go through the seq table and find any messages that
- * have timed out, putting them in the timeouts
- * list.
- */
- INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
- for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
- check_msg_timeout(intf, &(intf->seq_table[i]),
- &timeouts, timeout_period, i,
- &flags);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
+ INIT_LIST_HEAD(&timeouts);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &(intf->seq_table[i]),
+ &timeouts, timeout_period, i,
+ &flags, &waiting_msgs);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
- list_for_each_entry_safe(msg, msg2, &timeouts, link)
- deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+ deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
- /*
- * Maintenance mode handling. Check the timeout
- * optimistically before we claim the lock. It may
- * mean a timeout gets missed occasionally, but that
- * only means the timeout gets extended by one period
- * in that case. No big deal, and it avoids the lock
- * most of the time.
- */
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
if (intf->auto_maintenance_timeout > 0) {
- spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- if (intf->auto_maintenance_timeout > 0) {
- intf->auto_maintenance_timeout
- -= timeout_period;
- if (!intf->maintenance_mode
- && (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = 0;
- maintenance_mode_update(intf);
- }
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0)) {
+ intf->maintenance_mode_enable = false;
+ maintenance_mode_update(intf);
}
- spin_unlock_irqrestore(&intf->maintenance_mode_lock,
- flags);
}
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
}
- rcu_read_unlock();
+
+ tasklet_schedule(&intf->recv_tasklet);
+
+ return waiting_msgs;
}
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
{
- ipmi_smi_t intf;
struct ipmi_smi_handlers *handlers;
- rcu_read_lock();
- /*
- * Called from the timer, no need to check if handlers is
- * valid.
- */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
- continue;
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
+ return;
- handlers = intf->handlers;
- if (handlers)
- handlers->request_events(intf->send_info);
- }
- rcu_read_unlock();
+ handlers = intf->handlers;
+ if (handlers)
+ handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;
-/* Call every ~1000 ms. */
-#define IPMI_TIMEOUT_TIME 1000
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
-
static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
static void ipmi_timeout(unsigned long data)
{
+ ipmi_smi_t intf;
+ int nt = 0;
+
if (atomic_read(&stop_operation))
return;
- ticks_to_req_ev--;
- if (ticks_to_req_ev == 0) {
- ipmi_request_event();
- ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
- }
+ rcu_read_lock();
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ int lnt = 0;
- ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+ if (atomic_read(&intf->event_waiters)) {
+ intf->ticks_to_req_ev--;
+ if (intf->ticks_to_req_ev == 0) {
+ ipmi_request_event(intf);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+ lnt++;
+ }
- mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+ lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+
+ lnt = !!lnt;
+ if (lnt != intf->last_needs_timer &&
+ intf->handlers->set_need_watch)
+ intf->handlers->set_need_watch(intf->send_info, lnt);
+ intf->last_needs_timer = lnt;
+
+ nt += lnt;
+ }
+ rcu_read_unlock();
+
+ if (nt)
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static void need_waiter(ipmi_smi_t intf)
+{
+ /* Racy, but worst case we start the timer twice. */
+ if (!timer_pending(&ipmi_timer))
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
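Net effect of the two functions above: the module timer is now demand-driven. ipmi_timeout() re-arms itself only while some interface still reports work, and need_waiter() revives it when a new waiter appears. Compressed to a sketch (hypothetical names, old-style timer callback):

static void my_timeout(unsigned long data)
{
	/* sum of pending messages/waiters across all interfaces */
	int work = collect_timeouts_and_events();

	if (work)	/* keep ticking only while there is work */
		mod_timer(&my_timer, jiffies + MY_PERIOD);
	/* otherwise the timer lapses until need_waiter() re-arms it */
}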
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
@@ -4173,12 +4253,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
#ifdef CONFIG_IPMI_PANIC_EVENT
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4217,8 +4333,6 @@ static void send_panic_events(char *str)
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
- struct ipmi_smi_msg smi_msg;
- struct ipmi_recv_msg recv_msg;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4246,9 +4360,6 @@ static void send_panic_events(char *str)
data[7] = str[2];
}
- smi_msg.done = dummy_smi_done_handler;
- recv_msg.done = dummy_recv_done_handler;
-
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (!intf->handlers)
@@ -4258,18 +4369,7 @@ static void send_panic_events(char *str)
intf->run_to_completion = 1;
/* Send the event announcing the panic. */
intf->handlers->set_run_to_completion(intf->send_info, 1);
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4317,18 +4417,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -4337,18 +4426,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -4405,18 +4483,7 @@ static void send_panic_events(char *str)
strncpy(data+5, p, 11);
p += size;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
#endif /* CONFIG_IPMI_PANIC_STRING */
@@ -4522,7 +4589,7 @@ static void __exit cleanup_ipmi(void)
del_timer_sync(&ipmi_timer);
#ifdef CONFIG_PROC_FS
- remove_proc_entry(proc_ipmi_root->name, NULL);
+ proc_remove(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
driver_unregister(&ipmidriver.driver);