Diffstat (limited to 'drivers/char/ipmi')
-rw-r--r--  drivers/char/ipmi/Kconfig            |   12
-rw-r--r--  drivers/char/ipmi/Makefile           |    2
-rw-r--r--  drivers/char/ipmi/ipmi_bt_sm.c       |   18
-rw-r--r--  drivers/char/ipmi/ipmi_devintf.c     |   58
-rw-r--r--  drivers/char/ipmi/ipmi_kcs_sm.c      |   11
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c  |  682
-rw-r--r--  drivers/char/ipmi/ipmi_poweroff.c    |   23
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c     | 1237
-rw-r--r--  drivers/char/ipmi/ipmi_smic_sm.c     |    2
-rw-r--r--  drivers/char/ipmi/ipmi_watchdog.c    |  176
10 files changed, 1464 insertions, 757 deletions
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 0baa8fab4ea..db1c9b7adaa 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,6 +50,18 @@ config IPMI_SI
Currently, only KCS and SMIC are supported. If
you are using IPMI, you should probably say "y" here.
+config IPMI_SI_PROBE_DEFAULTS
+ bool 'Probe for all possible IPMI system interfaces by default'
+ default n
+ depends on IPMI_SI
+ help
+ Modern systems will usually expose IPMI interfaces via a discoverable
+ firmware mechanism such as ACPI or DMI. Older systems do not, and so
+ the driver is forced to probe hardware manually. This may cause boot
+ delays. Say "n" here to disable this manual probing. IPMI will then
+ only be available on older systems if the "ipmi_si_intf.trydefaults=1"
+ boot argument is passed.
+
config IPMI_WATCHDOG
tristate 'IPMI Watchdog Timer'
help
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index eb8a1a8c188..16a93648d54 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -2,7 +2,7 @@
# Makefile for the ipmi drivers.
#
-ipmi_si-objs := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
+ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 7b98c067190..61e71616689 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -2,7 +2,7 @@
* ipmi_bt_sm.c
*
* The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part
- * of the driver architecture at http://sourceforge.net/project/openipmi
+ * of the driver architecture at http://sourceforge.net/projects/openipmi
*
* Author: Rocky Craig <first.last@hp.com>
*
@@ -95,9 +95,9 @@ struct si_sm_data {
enum bt_states state;
unsigned char seq; /* BT sequence number */
struct si_sm_io *io;
- unsigned char write_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int write_count;
- unsigned char read_data[IPMI_MAX_MSG_LENGTH];
+ unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
int read_count;
int truncated;
long timeout; /* microseconds countdown */
@@ -201,7 +201,7 @@ static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
}
bt->state = BT_STATE_IDLE; /* start here */
bt->complete = BT_STATE_IDLE; /* end here */
- bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000;
+ bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * USEC_PER_SEC;
bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT;
/* BT_CAP_outreqs == zero is a flag to read BT Capabilities */
return 3; /* We claim 3 bytes of space; ought to check SPMI table */
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
static inline int read_all_bytes(struct si_sm_data *bt)
{
- unsigned char i;
+ unsigned int i;
/*
* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
@@ -560,7 +560,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_CONTROL(BT_H_BUSY); /* set */
/*
- * Uncached, ordered writes should just proceeed serially but
+ * Uncached, ordered writes should just proceed serially but
* some BMCs don't clear B2H_ATN with one hit. Fast-path a
* workaround without too much penalty to the general case.
*/
@@ -613,7 +613,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
HOST2BMC(42); /* Sequence number */
HOST2BMC(3); /* Cmd == Soft reset */
BT_CONTROL(BT_H2B_ATN);
- bt->timeout = BT_RESET_DELAY * 1000000;
+ bt->timeout = BT_RESET_DELAY * USEC_PER_SEC;
BT_STATE_CHANGE(BT_STATE_RESET3,
SI_SM_CALL_WITH_DELAY);
@@ -651,14 +651,14 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
bt_init_data(bt, bt->io);
if ((i == 8) && !BT_CAP[2]) {
bt->BT_CAP_outreqs = BT_CAP[3];
- bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
+ bt->BT_CAP_req2rsp = BT_CAP[6] * USEC_PER_SEC;
bt->BT_CAP_retries = BT_CAP[7];
} else
printk(KERN_WARNING "IPMI BT: using default values\n");
if (!bt->BT_CAP_outreqs)
bt->BT_CAP_outreqs = 1;
printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
- bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
+ bt->BT_CAP_req2rsp / USEC_PER_SEC, bt->BT_CAP_retries);
bt->timeout = bt->BT_CAP_req2rsp;
return SI_SM_CALL_WITHOUT_DELAY;
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 41fc11dc921..ec318bf434a 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -34,8 +34,8 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
-#include <asm/system.h>
#include <linux/poll.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
@@ -43,7 +43,6 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/compat.h>
-#include <linux/smp_lock.h>
struct ipmi_file_private
{
@@ -58,6 +57,7 @@ struct ipmi_file_private
unsigned int default_retry_time_ms;
};
+static DEFINE_MUTEX(ipmi_mutex);
static void file_receive_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
@@ -101,9 +101,9 @@ static int ipmi_fasync(int fd, struct file *file, int on)
struct ipmi_file_private *priv = file->private_data;
int result;
- lock_kernel(); /* could race against open() otherwise */
+ mutex_lock(&ipmi_mutex); /* could race against open() otherwise */
result = fasync_helper(fd, file, on, &priv->fasync_queue);
- unlock_kernel();
+ mutex_unlock(&ipmi_mutex);
return (result);
}
@@ -124,7 +124,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
if (!priv)
return -ENOMEM;
- lock_kernel();
+ mutex_lock(&ipmi_mutex);
priv->file = file;
rv = ipmi_create_user(if_num,
@@ -149,7 +149,7 @@ static int ipmi_open(struct inode *inode, struct file *file)
priv->default_retry_time_ms = 0;
out:
- unlock_kernel();
+ mutex_unlock(&ipmi_mutex);
return rv;
}
@@ -227,8 +227,7 @@ static int handle_send_req(ipmi_user_t user,
return rv;
}
-static int ipmi_ioctl(struct inode *inode,
- struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd,
unsigned long data)
{
@@ -629,6 +628,23 @@ static int ipmi_ioctl(struct inode *inode,
return rv;
}
+/*
+ * Note: it doesn't make sense to take the BKL here but
+ * not in compat_ipmi_ioctl. -arnd
+ */
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long data)
+{
+ int ret;
+
+ mutex_lock(&ipmi_mutex);
+ ret = ipmi_ioctl(file, cmd, data);
+ mutex_unlock(&ipmi_mutex);
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
/*
@@ -794,6 +810,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
struct ipmi_recv __user *precv64;
struct ipmi_recv recv64;
+ memset(&recv64, 0, sizeof(recv64));
if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
return -EFAULT;
@@ -801,7 +818,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
if (copy_to_user(precv64, &recv64, sizeof(recv64)))
return -EFAULT;
- rc = ipmi_ioctl(filep->f_path.dentry->d_inode, filep,
+ rc = ipmi_ioctl(filep,
((cmd == COMPAT_IPMICTL_RECEIVE_MSG)
? IPMICTL_RECEIVE_MSG
: IPMICTL_RECEIVE_MSG_TRUNC),
@@ -818,21 +835,34 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
return rc;
}
default:
- return ipmi_ioctl(filep->f_path.dentry->d_inode, filep, cmd, arg);
+ return ipmi_ioctl(filep, cmd, arg);
}
}
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_mutex);
+ ret = compat_ipmi_ioctl(filep, cmd, arg);
+ mutex_unlock(&ipmi_mutex);
+
+ return ret;
+}
#endif
static const struct file_operations ipmi_fops = {
.owner = THIS_MODULE,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = compat_ipmi_ioctl,
+ .compat_ioctl = unlocked_compat_ipmi_ioctl,
#endif
.open = ipmi_open,
.release = ipmi_release,
.fasync = ipmi_fasync,
.poll = ipmi_poll,
+ .llseek = noop_llseek,
};
#define DEVICE_NAME "ipmidev"
@@ -898,7 +928,7 @@ static struct ipmi_smi_watcher smi_watcher =
.smi_gone = ipmi_smi_gone,
};
-static __init int init_ipmi_devintf(void)
+static int __init init_ipmi_devintf(void)
{
int rv;
@@ -936,7 +966,7 @@ static __init int init_ipmi_devintf(void)
}
module_init(init_ipmi_devintf);
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
struct ipmi_reg_list *entry, *entry2;
mutex_lock(&reg_list_mutex);
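
The ipmi_devintf changes above replace the big kernel lock with a driver-local mutex and switch from the old .ioctl operation to .unlocked_ioctl. A minimal sketch of that wrapper pattern follows; the demo_* names are hypothetical placeholders, not the driver's actual functions:

	static DEFINE_MUTEX(demo_mutex);

	/* Old-style handler body; the VFS no longer takes any lock for us. */
	static int demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	{
		/* ... per-command handling ... */
		return 0;
	}

	/* .unlocked_ioctl entry point: take the driver-local mutex ourselves. */
	static long demo_unlocked_ioctl(struct file *file, unsigned int cmd,
					unsigned long arg)
	{
		long ret;

		mutex_lock(&demo_mutex);
		ret = demo_ioctl(file, cmd, arg);
		mutex_unlock(&demo_mutex);
		return ret;
	}

	static const struct file_operations demo_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= demo_unlocked_ioctl,
		.llseek		= noop_llseek,
	};

The same wrapping is applied to the compat path so 32-bit and 64-bit callers serialize on the same mutex.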
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index 80704875794..8c25f596808 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
#define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
/* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 1000000
-#define OBF_RETRY_TIMEOUT 1000000
+#define IBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
+#define OBF_RETRY_TIMEOUT (5*USEC_PER_SEC)
#define MAX_ERROR_RETRIES 10
#define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
@@ -251,8 +251,9 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
if (!GET_STATUS_OBF(status)) {
kcs->obf_timeout -= time;
if (kcs->obf_timeout < 0) {
- start_error_recovery(kcs, "OBF not ready in time");
- return 1;
+ kcs->obf_timeout = OBF_RETRY_TIMEOUT;
+ start_error_recovery(kcs, "OBF not ready in time");
+ return 1;
}
return 0;
}
@@ -370,7 +371,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
return SI_SM_IDLE;
case KCS_START_OP:
- if (state != KCS_IDLE) {
+ if (state != KCS_IDLE_STATE) {
start_error_recovery(kcs,
"State machine not idle at start");
break;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 09050797c76..e6db9381b2c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -33,8 +33,9 @@
#include <linux/module.h>
#include <linux/errno.h>
-#include <asm/system.h>
#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -44,6 +45,7 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
#define PFX "IPMI message handler: "
@@ -51,6 +53,9 @@
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
static int initialized;
@@ -69,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
*/
#define MAX_MSG_TIMEOUT 60000
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME 1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
+
/*
* The main "user" data structure.
*/
struct ipmi_user {
struct list_head link;
- /* Set to "0" when the user is destroyed. */
- int valid;
+ /* Set to false when the user is destroyed. */
+ bool valid;
struct kref refcount;
@@ -88,7 +107,7 @@ struct ipmi_user {
ipmi_smi_t intf;
/* Does this interface receive IPMI events? */
- int gets_events;
+ bool gets_events;
};
struct cmd_rcvr {
@@ -353,12 +372,15 @@ struct ipmi_smi {
int curr_seq;
/*
- * Messages that were delayed for some reason (out of memory,
- * for instance), will go in here to be processed later in a
- * periodic timer interrupt.
+ * Messages queued for delivery. If delivery fails (out of memory
+ * for instance), They will stay in here to be processed later in a
+ * periodic timer interrupt. The tasklet is for handling received
+ * messages directly from the handler.
*/
spinlock_t waiting_msgs_lock;
struct list_head waiting_msgs;
+ atomic_t watchdog_pretimeouts_to_deliver;
+ struct tasklet_struct recv_tasklet;
/*
* The list of command receivers that are registered for commands
@@ -376,6 +398,9 @@ struct ipmi_smi {
unsigned int waiting_events_count; /* How many events in queue? */
char delivering_events;
char event_msg_printed;
+ atomic_t event_waiters;
+ unsigned int ticks_to_req_ev;
+ int last_needs_timer;
/*
* The event receiver for my BMC, only really used at panic
@@ -388,7 +413,7 @@ struct ipmi_smi {
/* For handling of maintenance mode. */
int maintenance_mode;
- int maintenance_mode_enable;
+ bool maintenance_mode_enable;
int auto_maintenance_timeout;
spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -444,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);
-
#define ipmi_inc_stat(intf, stat) \
atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
@@ -491,6 +515,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
struct cmd_rcvr *rcvr, *rcvr2;
struct list_head list;
+ tasklet_kill(&intf->recv_tasklet);
+
free_smi_msg_list(&intf->waiting_msgs);
free_recv_msg_list(&intf->waiting_events);
@@ -763,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
*seq = i;
*seqid = intf->seq_table[i].seqid;
intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+ need_waiter(intf);
} else {
rv = -EAGAIN;
}
@@ -932,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
new_user->handler = handler;
new_user->handler_data = handler_data;
new_user->intf = intf;
- new_user->gets_events = 0;
+ new_user->gets_events = false;
if (!try_module_get(intf->handlers->owner)) {
rv = -ENODEV;
@@ -953,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
*/
mutex_unlock(&ipmi_interfaces_mutex);
- new_user->valid = 1;
+ new_user->valid = true;
spin_lock_irqsave(&intf->seq_lock, flags);
list_add_rcu(&new_user->link, &intf->users);
spin_unlock_irqrestore(&intf->seq_lock, flags);
+ if (handler->ipmi_watchdog_pretimeout) {
+ /* User wants pretimeouts, so make sure to watch for them. */
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ }
*user = new_user;
return 0;
@@ -969,6 +1001,33 @@ out_kfree:
}
EXPORT_SYMBOL(ipmi_create_user);
+int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
+{
+ int rv = 0;
+ ipmi_smi_t intf;
+ struct ipmi_smi_handlers *handlers;
+
+ mutex_lock(&ipmi_interfaces_mutex);
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ if (intf->intf_num == if_num)
+ goto found;
+ }
+ /* Not found, return an error */
+ rv = -EINVAL;
+ mutex_unlock(&ipmi_interfaces_mutex);
+ return rv;
+
+found:
+ handlers = intf->handlers;
+ rv = -ENOSYS;
+ if (handlers->get_smi_info)
+ rv = handlers->get_smi_info(intf->send_info, data);
+ mutex_unlock(&ipmi_interfaces_mutex);
+
+ return rv;
+}
+EXPORT_SYMBOL(ipmi_get_smi_info);
+
static void free_user(struct kref *ref)
{
ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
@@ -983,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
struct cmd_rcvr *rcvr;
struct cmd_rcvr *rcvrs = NULL;
- user->valid = 0;
+ user->valid = false;
+
+ if (user->handler->ipmi_watchdog_pretimeout)
+ atomic_dec(&intf->event_waiters);
+
+ if (user->gets_events)
+ atomic_dec(&intf->event_waiters);
/* Remove the user from the interface's sequence table. */
spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1119,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
if (intf->maintenance_mode != mode) {
switch (mode) {
case IPMI_MAINTENANCE_MODE_AUTO:
- intf->maintenance_mode = mode;
intf->maintenance_mode_enable
= (intf->auto_maintenance_timeout > 0);
break;
case IPMI_MAINTENANCE_MODE_OFF:
- intf->maintenance_mode = mode;
- intf->maintenance_mode_enable = 0;
+ intf->maintenance_mode_enable = false;
break;
case IPMI_MAINTENANCE_MODE_ON:
- intf->maintenance_mode = mode;
- intf->maintenance_mode_enable = 1;
+ intf->maintenance_mode_enable = true;
break;
default:
rv = -EINVAL;
goto out_unlock;
}
+ intf->maintenance_mode = mode;
maintenance_mode_update(intf);
}
@@ -1148,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
{
unsigned long flags;
ipmi_smi_t intf = user->intf;
@@ -1158,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
INIT_LIST_HEAD(&msgs);
spin_lock_irqsave(&intf->events_lock, flags);
+ if (user->gets_events == val)
+ goto out;
+
user->gets_events = val;
+ if (val) {
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+ } else {
+ atomic_dec(&intf->event_waiters);
+ }
+
if (intf->delivering_events)
/*
* Another thread is delivering events for this, so
@@ -1253,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
goto out_unlock;
}
+ if (atomic_inc_return(&intf->event_waiters) == 1)
+ need_waiter(intf);
+
list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
out_unlock:
@@ -1294,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
mutex_unlock(&intf->cmd_rcvrs_mutex);
synchronize_rcu();
while (rcvrs) {
+ atomic_dec(&intf->event_waiters);
rcvr = rcvrs;
rcvrs = rcvr->next;
kfree(rcvr);
@@ -1499,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
= IPMI_MAINTENANCE_MODE_TIMEOUT;
if (!intf->maintenance_mode
&& !intf->maintenance_mode_enable) {
- intf->maintenance_mode_enable = 1;
+ intf->maintenance_mode_enable = true;
maintenance_mode_update(intf);
}
spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -1812,7 +1889,7 @@ int ipmi_request_settime(ipmi_user_t user,
int retries,
unsigned int retry_time_ms)
{
- unsigned char saddr, lun;
+ unsigned char saddr = 0, lun = 0;
int rv;
if (!user)
@@ -1844,7 +1921,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
struct ipmi_recv_msg *supplied_recv,
int priority)
{
- unsigned char saddr, lun;
+ unsigned char saddr = 0, lun = 0;
int rv;
if (!user)
@@ -1868,102 +1945,128 @@ int ipmi_request_supply_msgs(ipmi_user_t user,
EXPORT_SYMBOL(ipmi_request_supply_msgs);
#ifdef CONFIG_PROC_FS
-static int ipmb_file_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int smi_ipmb_proc_show(struct seq_file *m, void *v)
{
- char *out = (char *) page;
- ipmi_smi_t intf = data;
+ ipmi_smi_t intf = m->private;
int i;
- int rv = 0;
- for (i = 0; i < IPMI_MAX_CHANNELS; i++)
- rv += sprintf(out+rv, "%x ", intf->channels[i].address);
- out[rv-1] = '\n'; /* Replace the final space with a newline */
- out[rv] = '\0';
- rv++;
- return rv;
+ seq_printf(m, "%x", intf->channels[0].address);
+ for (i = 1; i < IPMI_MAX_CHANNELS; i++)
+ seq_printf(m, " %x", intf->channels[i].address);
+ return seq_putc(m, '\n');
}
-static int version_file_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
- char *out = (char *) page;
- ipmi_smi_t intf = data;
+ return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
+}
- return sprintf(out, "%u.%u\n",
+static const struct file_operations smi_ipmb_proc_ops = {
+ .open = smi_ipmb_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_version_proc_show(struct seq_file *m, void *v)
+{
+ ipmi_smi_t intf = m->private;
+
+ return seq_printf(m, "%u.%u\n",
ipmi_version_major(&intf->bmc->id),
ipmi_version_minor(&intf->bmc->id));
}
-static int stat_file_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int smi_version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_version_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_version_proc_ops = {
+ .open = smi_version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_stats_proc_show(struct seq_file *m, void *v)
{
- char *out = (char *) page;
- ipmi_smi_t intf = data;
+ ipmi_smi_t intf = m->private;
- out += sprintf(out, "sent_invalid_commands: %u\n",
+ seq_printf(m, "sent_invalid_commands: %u\n",
ipmi_get_stat(intf, sent_invalid_commands));
- out += sprintf(out, "sent_local_commands: %u\n",
+ seq_printf(m, "sent_local_commands: %u\n",
ipmi_get_stat(intf, sent_local_commands));
- out += sprintf(out, "handled_local_responses: %u\n",
+ seq_printf(m, "handled_local_responses: %u\n",
ipmi_get_stat(intf, handled_local_responses));
- out += sprintf(out, "unhandled_local_responses: %u\n",
+ seq_printf(m, "unhandled_local_responses: %u\n",
ipmi_get_stat(intf, unhandled_local_responses));
- out += sprintf(out, "sent_ipmb_commands: %u\n",
+ seq_printf(m, "sent_ipmb_commands: %u\n",
ipmi_get_stat(intf, sent_ipmb_commands));
- out += sprintf(out, "sent_ipmb_command_errs: %u\n",
+ seq_printf(m, "sent_ipmb_command_errs: %u\n",
ipmi_get_stat(intf, sent_ipmb_command_errs));
- out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
+ seq_printf(m, "retransmitted_ipmb_commands: %u\n",
ipmi_get_stat(intf, retransmitted_ipmb_commands));
- out += sprintf(out, "timed_out_ipmb_commands: %u\n",
+ seq_printf(m, "timed_out_ipmb_commands: %u\n",
ipmi_get_stat(intf, timed_out_ipmb_commands));
- out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n",
+ seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
- out += sprintf(out, "sent_ipmb_responses: %u\n",
+ seq_printf(m, "sent_ipmb_responses: %u\n",
ipmi_get_stat(intf, sent_ipmb_responses));
- out += sprintf(out, "handled_ipmb_responses: %u\n",
+ seq_printf(m, "handled_ipmb_responses: %u\n",
ipmi_get_stat(intf, handled_ipmb_responses));
- out += sprintf(out, "invalid_ipmb_responses: %u\n",
+ seq_printf(m, "invalid_ipmb_responses: %u\n",
ipmi_get_stat(intf, invalid_ipmb_responses));
- out += sprintf(out, "unhandled_ipmb_responses: %u\n",
+ seq_printf(m, "unhandled_ipmb_responses: %u\n",
ipmi_get_stat(intf, unhandled_ipmb_responses));
- out += sprintf(out, "sent_lan_commands: %u\n",
+ seq_printf(m, "sent_lan_commands: %u\n",
ipmi_get_stat(intf, sent_lan_commands));
- out += sprintf(out, "sent_lan_command_errs: %u\n",
+ seq_printf(m, "sent_lan_command_errs: %u\n",
ipmi_get_stat(intf, sent_lan_command_errs));
- out += sprintf(out, "retransmitted_lan_commands: %u\n",
+ seq_printf(m, "retransmitted_lan_commands: %u\n",
ipmi_get_stat(intf, retransmitted_lan_commands));
- out += sprintf(out, "timed_out_lan_commands: %u\n",
+ seq_printf(m, "timed_out_lan_commands: %u\n",
ipmi_get_stat(intf, timed_out_lan_commands));
- out += sprintf(out, "sent_lan_responses: %u\n",
+ seq_printf(m, "sent_lan_responses: %u\n",
ipmi_get_stat(intf, sent_lan_responses));
- out += sprintf(out, "handled_lan_responses: %u\n",
+ seq_printf(m, "handled_lan_responses: %u\n",
ipmi_get_stat(intf, handled_lan_responses));
- out += sprintf(out, "invalid_lan_responses: %u\n",
+ seq_printf(m, "invalid_lan_responses: %u\n",
ipmi_get_stat(intf, invalid_lan_responses));
- out += sprintf(out, "unhandled_lan_responses: %u\n",
+ seq_printf(m, "unhandled_lan_responses: %u\n",
ipmi_get_stat(intf, unhandled_lan_responses));
- out += sprintf(out, "handled_commands: %u\n",
+ seq_printf(m, "handled_commands: %u\n",
ipmi_get_stat(intf, handled_commands));
- out += sprintf(out, "invalid_commands: %u\n",
+ seq_printf(m, "invalid_commands: %u\n",
ipmi_get_stat(intf, invalid_commands));
- out += sprintf(out, "unhandled_commands: %u\n",
+ seq_printf(m, "unhandled_commands: %u\n",
ipmi_get_stat(intf, unhandled_commands));
- out += sprintf(out, "invalid_events: %u\n",
+ seq_printf(m, "invalid_events: %u\n",
ipmi_get_stat(intf, invalid_events));
- out += sprintf(out, "events: %u\n",
+ seq_printf(m, "events: %u\n",
ipmi_get_stat(intf, events));
- out += sprintf(out, "failed rexmit LAN msgs: %u\n",
+ seq_printf(m, "failed rexmit LAN msgs: %u\n",
ipmi_get_stat(intf, dropped_rexmit_lan_commands));
- out += sprintf(out, "failed rexmit IPMB msgs: %u\n",
+ seq_printf(m, "failed rexmit IPMB msgs: %u\n",
ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
+ return 0;
+}
- return (out - ((char *) page));
+static int smi_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
}
+
+static const struct file_operations smi_stats_proc_ops = {
+ .open = smi_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_PROC_FS */
int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
- read_proc_t *read_proc,
+ const struct file_operations *proc_ops,
void *data)
{
int rv = 0;
@@ -1975,22 +2078,18 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
- entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+ entry->name = kstrdup(name, GFP_KERNEL);
if (!entry->name) {
kfree(entry);
return -ENOMEM;
}
- strcpy(entry->name, name);
- file = create_proc_entry(name, 0, smi->proc_dir);
+ file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
if (!file) {
kfree(entry->name);
kfree(entry);
rv = -ENOMEM;
} else {
- file->data = data;
- file->read_proc = read_proc;
-
mutex_lock(&smi->proc_entry_lock);
/* Stick it on the list. */
entry->next = smi->proc_entries;
@@ -2015,17 +2114,17 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "stats",
- stat_file_read_proc,
+ &smi_stats_proc_ops,
smi);
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "ipmb",
- ipmb_file_read_proc,
+ &smi_ipmb_proc_ops,
smi);
if (rv == 0)
rv = ipmi_smi_add_proc_entry(smi, "version",
- version_file_read_proc,
+ &smi_version_proc_ops,
smi);
#endif /* CONFIG_PROC_FS */
@@ -2271,42 +2370,52 @@ static int create_files(struct bmc_device *bmc)
bmc->device_id_attr.attr.name = "device_id";
bmc->device_id_attr.attr.mode = S_IRUGO;
bmc->device_id_attr.show = device_id_show;
+ sysfs_attr_init(&bmc->device_id_attr.attr);
bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+ sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
bmc->revision_attr.attr.name = "revision";
bmc->revision_attr.attr.mode = S_IRUGO;
bmc->revision_attr.show = revision_show;
+ sysfs_attr_init(&bmc->revision_attr.attr);
bmc->firmware_rev_attr.attr.name = "firmware_revision";
bmc->firmware_rev_attr.attr.mode = S_IRUGO;
bmc->firmware_rev_attr.show = firmware_rev_show;
+ sysfs_attr_init(&bmc->firmware_rev_attr.attr);
bmc->version_attr.attr.name = "ipmi_version";
bmc->version_attr.attr.mode = S_IRUGO;
bmc->version_attr.show = ipmi_version_show;
+ sysfs_attr_init(&bmc->version_attr.attr);
bmc->add_dev_support_attr.attr.name = "additional_device_support";
bmc->add_dev_support_attr.attr.mode = S_IRUGO;
bmc->add_dev_support_attr.show = add_dev_support_show;
+ sysfs_attr_init(&bmc->add_dev_support_attr.attr);
bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
bmc->manufacturer_id_attr.show = manufacturer_id_show;
+ sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
bmc->product_id_attr.attr.name = "product_id";
bmc->product_id_attr.attr.mode = S_IRUGO;
bmc->product_id_attr.show = product_id_show;
+ sysfs_attr_init(&bmc->product_id_attr.attr);
bmc->guid_attr.attr.name = "guid";
bmc->guid_attr.attr.mode = S_IRUGO;
bmc->guid_attr.show = guid_show;
+ sysfs_attr_init(&bmc->guid_attr.attr);
bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+ sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
err = device_create_file(&bmc->dev->dev,
&bmc->device_id_attr);
@@ -2494,12 +2603,11 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
return rv;
}
- printk(KERN_INFO
- "ipmi: Found new BMC (man_id: 0x%6.6x, "
- " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
- bmc->id.manufacturer_id,
- bmc->id.product_id,
- bmc->id.device_id);
+ dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
+ "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+ bmc->id.manufacturer_id,
+ bmc->id.product_id,
+ bmc->id.device_id);
}
/*
@@ -2725,12 +2833,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
return;
}
-void ipmi_poll_interface(ipmi_user_t user)
+static void ipmi_poll(ipmi_smi_t intf)
{
- ipmi_smi_t intf = user->intf;
-
if (intf->handlers->poll)
intf->handlers->poll(intf->send_info);
+ /* In case something came in */
+ handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+ ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
@@ -2799,7 +2912,13 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
#endif
spin_lock_init(&intf->waiting_msgs_lock);
INIT_LIST_HEAD(&intf->waiting_msgs);
+ tasklet_init(&intf->recv_tasklet,
+ smi_recv_tasklet,
+ (unsigned long) intf);
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
spin_lock_init(&intf->events_lock);
+ atomic_set(&intf->event_waiters, 0);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
INIT_LIST_HEAD(&intf->waiting_events);
intf->waiting_events_count = 0;
mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3561,11 +3680,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
}
/*
- * Handle a new message. Return 1 if the message should be requeued,
+ * Handle a received message. Return 1 if the message should be requeued,
* 0 if the message should be freed, or -1 if the message should not
* be freed or requeued.
*/
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
int requeue;
@@ -3712,7 +3831,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
- /* It's an asyncronous event. */
+ /* It's an asynchronous event. */
requeue = handle_read_event_rsp(intf, msg);
} else {
/* It's a response from the local BMC. */
@@ -3723,12 +3842,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
return requeue;
}
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+ struct ipmi_smi_msg *smi_msg;
+ unsigned long flags = 0;
+ int rv;
+ int run_to_completion = intf->run_to_completion;
+
+ /* See if any waiting messages need to be processed. */
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ while (!list_empty(&intf->waiting_msgs)) {
+ smi_msg = list_entry(intf->waiting_msgs.next,
+ struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+ rv = handle_one_recv_msg(intf, smi_msg);
+ if (!run_to_completion)
+ spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+ if (rv == 0) {
+ /* Message handled */
+ ipmi_free_smi_msg(smi_msg);
+ } else if (rv < 0) {
+ /* Fatal error on the message, del but don't free. */
+ } else {
+ /*
+ * To preserve message order, quit if we
+ * can't handle a message.
+ */
+ list_add(&smi_msg->link, &intf->waiting_msgs);
+ break;
+ }
+ }
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+ /*
+ * If the pretimout count is non-zero, decrement one from it and
+ * deliver pretimeouts to all the users.
+ */
+ if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+ ipmi_user_t user;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(user, &intf->users, link) {
+ if (user->handler->ipmi_watchdog_pretimeout)
+ user->handler->ipmi_watchdog_pretimeout(
+ user->handler_data);
+ }
+ rcu_read_unlock();
+ }
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+ handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(ipmi_smi_t intf,
struct ipmi_smi_msg *msg)
{
unsigned long flags = 0; /* keep us warning-free. */
- int rv;
int run_to_completion;
@@ -3782,31 +3961,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
run_to_completion = intf->run_to_completion;
if (!run_to_completion)
spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- if (!list_empty(&intf->waiting_msgs)) {
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- goto out;
- }
+ list_add_tail(&msg->link, &intf->waiting_msgs);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- rv = handle_new_recv_msg(intf, msg);
- if (rv > 0) {
- /*
- * Could not handle the message now, just add it to a
- * list to handle later.
- */
- run_to_completion = intf->run_to_completion;
- if (!run_to_completion)
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_add_tail(&msg->link, &intf->waiting_msgs);
- if (!run_to_completion)
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
- } else if (rv == 0) {
- ipmi_free_smi_msg(msg);
- }
-
+ tasklet_schedule(&intf->recv_tasklet);
out:
return;
}
@@ -3814,16 +3973,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
{
- ipmi_user_t user;
-
- rcu_read_lock();
- list_for_each_entry_rcu(user, &intf->users, link) {
- if (!user->handler->ipmi_watchdog_pretimeout)
- continue;
-
- user->handler->ipmi_watchdog_pretimeout(user->handler_data);
- }
- rcu_read_unlock();
+ atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+ tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -3857,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct list_head *timeouts, long timeout_period,
- int slot, unsigned long *flags)
+ int slot, unsigned long *flags,
+ unsigned int *waiting_msgs)
{
struct ipmi_recv_msg *msg;
struct ipmi_smi_handlers *handlers;
@@ -3869,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
return;
ent->timeout -= timeout_period;
- if (ent->timeout > 0)
+ if (ent->timeout > 0) {
+ (*waiting_msgs)++;
return;
+ }
if (ent->retries_left == 0) {
/* The message has used all its retries. */
@@ -3887,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
struct ipmi_smi_msg *smi_msg;
/* More retries, send again. */
+ (*waiting_msgs)++;
+
/*
* Start with the max timer, set to normal timer after
* the message is sent.
@@ -3932,133 +4088,118 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
}
}
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
{
- ipmi_smi_t intf;
struct list_head timeouts;
struct ipmi_recv_msg *msg, *msg2;
- struct ipmi_smi_msg *smi_msg, *smi_msg2;
unsigned long flags;
int i;
+ unsigned int waiting_msgs = 0;
- rcu_read_lock();
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* See if any waiting messages need to be processed. */
- spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
- list_for_each_entry_safe(smi_msg, smi_msg2,
- &intf->waiting_msgs, link) {
- if (!handle_new_recv_msg(intf, smi_msg)) {
- list_del(&smi_msg->link);
- ipmi_free_smi_msg(smi_msg);
- } else {
- /*
- * To preserve message order, quit if we
- * can't handle a message.
- */
- break;
- }
- }
- spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-
- /*
- * Go through the seq table and find any messages that
- * have timed out, putting them in the timeouts
- * list.
- */
- INIT_LIST_HEAD(&timeouts);
- spin_lock_irqsave(&intf->seq_lock, flags);
- for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
- check_msg_timeout(intf, &(intf->seq_table[i]),
- &timeouts, timeout_period, i,
- &flags);
- spin_unlock_irqrestore(&intf->seq_lock, flags);
+ /*
+ * Go through the seq table and find any messages that
+ * have timed out, putting them in the timeouts
+ * list.
+ */
+ INIT_LIST_HEAD(&timeouts);
+ spin_lock_irqsave(&intf->seq_lock, flags);
+ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
+ check_msg_timeout(intf, &(intf->seq_table[i]),
+ &timeouts, timeout_period, i,
+ &flags, &waiting_msgs);
+ spin_unlock_irqrestore(&intf->seq_lock, flags);
- list_for_each_entry_safe(msg, msg2, &timeouts, link)
- deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
+ list_for_each_entry_safe(msg, msg2, &timeouts, link)
+ deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
- /*
- * Maintenance mode handling. Check the timeout
- * optimistically before we claim the lock. It may
- * mean a timeout gets missed occasionally, but that
- * only means the timeout gets extended by one period
- * in that case. No big deal, and it avoids the lock
- * most of the time.
- */
+ /*
+ * Maintenance mode handling. Check the timeout
+ * optimistically before we claim the lock. It may
+ * mean a timeout gets missed occasionally, but that
+ * only means the timeout gets extended by one period
+ * in that case. No big deal, and it avoids the lock
+ * most of the time.
+ */
+ if (intf->auto_maintenance_timeout > 0) {
+ spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
if (intf->auto_maintenance_timeout > 0) {
- spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
- if (intf->auto_maintenance_timeout > 0) {
- intf->auto_maintenance_timeout
- -= timeout_period;
- if (!intf->maintenance_mode
- && (intf->auto_maintenance_timeout <= 0)) {
- intf->maintenance_mode_enable = 0;
- maintenance_mode_update(intf);
- }
+ intf->auto_maintenance_timeout
+ -= timeout_period;
+ if (!intf->maintenance_mode
+ && (intf->auto_maintenance_timeout <= 0)) {
+ intf->maintenance_mode_enable = false;
+ maintenance_mode_update(intf);
}
- spin_unlock_irqrestore(&intf->maintenance_mode_lock,
- flags);
}
+ spin_unlock_irqrestore(&intf->maintenance_mode_lock,
+ flags);
}
- rcu_read_unlock();
+
+ tasklet_schedule(&intf->recv_tasklet);
+
+ return waiting_msgs;
}
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
{
- ipmi_smi_t intf;
struct ipmi_smi_handlers *handlers;
- rcu_read_lock();
- /*
- * Called from the timer, no need to check if handlers is
- * valid.
- */
- list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
- /* No event requests when in maintenance mode. */
- if (intf->maintenance_mode_enable)
- continue;
+ /* No event requests when in maintenance mode. */
+ if (intf->maintenance_mode_enable)
+ return;
- handlers = intf->handlers;
- if (handlers)
- handlers->request_events(intf->send_info);
- }
- rcu_read_unlock();
+ handlers = intf->handlers;
+ if (handlers)
+ handlers->request_events(intf->send_info);
}
static struct timer_list ipmi_timer;
-/* Call every ~100 ms. */
-#define IPMI_TIMEOUT_TIME 100
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
-
static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
static void ipmi_timeout(unsigned long data)
{
+ ipmi_smi_t intf;
+ int nt = 0;
+
if (atomic_read(&stop_operation))
return;
- ticks_to_req_ev--;
- if (ticks_to_req_ev == 0) {
- ipmi_request_event();
- ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
- }
+ rcu_read_lock();
+ list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+ int lnt = 0;
- ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+ if (atomic_read(&intf->event_waiters)) {
+ intf->ticks_to_req_ev--;
+ if (intf->ticks_to_req_ev == 0) {
+ ipmi_request_event(intf);
+ intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+ }
+ lnt++;
+ }
- mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+ lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
+
+ lnt = !!lnt;
+ if (lnt != intf->last_needs_timer &&
+ intf->handlers->set_need_watch)
+ intf->handlers->set_need_watch(intf->send_info, lnt);
+ intf->last_needs_timer = lnt;
+
+ nt += lnt;
+ }
+ rcu_read_unlock();
+
+ if (nt)
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}
+static void need_waiter(ipmi_smi_t intf)
+{
+ /* Racy, but worst case we start the timer twice. */
+ if (!timer_pending(&ipmi_timer))
+ mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
@@ -4112,12 +4253,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
#ifdef CONFIG_IPMI_PANIC_EVENT
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
+ atomic_dec(&panic_done_count);
}
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
+ atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
+ struct ipmi_addr *addr,
+ struct kernel_ipmi_msg *msg)
+{
+ struct ipmi_smi_msg smi_msg;
+ struct ipmi_recv_msg recv_msg;
+ int rv;
+
+ smi_msg.done = dummy_smi_done_handler;
+ recv_msg.done = dummy_recv_done_handler;
+ atomic_add(2, &panic_done_count);
+ rv = i_ipmi_request(NULL,
+ intf,
+ addr,
+ 0,
+ msg,
+ intf,
+ &smi_msg,
+ &recv_msg,
+ 0,
+ intf->channels[0].address,
+ intf->channels[0].lun,
+ 0, 1); /* Don't retry, and don't wait. */
+ if (rv)
+ atomic_sub(2, &panic_done_count);
+ while (atomic_read(&panic_done_count) != 0)
+ ipmi_poll(intf);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4156,8 +4333,6 @@ static void send_panic_events(char *str)
unsigned char data[16];
struct ipmi_system_interface_addr *si;
struct ipmi_addr addr;
- struct ipmi_smi_msg smi_msg;
- struct ipmi_recv_msg recv_msg;
si = (struct ipmi_system_interface_addr *) &addr;
si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4185,9 +4360,6 @@ static void send_panic_events(char *str)
data[7] = str[2];
}
- smi_msg.done = dummy_smi_done_handler;
- recv_msg.done = dummy_recv_done_handler;
-
/* For every registered interface, send the event. */
list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
if (!intf->handlers)
@@ -4197,18 +4369,7 @@ static void send_panic_events(char *str)
intf->run_to_completion = 1;
/* Send the event announcing the panic. */
intf->handlers->set_run_to_completion(intf->send_info, 1);
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
#ifdef CONFIG_IPMI_PANIC_STRING
@@ -4256,18 +4417,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = device_id_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* Don't retry, and don't wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
if (intf->local_event_generator) {
/* Request the event receiver from the local MC. */
@@ -4276,18 +4426,7 @@ static void send_panic_events(char *str)
msg.data = NULL;
msg.data_len = 0;
intf->null_user_handler = event_receiver_fetcher;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
intf->null_user_handler = NULL;
@@ -4344,18 +4483,7 @@ static void send_panic_events(char *str)
strncpy(data+5, p, 11);
p += size;
- i_ipmi_request(NULL,
- intf,
- &addr,
- 0,
- &msg,
- intf,
- &smi_msg,
- &recv_msg,
- 0,
- intf->channels[0].address,
- intf->channels[0].lun,
- 0, 1); /* no retry, and no wait. */
+ ipmi_panic_request_and_wait(intf, &addr, &msg);
}
}
#endif /* CONFIG_IPMI_PANIC_STRING */
@@ -4432,13 +4560,13 @@ static int ipmi_init_msghandler(void)
return 0;
}
-static __init int ipmi_init_msghandler_mod(void)
+static int __init ipmi_init_msghandler_mod(void)
{
ipmi_init_msghandler();
return 0;
}
-static __exit void cleanup_ipmi(void)
+static void __exit cleanup_ipmi(void)
{
int count;
@@ -4461,7 +4589,7 @@ static __exit void cleanup_ipmi(void)
del_timer_sync(&ipmi_timer);
#ifdef CONFIG_PROC_FS
- remove_proc_entry(proc_ipmi_root->name, NULL);
+ proc_remove(proc_ipmi_root);
#endif /* CONFIG_PROC_FS */
driver_unregister(&ipmidriver.driver);
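
The message-handler changes above stop processing received messages directly in the SMI callback and instead queue them for a tasklet, which also delivers watchdog pretimeouts. A minimal sketch of that deferral pattern, assuming hypothetical demo_* structures in place of the real ipmi_smi ones:

	#include <linux/interrupt.h>
	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct demo_intf {
		spinlock_t		lock;
		struct list_head	queue;
		struct tasklet_struct	recv_tasklet;
	};

	/* Runs in softirq context and drains whatever has been queued. */
	static void demo_recv_tasklet(unsigned long val)
	{
		struct demo_intf *intf = (struct demo_intf *)val;

		/* Walk intf->queue under intf->lock; on failure, put the
		   message back at the head so ordering is preserved. */
	}

	static void demo_init(struct demo_intf *intf)
	{
		spin_lock_init(&intf->lock);
		INIT_LIST_HEAD(&intf->queue);
		tasklet_init(&intf->recv_tasklet, demo_recv_tasklet,
			     (unsigned long)intf);
	}

	/* Called from the low-level driver: queue and defer, don't process. */
	static void demo_msg_received(struct demo_intf *intf, struct list_head *link)
	{
		unsigned long flags;

		spin_lock_irqsave(&intf->lock, flags);
		list_add_tail(link, &intf->queue);
		spin_unlock_irqrestore(&intf->lock, flags);
		tasklet_schedule(&intf->recv_tasklet);
	}

When run_to_completion is set (panic time), the real code skips the locking and drains the queue synchronously via polling instead of scheduling the tasklet.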
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c
index a261bd735df..9f2e3be2c5b 100644
--- a/drivers/char/ipmi/ipmi_poweroff.c
+++ b/drivers/char/ipmi/ipmi_poweroff.c
@@ -122,7 +122,7 @@ static struct ipmi_recv_msg halt_recv_msg = {
/*
- * Code to send a message and wait for the reponse.
+ * Code to send a message and wait for the response.
*/
static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
@@ -659,27 +659,24 @@ static struct ipmi_smi_watcher smi_watcher = {
#ifdef CONFIG_PROC_FS
#include <linux/sysctl.h>
-static ctl_table ipmi_table[] = {
- { .ctl_name = DEV_IPMI_POWEROFF_POWERCYCLE,
- .procname = "poweroff_powercycle",
+static struct ctl_table ipmi_table[] = {
+ { .procname = "poweroff_powercycle",
.data = &poweroff_powercycle,
.maxlen = sizeof(poweroff_powercycle),
.mode = 0644,
- .proc_handler = &proc_dointvec },
+ .proc_handler = proc_dointvec },
{ }
};
-static ctl_table ipmi_dir_table[] = {
- { .ctl_name = DEV_IPMI,
- .procname = "ipmi",
+static struct ctl_table ipmi_dir_table[] = {
+ { .procname = "ipmi",
.mode = 0555,
.child = ipmi_table },
{ }
};
-static ctl_table ipmi_root_table[] = {
- { .ctl_name = CTL_DEV,
- .procname = "dev",
+static struct ctl_table ipmi_root_table[] = {
+ { .procname = "dev",
.mode = 0555,
.child = ipmi_dir_table },
{ }
@@ -691,7 +688,7 @@ static struct ctl_table_header *ipmi_table_header;
/*
* Startup and shutdown functions.
*/
-static int ipmi_poweroff_init(void)
+static int __init ipmi_poweroff_init(void)
{
int rv;
@@ -725,7 +722,7 @@ static int ipmi_poweroff_init(void)
}
#ifdef MODULE
-static __exit void ipmi_poweroff_cleanup(void)
+static void __exit ipmi_poweroff_cleanup(void)
{
int rv;
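
The ipmi_poweroff changes above drop the legacy .ctl_name members so sysctl entries are identified by .procname alone. A minimal sketch of how such a table is registered and torn down; the demo_* names are placeholders and only ipmi_root_table comes from the patch:

	static struct ctl_table_header *demo_table_header;

	static int __init demo_sysctl_init(void)
	{
		demo_table_header = register_sysctl_table(ipmi_root_table);
		if (!demo_table_header)
			return -ENOMEM;
		return 0;
	}

	static void __exit demo_sysctl_exit(void)
	{
		unregister_sysctl_table(demo_table_header);
	}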
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 259644646b8..5d665680ae3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -41,8 +41,8 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <asm/system.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
@@ -57,17 +57,22 @@
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
+#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <asm/io.h>
#include "ipmi_si_sm.h"
-#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/string.h>
#include <linux/ctype.h>
-
-#ifdef CONFIG_PPC_OF
+#include <linux/pnp.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_PARISC
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
#endif
#define PFX "ipmi_si: "
@@ -106,15 +111,13 @@ enum si_type {
};
static char *si_to_str[] = { "kcs", "smic", "bt" };
-#define DEVICE_NAME "ipmi_si"
+static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
+ "ACPI", "SMBIOS", "PCI",
+ "device-tree", "default" };
-static struct platform_driver ipmi_driver = {
- .driver = {
- .name = DEVICE_NAME,
- .bus = &platform_bus_type
- }
-};
+#define DEVICE_NAME "ipmi_si"
+static struct platform_driver ipmi_driver;
/*
* Indexes into stats[] in smi_info below.
@@ -156,7 +159,7 @@ enum si_stat_indexes {
/* Number of watchdog pretimeouts. */
SI_STAT_watchdog_pretimeouts,
- /* Number of asyncronous messages received. */
+ /* Number of asynchronous messages received. */
SI_STAT_incoming_messages,
@@ -171,7 +174,6 @@ struct smi_info {
struct si_sm_handlers *handlers;
enum si_type si_type;
spinlock_t si_lock;
- spinlock_t msg_lock;
struct list_head xmit_msgs;
struct list_head hp_xmit_msgs;
struct ipmi_smi_msg *curr_msg;
@@ -187,7 +189,7 @@ struct smi_info {
int (*irq_setup)(struct smi_info *info);
void (*irq_cleanup)(struct smi_info *info);
unsigned int io_size;
- char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */
+ enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
void (*addr_source_cleanup)(struct smi_info *info);
void *addr_source_data;
@@ -215,7 +217,7 @@ struct smi_info {
unsigned char msg_flags;
/* Does the BMC have an event buffer? */
- char has_event_buffer;
+ bool has_event_buffer;
/*
* If set to true, this will request events the next time the
@@ -228,7 +230,7 @@ struct smi_info {
* call. Generally used after a panic to make sure stuff goes
* out.
*/
- int run_to_completion;
+ bool run_to_completion;
/* The I/O port of an SI interface. */
int port;
@@ -246,19 +248,25 @@ struct smi_info {
/* The timer for this si. */
struct timer_list si_timer;
+ /* This flag is set, if the timer is running (timer_pending() isn't enough) */
+ bool timer_running;
+
/* The time (in jiffies) the last timeout occurred at. */
unsigned long last_timeout_jiffies;
/* Used to gracefully stop the timer without race conditions. */
atomic_t stop_operation;
+ /* Are we waiting for the events, pretimeouts, received msgs? */
+ atomic_t need_watch;
+
/*
* The driver will disable interrupts when it gets into a
* situation where it cannot handle messages due to lack of
* memory. Once that situation clears up, it will re-enable
* interrupts.
*/
- int interrupt_disabled;
+ bool interrupt_disabled;
/* From the get device id response... */
struct ipmi_device_id device_id;
@@ -271,7 +279,7 @@ struct smi_info {
* True if we allocated the device, false if it came from
* someplace else (like PCI).
*/
- int dev_registered;
+ bool dev_registered;
/* Slave address, could be reported from DMI. */
unsigned char slave_addr;
@@ -282,6 +290,7 @@ struct smi_info {
struct task_struct *thread;
struct list_head link;
+ union ipmi_smi_info_union addr_info;
};
#define smi_inc_stat(smi, stat) \
@@ -293,11 +302,25 @@ struct smi_info {
static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
+#ifdef CONFIG_PCI
+static bool pci_registered;
+#endif
+#ifdef CONFIG_ACPI
+static bool pnp_registered;
+#endif
+#ifdef CONFIG_PARISC
+static bool parisc_registered;
+#endif
+
+static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static int num_max_busy_us;
-static int unload_when_empty = 1;
+static bool unload_when_empty = true;
+static int add_smi(struct smi_info *smi);
static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *to_clean);
+static void cleanup_ipmi_si(void);
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
@@ -308,11 +331,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
static void deliver_recv_msg(struct smi_info *smi_info,
struct ipmi_smi_msg *msg)
{
- /* Deliver the message to the upper layer with the lock
- released. */
- spin_unlock(&(smi_info->si_lock));
+ /* Deliver the message to the upper layer. */
ipmi_smi_msg_received(smi_info->intf, msg);
- spin_lock(&(smi_info->si_lock));
}
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -323,7 +343,7 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
cCode = IPMI_ERR_UNSPECIFIED;
/* else use it as is */
- /* Make it a reponse */
+ /* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = cCode;
@@ -341,13 +361,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
struct timeval t;
#endif
- /*
- * No need to save flags, we aleady have interrupts off and we
- * already hold the SMI lock.
- */
- if (!smi_info->run_to_completion)
- spin_lock(&(smi_info->msg_lock));
-
/* Pick the high priority queue first. */
if (!list_empty(&(smi_info->hp_xmit_msgs))) {
entry = smi_info->hp_xmit_msgs.next;
@@ -385,9 +398,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
rv = SI_SM_CALL_WITHOUT_DELAY;
}
out:
- if (!smi_info->run_to_completion)
- spin_unlock(&(smi_info->msg_lock));
-
return rv;
}
@@ -430,6 +440,13 @@ static void start_clear_flags(struct smi_info *smi_info)
smi_info->si_state = SI_CLEARING_FLAGS;
}
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+ smi_info->last_timeout_jiffies = jiffies;
+ mod_timer(&smi_info->si_timer, new_val);
+ smi_info->timer_running = true;
+}
+
/*
* When we have a situtaion where we run out of memory and cannot
* allocate messages, we just leave them in the BMC and run the system
@@ -440,7 +457,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
start_disable_irq(smi_info);
- smi_info->interrupt_disabled = 1;
+ smi_info->interrupt_disabled = true;
+ if (!atomic_read(&smi_info->stop_operation))
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
}
}
@@ -448,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
{
if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
start_enable_irq(smi_info);
- smi_info->interrupt_disabled = 0;
+ smi_info->interrupt_disabled = false;
}
}
@@ -461,9 +480,7 @@ static void handle_flags(struct smi_info *smi_info)
start_clear_flags(smi_info);
smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
- spin_unlock(&(smi_info->si_lock));
ipmi_smi_watchdog_pretimeout(smi_info->intf);
- spin_lock(&(smi_info->si_lock));
} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
/* Messages available. */
smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -572,9 +589,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
if (msg[2] != 0) {
/* Error clearing flags */
- printk(KERN_WARNING
- "ipmi_si: Error clearing flags: %2.2x\n",
- msg[2]);
+ dev_warn(smi_info->dev,
+ "Error clearing flags: %2.2x\n", msg[2]);
}
if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
start_enable_irq(smi_info);
@@ -666,9 +682,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed get, using polled mode.\n");
+ dev_warn(smi_info->dev,
+ "Couldn't get irq info: %x.\n", msg[2]);
+ dev_warn(smi_info->dev,
+ "Maybe ok, but ipmi might run very slowly.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -690,10 +707,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not enable interrupts"
- ", failed set, using polled mode.\n");
- }
+ dev_warn(smi_info->dev,
+ "Couldn't set irq info: %x.\n", msg[2]);
+ dev_warn(smi_info->dev,
+ "Maybe ok, but ipmi might run very slowly.\n");
+ } else
+ smi_info->interrupt_disabled = false;
smi_info->si_state = SI_NORMAL;
break;
}
@@ -705,9 +724,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed get.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed get.\n");
smi_info->si_state = SI_NORMAL;
} else {
msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -729,9 +747,8 @@ static void handle_transaction_done(struct smi_info *smi_info)
/* We got the flags from the SMI, now handle them. */
smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
if (msg[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Could not disable interrupts"
- ", failed set.\n");
+ dev_warn(smi_info->dev, "Could not disable interrupts"
+ ", failed set.\n");
}
smi_info->si_state = SI_NORMAL;
break;
@@ -848,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
return si_sm_result;
}
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+ if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+ if (smi_info->thread)
+ wake_up_process(smi_info->thread);
+
+ start_next_msg(smi_info);
+ smi_event_handler(smi_info, 0);
+ }
+}
+
static void sender(void *send_info,
struct ipmi_smi_msg *msg,
int priority)
@@ -895,20 +925,17 @@ static void sender(void *send_info,
return;
}
- spin_lock_irqsave(&smi_info->msg_lock, flags);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
if (priority > 0)
list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
else
list_add_tail(&msg->link, &smi_info->xmit_msgs);
- spin_unlock_irqrestore(&smi_info->msg_lock, flags);
- spin_lock_irqsave(&smi_info->si_lock, flags);
- if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
- start_next_msg(smi_info);
+ check_start_timer_thread(smi_info);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static void set_run_to_completion(void *send_info, int i_run_to_completion)
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
@@ -924,22 +951,95 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
}
}
+/*
+ * Use -1 in the nsec value of the busy waiting timespec to tell that
+ * we are spinning in kipmid looking for something and not delaying
+ * between checks
+ */
+static inline void ipmi_si_set_not_busy(struct timespec *ts)
+{
+ ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec *ts)
+{
+ return ts->tv_nsec != -1;
+}
+
+static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+ const struct smi_info *smi_info,
+ struct timespec *busy_until)
+{
+ unsigned int max_busy_us = 0;
+
+ if (smi_info->intf_num < num_max_busy_us)
+ max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+ if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+ ipmi_si_set_not_busy(busy_until);
+ else if (!ipmi_si_is_busy(busy_until)) {
+ getnstimeofday(busy_until);
+ timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+ } else {
+ struct timespec now;
+ getnstimeofday(&now);
+ if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+ ipmi_si_set_not_busy(busy_until);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts. It starts spinning
+ * when an operation is complete or until max_busy tells it to stop
+ * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
static int ipmi_thread(void *data)
{
struct smi_info *smi_info = data;
unsigned long flags;
enum si_sm_result smi_result;
+ struct timespec busy_until;
- set_user_nice(current, 19);
+ ipmi_si_set_not_busy(&busy_until);
+ set_user_nice(current, MAX_NICE);
while (!kthread_should_stop()) {
+ int busy_wait;
+
spin_lock_irqsave(&(smi_info->si_lock), flags);
smi_result = smi_event_handler(smi_info, 0);
+
+ /*
+ * If the driver is doing something, there is a possible
+	 * race with the timer.  If the timer handler sees idle,
+ * and the thread here sees something else, the timer
+ * handler won't restart the timer even though it is
+ * required. So start it here if necessary.
+ */
+ if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+ smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+ busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+ &busy_until);
if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
; /* do nothing */
- else if (smi_result == SI_SM_CALL_WITH_DELAY)
+ else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
schedule();
- else
+ else if (smi_result == SI_SM_IDLE) {
+ if (atomic_read(&smi_info->need_watch)) {
+ schedule_timeout_interruptible(100);
+ } else {
+ /* Wait to be woken up when we are needed. */
+ __set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ } else
schedule_timeout_interruptible(1);
}
return 0;
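
The busy-wait bookkeeping above marks "not busy" with tv_nsec == -1 and otherwise spins until a deadline derived from kipmid_max_busy_us. A minimal userspace sketch of the same pattern, assuming clock_gettime() in place of the kernel's getnstimeofday()/timespec_add_ns() and a hypothetical 500 us budget (the range suggested by the module parameter description further down); names here are illustrative and not part of this patch:

#include <stdio.h>
#include <time.h>

/* tv_nsec == -1 marks "not currently busy-waiting" */
static void set_not_busy(struct timespec *ts) { ts->tv_nsec = -1; }
static int is_busy(const struct timespec *ts) { return ts->tv_nsec != -1; }

/* Return 1 while spinning is still allowed, 0 once the deadline passes. */
static int busy_wait_allowed(struct timespec *busy_until, unsigned int max_busy_us)
{
	struct timespec now;

	if (max_busy_us == 0) {			/* feature disabled */
		set_not_busy(busy_until);
		return 0;
	}
	if (!is_busy(busy_until)) {		/* start a new busy window */
		clock_gettime(CLOCK_MONOTONIC, busy_until);
		busy_until->tv_nsec += (long)max_busy_us * 1000;
		while (busy_until->tv_nsec >= 1000000000L) {	/* normalize */
			busy_until->tv_nsec -= 1000000000L;
			busy_until->tv_sec++;
		}
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > busy_until->tv_sec ||
	    (now.tv_sec == busy_until->tv_sec &&
	     now.tv_nsec > busy_until->tv_nsec)) {
		set_not_busy(busy_until);	/* deadline passed: stop spinning */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct timespec busy_until;
	int spins = 0;

	set_not_busy(&busy_until);
	while (busy_wait_allowed(&busy_until, 500))
		spins++;
	printf("spun %d times before giving up\n", spins);
	return 0;
}

In the driver, this is the check that lets kipmid keep calling the state machine without scheduling until the per-interface budget is spent.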
@@ -949,16 +1049,19 @@ static int ipmi_thread(void *data)
static void poll(void *send_info)
{
struct smi_info *smi_info = send_info;
- unsigned long flags;
+ unsigned long flags = 0;
+ bool run_to_completion = smi_info->run_to_completion;
/*
* Make sure there is some delay in the poll loop so we can
* drive time forward and timeout things.
*/
udelay(10);
- spin_lock_irqsave(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_lock_irqsave(&smi_info->si_lock, flags);
smi_event_handler(smi_info, 10);
- spin_unlock_irqrestore(&smi_info->si_lock, flags);
+ if (!run_to_completion)
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
static void request_events(void *send_info)
@@ -972,6 +1075,17 @@ static void request_events(void *send_info)
atomic_set(&smi_info->req_events, 1);
}
+static void set_need_watch(void *send_info, bool enable)
+{
+ struct smi_info *smi_info = send_info;
+ unsigned long flags;
+
+ atomic_set(&smi_info->need_watch, enable);
+ spin_lock_irqsave(&smi_info->si_lock, flags);
+ check_start_timer_thread(smi_info);
+ spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
static int initialized;
static void smi_timeout(unsigned long data)
@@ -981,6 +1095,7 @@ static void smi_timeout(unsigned long data)
unsigned long flags;
unsigned long jiffies_now;
long time_diff;
+ long timeout;
#ifdef DEBUG_TIMING
struct timeval t;
#endif
@@ -995,15 +1110,11 @@ static void smi_timeout(unsigned long data)
* SI_USEC_PER_JIFFY);
smi_result = smi_event_handler(smi_info, time_diff);
- spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
- smi_info->last_timeout_jiffies = jiffies_now;
-
if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
/* Running with interrupts, only do long timeouts. */
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
smi_inc_stat(smi_info, long_timeouts);
- goto do_add_timer;
+ goto do_mod_timer;
}
/*
@@ -1012,14 +1123,18 @@ static void smi_timeout(unsigned long data)
*/
if (smi_result == SI_SM_CALL_WITH_DELAY) {
smi_inc_stat(smi_info, short_timeouts);
- smi_info->si_timer.expires = jiffies + 1;
+ timeout = jiffies + 1;
} else {
smi_inc_stat(smi_info, long_timeouts);
- smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
+ timeout = jiffies + SI_TIMEOUT_JIFFIES;
}
- do_add_timer:
- add_timer(&(smi_info->si_timer));
+ do_mod_timer:
+ if (smi_result != SI_SM_IDLE)
+ smi_mod_timer(smi_info, timeout);
+ else
+ smi_info->timer_running = false;
+ spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1067,8 +1182,7 @@ static int smi_start_processing(void *send_info,
/* Set up the timer that drives the interface. */
setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
- new_smi->last_timeout_jiffies = jiffies;
- mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+ smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/*
* Check if the user forcefully enabled the daemon.
@@ -1086,10 +1200,10 @@ static int smi_start_processing(void *send_info,
new_smi->thread = kthread_run(ipmi_thread, new_smi,
"kipmi%d", new_smi->intf_num);
if (IS_ERR(new_smi->thread)) {
- printk(KERN_NOTICE "ipmi_si_intf: Could not start"
- " kernel thread due to error %ld, only using"
- " timers to drive the interface\n",
- PTR_ERR(new_smi->thread));
+ dev_notice(new_smi->dev, "Could not start"
+ " kernel thread due to error %ld, only using"
+ " timers to drive the interface\n",
+ PTR_ERR(new_smi->thread));
new_smi->thread = NULL;
}
}
@@ -1097,7 +1211,19 @@ static int smi_start_processing(void *send_info,
return 0;
}
-static void set_maintenance_mode(void *send_info, int enable)
+static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
+{
+ struct smi_info *smi = send_info;
+
+ data->addr_src = smi->addr_source;
+ data->dev = smi->dev;
+ data->addr_info = smi->addr_info;
+ get_device(smi->dev);
+
+ return 0;
+}
+
+static void set_maintenance_mode(void *send_info, bool enable)
{
struct smi_info *smi_info = send_info;
@@ -1108,8 +1234,10 @@ static void set_maintenance_mode(void *send_info, int enable)
static struct ipmi_smi_handlers handlers = {
.owner = THIS_MODULE,
.start_processing = smi_start_processing,
+ .get_smi_info = get_smi_info,
.sender = sender,
.request_events = request_events,
+ .set_need_watch = set_need_watch,
.set_maintenance_mode = set_maintenance_mode,
.set_run_to_completion = set_run_to_completion,
.poll = poll,
@@ -1127,7 +1255,17 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
-static int si_trydefaults = 1;
+#ifdef CONFIG_ACPI
+static bool si_tryacpi = 1;
+#endif
+#ifdef CONFIG_DMI
+static bool si_trydmi = 1;
+#endif
+static bool si_tryplatform = 1;
+#ifdef CONFIG_PCI
+static bool si_trypci = 1;
+#endif
+static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char si_type_str[MAX_SI_TYPE_STR];
@@ -1143,7 +1281,7 @@ static int regsizes[SI_MAX_PARMS];
static unsigned int num_regsizes;
static int regshifts[SI_MAX_PARMS];
static unsigned int num_regshifts;
-static int slave_addrs[SI_MAX_PARMS];
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
static unsigned int num_slave_addrs;
#define IPMI_IO_ADDR_SPACE 0
@@ -1157,6 +1295,25 @@ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
" Documentation/IPMI.txt in the kernel sources for the"
" gory details.");
+#ifdef CONFIG_ACPI
+module_param_named(tryacpi, si_tryacpi, bool, 0);
+MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via ACPI");
+#endif
+#ifdef CONFIG_DMI
+module_param_named(trydmi, si_trydmi, bool, 0);
+MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via DMI");
+#endif
+module_param_named(tryplatform, si_tryplatform, bool, 0);
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via platform"
+ " interfaces like openfirmware");
+#ifdef CONFIG_PCI
+module_param_named(trypci, si_trypci, bool, 0);
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
+ " default scan of the interfaces identified via pci");
+#endif
module_param_named(trydefaults, si_trydefaults, bool, 0);
MODULE_PARM_DESC(trydefaults, "Setting this to 'false' will disable the"
" default scan of the KCS and SMIC interface at the standard"
@@ -1207,10 +1364,15 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
" disabled(0). Normally the IPMI driver auto-detects"
" this, but the value may be overridden by this parm.");
-module_param(unload_when_empty, int, 0);
+module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
" specified or found, default is 1. Setting to 0"
" is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+ "Max time (in microseconds) to busy-wait for IPMI data before"
+ " sleeping. 0 (default) means to wait forever. Set to 100-500"
+ " if kipmid is using up a lot of CPU time.");
static void std_irq_cleanup(struct smi_info *info)
@@ -1231,7 +1393,7 @@ static int std_irq_setup(struct smi_info *info)
if (info->si_type == SI_BT) {
rv = request_irq(info->irq,
si_bt_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (!rv)
@@ -1241,18 +1403,17 @@ static int std_irq_setup(struct smi_info *info)
} else
rv = request_irq(info->irq,
si_irq_handler,
- IRQF_SHARED | IRQF_DISABLED,
+ IRQF_SHARED,
DEVICE_NAME,
info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim interrupt %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim interrupt %d,"
+ " running polled\n",
+ DEVICE_NAME, info->irq);
info->irq = 0;
} else {
info->irq_cleanup = std_irq_cleanup;
- printk(" Using irq %d\n", info->irq);
+ dev_info(info->dev, "Using irq %d\n", info->irq);
}
return rv;
@@ -1343,8 +1504,8 @@ static int port_setup(struct smi_info *info)
info->io.outputb = port_outl;
break;
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1466,8 +1627,8 @@ static int mem_setup(struct smi_info *info)
break;
#endif
default:
- printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
- info->io.regsize);
+ dev_warn(info->dev, "Invalid register size: %d\n",
+ info->io.regsize);
return -EINVAL;
}
@@ -1572,6 +1733,15 @@ static int check_hotmod_int_op(const char *curr, const char *option,
return 0;
}
+static struct smi_info *smi_info_alloc(void)
+{
+ struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
+
+ if (info)
+ spin_lock_init(&info->si_lock);
+ return info;
+}
+
static int hotmod_handler(const char *val, struct kernel_param *kp)
{
char *str = kstrdup(val, GFP_KERNEL);
@@ -1606,7 +1776,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
regsize = 1;
regshift = 0;
irq = 0;
- ipmb = 0x20;
+ ipmb = 0; /* Choose the default if not specified */
next = strchr(curr, ':');
if (next) {
@@ -1686,13 +1856,13 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
}
if (op == HM_ADD) {
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
rv = -ENOMEM;
goto out;
}
- info->addr_source = "hotmod";
+ info->addr_source = SI_HOTMOD;
info->si_type = si_type;
info->io.addr_data = addr;
info->io.addr_type = addr_space;
@@ -1714,7 +1884,16 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
info->irq_setup = std_irq_setup;
info->slave_addr = ipmb;
- try_smi_init(info);
+ rv = add_smi(info);
+ if (rv) {
+ kfree(info);
+ goto out;
+ }
+ rv = try_smi_init(info);
+ if (rv) {
+ cleanup_one_si(info);
+ goto out;
+ }
} else {
/* remove */
struct smi_info *e, *tmp_e;
@@ -1737,8 +1916,9 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
return rv;
}
-static __devinit void hardcode_find_bmc(void)
+static int hardcode_find_bmc(void)
{
+ int ret = -ENODEV;
int i;
struct smi_info *info;
@@ -1746,11 +1926,12 @@ static __devinit void hardcode_find_bmc(void)
if (!ports[i] && !addrs[i])
continue;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
- return;
+ return -ENOMEM;
- info->addr_source = "hardcoded";
+ info->addr_source = SI_HARDCODED;
+ printk(KERN_INFO PFX "probing via hardcoded address\n");
if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) {
info->si_type = SI_KCS;
@@ -1759,8 +1940,7 @@ static __devinit void hardcode_find_bmc(void)
} else if (strcmp(si_type[i], "bt") == 0) {
info->si_type = SI_BT;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
+ printk(KERN_WARNING PFX "Interface type specified "
"for interface %d, was invalid: %s\n",
i, si_type[i]);
kfree(info);
@@ -1778,11 +1958,9 @@ static __devinit void hardcode_find_bmc(void)
info->io.addr_data = addrs[i];
info->io.addr_type = IPMI_MEM_ADDR_SPACE;
} else {
- printk(KERN_WARNING
- "ipmi_si: Interface type specified "
- "for interface %d, "
- "but port and address were not set or "
- "set to zero.\n", i);
+ printk(KERN_WARNING PFX "Interface type specified "
+ "for interface %d, but port and address were "
+ "not set or set to zero.\n", i);
kfree(info);
continue;
}
@@ -1798,9 +1976,17 @@ static __devinit void hardcode_find_bmc(void)
info->irq = irqs[i];
if (info->irq)
info->irq_setup = std_irq_setup;
+ info->slave_addr = slave_addrs[i];
- try_smi_init(info);
+ if (!add_smi(info)) {
+ if (try_smi_init(info))
+ cleanup_one_si(info);
+ ret = 0;
+ } else {
+ kfree(info);
+ }
}
+ return ret;
}
#ifdef CONFIG_ACPI
@@ -1815,7 +2001,8 @@ static __devinit void hardcode_find_bmc(void)
static int acpi_failure;
/* For GPE-type interrupts. */
-static u32 ipmi_acpi_gpe(void *context)
+static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
+ u32 gpe_number, void *context)
{
struct smi_info *smi_info = context;
unsigned long flags;
@@ -1859,23 +2046,20 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
&ipmi_acpi_gpe,
info);
if (status != AE_OK) {
- printk(KERN_WARNING
- "ipmi_si: %s unable to claim ACPI GPE %d,"
- " running polled\n",
- DEVICE_NAME, info->irq);
+ dev_warn(info->dev, "%s unable to claim ACPI GPE %d,"
+ " running polled\n", DEVICE_NAME, info->irq);
info->irq = 0;
return -EINVAL;
} else {
info->irq_cleanup = acpi_gpe_irq_cleanup;
- printk(" Using ACPI GPE %d\n", info->irq);
+ dev_info(info->dev, "Using ACPI GPE %d\n", info->irq);
return 0;
}
}
/*
* Defined at
- * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
- * Docs/TechPapers/IA64/hpspmi.pdf
+ * http://h21007.www2.hp.com/portal/download/files/unprot/hpspmi.pdf
*/
struct SPMITable {
s8 Signature[4];
@@ -1919,28 +2103,24 @@ struct SPMITable {
s8 spmi_id[1]; /* A '\0' terminated array starts here. */
};
-static __devinit int try_init_acpi(struct SPMITable *spmi)
+static int try_init_spmi(struct SPMITable *spmi)
{
struct smi_info *info;
- u8 addr_space;
+ int rv;
if (spmi->IPMIlegacy != 1) {
- printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
- return -ENODEV;
+ printk(KERN_INFO PFX "Bad SPMI legacy %d\n", spmi->IPMIlegacy);
+ return -ENODEV;
}
- if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- addr_space = IPMI_MEM_ADDR_SPACE;
- else
- addr_space = IPMI_IO_ADDR_SPACE;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n");
+ printk(KERN_ERR PFX "Could not allocate SI data (3)\n");
return -ENOMEM;
}
- info->addr_source = "ACPI";
+ info->addr_source = SI_SPMI;
+ printk(KERN_INFO PFX "probing via SPMI\n");
/* Figure out the interface type. */
switch (spmi->InterfaceType) {
@@ -1954,8 +2134,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
info->si_type = SI_BT;
break;
default:
- printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n",
- spmi->InterfaceType);
+ printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
+ spmi->InterfaceType);
kfree(info);
return -EIO;
}
@@ -1991,18 +2171,24 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
info->io.addr_type = IPMI_IO_ADDR_SPACE;
} else {
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown ACPI I/O Address type\n");
+ printk(KERN_WARNING PFX "Unknown ACPI I/O Address type\n");
return -EIO;
}
info->io.addr_data = spmi->addr.address;
- try_smi_init(info);
+ pr_info("ipmi_si: SPMI: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
- return 0;
+ rv = add_smi(info);
+ if (rv)
+ kfree(info);
+
+ return rv;
}
-static __devinit void acpi_find_bmc(void)
+static void spmi_find_bmc(void)
{
acpi_status status;
struct SPMITable *spmi;
@@ -2020,9 +2206,132 @@ static __devinit void acpi_find_bmc(void)
if (status != AE_OK)
return;
- try_init_acpi(spmi);
+ try_init_spmi(spmi);
+ }
+}
+
+static int ipmi_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+{
+ struct acpi_device *acpi_dev;
+ struct smi_info *info;
+ struct resource *res, *res_second;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long tmp;
+ int rv;
+
+ acpi_dev = pnp_acpi_device(dev);
+ if (!acpi_dev)
+ return -ENODEV;
+
+ info = smi_info_alloc();
+ if (!info)
+ return -ENOMEM;
+
+ info->addr_source = SI_ACPI;
+ printk(KERN_INFO PFX "probing via ACPI\n");
+
+ handle = acpi_dev->handle;
+ info->addr_info.acpi_info.acpi_handle = handle;
+
+ /* _IFT tells us the interface type: KCS, BT, etc */
+ status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ goto err_free;
+
+ switch (tmp) {
+ case 1:
+ info->si_type = SI_KCS;
+ break;
+ case 2:
+ info->si_type = SI_SMIC;
+ break;
+ case 3:
+ info->si_type = SI_BT;
+ break;
+ default:
+ dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
+ goto err_free;
+ }
+
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (res) {
+ info->io_setup = port_setup;
+ info->io.addr_type = IPMI_IO_ADDR_SPACE;
+ } else {
+ res = pnp_get_resource(dev, IORESOURCE_MEM, 0);
+ if (res) {
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ }
+ }
+ if (!res) {
+ dev_err(&dev->dev, "no I/O or memory address\n");
+ goto err_free;
+ }
+ info->io.addr_data = res->start;
+
+ info->io.regspacing = DEFAULT_REGSPACING;
+ res_second = pnp_get_resource(dev,
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ?
+ IORESOURCE_IO : IORESOURCE_MEM,
+ 1);
+ if (res_second) {
+ if (res_second->start > info->io.addr_data)
+ info->io.regspacing = res_second->start - info->io.addr_data;
+ }
+ info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regshift = 0;
+
+ /* If _GPE exists, use it; otherwise use standard interrupts */
+ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp);
+ if (ACPI_SUCCESS(status)) {
+ info->irq = tmp;
+ info->irq_setup = acpi_gpe_irq_setup;
+ } else if (pnp_irq_valid(dev, 0)) {
+ info->irq = pnp_irq(dev, 0);
+ info->irq_setup = std_irq_setup;
}
+
+ info->dev = &dev->dev;
+ pnp_set_drvdata(dev, info);
+
+ dev_info(info->dev, "%pR regsize %d spacing %d irq %d\n",
+ res, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ rv = add_smi(info);
+ if (rv)
+ kfree(info);
+
+ return rv;
+
+err_free:
+ kfree(info);
+ return -EINVAL;
}
+
+static void ipmi_pnp_remove(struct pnp_dev *dev)
+{
+ struct smi_info *info = pnp_get_drvdata(dev);
+
+ cleanup_one_si(info);
+}
+
+static const struct pnp_device_id pnp_dev_table[] = {
+ {"IPI0001", 0},
+ {"", 0},
+};
+
+static struct pnp_driver ipmi_pnp_driver = {
+ .name = DEVICE_NAME,
+ .probe = ipmi_pnp_probe,
+ .remove = ipmi_pnp_remove,
+ .id_table = pnp_dev_table,
+};
+
+MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
#endif
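
In the ipmi_pnp_probe() hunk above, a second I/O or memory resource, when present, gives the register spacing as the distance between the first two register addresses. A small sketch of that computation with hypothetical ACPI-reported addresses (0xca2 and 0xca6 are illustrative values, not taken from any particular firmware):

#include <stdio.h>

int main(void)
{
	unsigned long first = 0xca2, second = 0xca6;	/* illustrative resources */
	unsigned int regspacing = 1;			/* DEFAULT_REGSPACING */

	if (second > first)
		regspacing = second - first;
	printf("regspacing = %u\n", regspacing);	/* prints 4 */
	return 0;
}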
#ifdef CONFIG_DMI
@@ -2035,7 +2344,7 @@ struct dmi_ipmi_data {
u8 slave_addr;
};
-static int __devinit decode_dmi(const struct dmi_header *dm,
+static int decode_dmi(const struct dmi_header *dm,
struct dmi_ipmi_data *dmi)
{
const u8 *data = (const u8 *)dm;
@@ -2097,18 +2406,18 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
return 0;
}
-static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
+static void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
{
struct smi_info *info;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
- printk(KERN_ERR
- "ipmi_si: Could not allocate SI data\n");
+ printk(KERN_ERR PFX "Could not allocate SI data\n");
return;
}
- info->addr_source = "SMBIOS";
+ info->addr_source = SI_SMBIOS;
+ printk(KERN_INFO PFX "probing via SMBIOS\n");
switch (ipmi_data->type) {
case 0x01: /* KCS */
@@ -2138,8 +2447,7 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
default:
kfree(info);
- printk(KERN_WARNING
- "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n",
+ printk(KERN_WARNING PFX "Unknown SMBIOS I/O Address type: %d\n",
ipmi_data->addr_space);
return;
}
@@ -2157,10 +2465,16 @@ static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data)
if (info->irq)
info->irq_setup = std_irq_setup;
- try_smi_init(info);
+ pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n",
+ (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? "io" : "mem",
+ info->io.addr_data, info->io.regsize, info->io.regspacing,
+ info->irq);
+
+ if (add_smi(info))
+ kfree(info);
}
-static void __devinit dmi_find_bmc(void)
+static void dmi_find_bmc(void)
{
const struct dmi_device *dev = NULL;
struct dmi_ipmi_data data;
@@ -2196,19 +2510,51 @@ static void ipmi_pci_cleanup(struct smi_info *info)
pci_disable_device(pdev);
}
-static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
+static int ipmi_pci_probe_regspacing(struct smi_info *info)
+{
+ if (info->si_type == SI_KCS) {
+ unsigned char status;
+ int regspacing;
+
+ info->io.regsize = DEFAULT_REGSIZE;
+ info->io.regshift = 0;
+ info->io_size = 2;
+ info->handlers = &kcs_smi_handlers;
+
+ /* detect 1, 4, 16byte spacing */
+ for (regspacing = DEFAULT_REGSPACING; regspacing <= 16;) {
+ info->io.regspacing = regspacing;
+ if (info->io_setup(info)) {
+ dev_err(info->dev,
+ "Could not setup I/O space\n");
+ return DEFAULT_REGSPACING;
+ }
+ /* write invalid cmd */
+ info->io.outputb(&info->io, 1, 0x10);
+ /* read status back */
+ status = info->io.inputb(&info->io, 1);
+ info->io_cleanup(info);
+ if (status)
+ return regspacing;
+ regspacing *= 4;
+ }
+ }
+ return DEFAULT_REGSPACING;
+}
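
The PCI register-spacing probe above tries spacings of 1, 4 and 16 bytes, writing a known-invalid command and accepting the first spacing at which the KCS status register reads back non-zero. A self-contained sketch of that search, with the hardware accessors replaced by a stub (fake_io and its status behaviour are assumptions for illustration, not real BMC behaviour):

#include <stdio.h>

struct fake_io {
	int spacing;		/* spacing the simulated device really uses */
};

/* Stand-ins for info->io.outputb/inputb; register index 1 is the status reg. */
static void io_write(struct fake_io *io, int spacing, int reg, unsigned char val)
{
	(void)io; (void)spacing; (void)reg; (void)val;	/* write is ignored here */
}

static unsigned char io_read(struct fake_io *io, int spacing, int reg)
{
	/* Simulated device: status reads non-zero only at the right spacing. */
	return (reg == 1 && spacing == io->spacing) ? 0xC0 : 0x00;
}

static int probe_regspacing(struct fake_io *io)
{
	int spacing;

	for (spacing = 1; spacing <= 16; spacing *= 4) {
		io_write(io, spacing, 1, 0x10);		/* write an invalid command */
		if (io_read(io, spacing, 1))		/* non-zero status: spacing works */
			return spacing;
	}
	return 1;					/* fall back to the default */
}

int main(void)
{
	struct fake_io io = { .spacing = 4 };

	printf("detected register spacing: %d\n", probe_regspacing(&io));
	return 0;
}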
+
+static int ipmi_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int rv;
int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK;
struct smi_info *info;
- int first_reg_offset = 0;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return -ENOMEM;
- info->addr_source = "PCI";
+ info->addr_source = SI_PCI;
+	dev_info(&pdev->dev, "probing via PCI\n");
switch (class_type) {
case PCI_ERMC_CLASSCODE_TYPE_SMIC:
@@ -2225,15 +2571,13 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
default:
kfree(info);
- printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n",
- pci_name(pdev), class_type);
+ dev_info(&pdev->dev, "Unknown IPMI type: %d\n", class_type);
return -ENOMEM;
}
rv = pci_enable_device(pdev);
if (rv) {
- printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "couldn't enable PCI device\n");
kfree(info);
return rv;
}
@@ -2241,9 +2585,6 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->addr_source_cleanup = ipmi_pci_cleanup;
info->addr_source_data = pdev;
- if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID)
- first_reg_offset = 1;
-
if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
info->io_setup = port_setup;
info->io.addr_type = IPMI_IO_ADDR_SPACE;
@@ -2253,8 +2594,8 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
}
info->io.addr_data = pci_resource_start(pdev, 0);
- info->io.regspacing = DEFAULT_REGSPACING;
- info->io.regsize = DEFAULT_REGSPACING;
+ info->io.regspacing = ipmi_pci_probe_regspacing(info);
+ info->io.regsize = DEFAULT_REGSIZE;
info->io.regshift = 0;
info->irq = pdev->irq;
@@ -2264,26 +2605,25 @@ static int __devinit ipmi_pci_probe(struct pci_dev *pdev,
info->dev = &pdev->dev;
pci_set_drvdata(pdev, info);
- return try_smi_init(info);
-}
+ dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n",
+ &pdev->resource[0], info->io.regsize, info->io.regspacing,
+ info->irq);
-static void __devexit ipmi_pci_remove(struct pci_dev *pdev)
-{
- struct smi_info *info = pci_get_drvdata(pdev);
- cleanup_one_si(info);
-}
+ rv = add_smi(info);
+ if (rv) {
+ kfree(info);
+ pci_disable_device(pdev);
+ }
-#ifdef CONFIG_PM
-static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- return 0;
+ return rv;
}
-static int ipmi_pci_resume(struct pci_dev *pdev)
+static void ipmi_pci_remove(struct pci_dev *pdev)
{
- return 0;
+ struct smi_info *info = pci_get_drvdata(pdev);
+ cleanup_one_si(info);
+ pci_disable_device(pdev);
}
-#endif
static struct pci_device_id ipmi_pci_devices[] = {
{ PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) },
@@ -2296,27 +2636,27 @@ static struct pci_driver ipmi_pci_driver = {
.name = DEVICE_NAME,
.id_table = ipmi_pci_devices,
.probe = ipmi_pci_probe,
- .remove = __devexit_p(ipmi_pci_remove),
-#ifdef CONFIG_PM
- .suspend = ipmi_pci_suspend,
- .resume = ipmi_pci_resume,
-#endif
+ .remove = ipmi_pci_remove,
};
#endif /* CONFIG_PCI */
-
-#ifdef CONFIG_PPC_OF
-static int __devinit ipmi_of_probe(struct of_device *dev,
- const struct of_device_id *match)
+static struct of_device_id ipmi_match[];
+static int ipmi_probe(struct platform_device *dev)
{
+#ifdef CONFIG_OF
+ const struct of_device_id *match;
struct smi_info *info;
struct resource resource;
- const int *regsize, *regspacing, *regshift;
- struct device_node *np = dev->node;
+ const __be32 *regsize, *regspacing, *regshift;
+ struct device_node *np = dev->dev.of_node;
int ret;
int proplen;
- dev_info(&dev->dev, PFX "probing via device tree\n");
+ dev_info(&dev->dev, "probing via device tree\n");
+
+ match = of_match_device(ipmi_match, &dev->dev);
+ if (!match)
+ return -EINVAL;
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
@@ -2342,16 +2682,16 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
return -EINVAL;
}
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info) {
dev_err(&dev->dev,
- PFX "could not allocate memory for OF probe\n");
+ "could not allocate memory for OF probe\n");
return -ENOMEM;
}
info->si_type = (enum si_type) match->data;
- info->addr_source = "device-tree";
+ info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;
if (resource.flags & IORESOURCE_IO) {
@@ -2364,25 +2704,33 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
info->io.addr_data = resource.start;
- info->io.regsize = regsize ? *regsize : DEFAULT_REGSIZE;
- info->io.regspacing = regspacing ? *regspacing : DEFAULT_REGSPACING;
- info->io.regshift = regshift ? *regshift : 0;
+ info->io.regsize = regsize ? be32_to_cpup(regsize) : DEFAULT_REGSIZE;
+ info->io.regspacing = regspacing ? be32_to_cpup(regspacing) : DEFAULT_REGSPACING;
+ info->io.regshift = regshift ? be32_to_cpup(regshift) : 0;
- info->irq = irq_of_parse_and_map(dev->node, 0);
+ info->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
info->dev = &dev->dev;
- dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %x\n",
+ dev_dbg(&dev->dev, "addr 0x%lx regsize %d spacing %d irq %d\n",
info->io.addr_data, info->io.regsize, info->io.regspacing,
info->irq);
- dev->dev.driver_data = (void *) info;
+ dev_set_drvdata(&dev->dev, info);
- return try_smi_init(info);
+ ret = add_smi(info);
+ if (ret) {
+ kfree(info);
+ return ret;
+ }
+#endif
+ return 0;
}
-static int __devexit ipmi_of_remove(struct of_device *dev)
+static int ipmi_remove(struct platform_device *dev)
{
- cleanup_one_si(dev->dev.driver_data);
+#ifdef CONFIG_OF
+ cleanup_one_si(dev_get_drvdata(&dev->dev));
+#endif
return 0;
}
@@ -2397,13 +2745,73 @@ static struct of_device_id ipmi_match[] =
{},
};
-static struct of_platform_driver ipmi_of_platform_driver = {
- .name = "ipmi",
- .match_table = ipmi_match,
- .probe = ipmi_of_probe,
- .remove = __devexit_p(ipmi_of_remove),
+static struct platform_driver ipmi_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = ipmi_match,
+ },
+ .probe = ipmi_probe,
+ .remove = ipmi_remove,
+};
+
+#ifdef CONFIG_PARISC
+static int ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct smi_info *info;
+ int rv;
+
+ info = smi_info_alloc();
+
+ if (!info) {
+ dev_err(&dev->dev,
+ "could not allocate memory for PARISC probe\n");
+ return -ENOMEM;
+ }
+
+ info->si_type = SI_KCS;
+ info->addr_source = SI_DEVICETREE;
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ info->io.addr_data = dev->hpa.start;
+ info->io.regsize = 1;
+ info->io.regspacing = 1;
+ info->io.regshift = 0;
+ info->irq = 0; /* no interrupt */
+ info->irq_setup = NULL;
+ info->dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
+
+ dev_set_drvdata(&dev->dev, info);
+
+ rv = add_smi(info);
+ if (rv) {
+ kfree(info);
+ return rv;
+ }
+
+ return 0;
+}
+
+static int ipmi_parisc_remove(struct parisc_device *dev)
+{
+ cleanup_one_si(dev_get_drvdata(&dev->dev));
+ return 0;
+}
+
+static struct parisc_device_id ipmi_parisc_tbl[] = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+static struct parisc_driver ipmi_parisc_driver = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = ipmi_parisc_remove,
};
-#endif /* CONFIG_PPC_OF */
+#endif /* CONFIG_PARISC */
static int wait_for_msg_done(struct smi_info *smi_info)
{
@@ -2415,7 +2823,7 @@ static int wait_for_msg_done(struct smi_info *smi_info)
smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
schedule_timeout_uninterruptible(1);
smi_result = smi_info->handlers->event(
- smi_info->si_sm, 100);
+ smi_info->si_sm, jiffies_to_usecs(1));
} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
smi_result = smi_info->handlers->event(
smi_info->si_sm, 0);
@@ -2483,9 +2891,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from get global,"
- " enables command, the event buffer is not"
+ printk(KERN_WARNING PFX "Error getting response from get"
+ " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2497,10 +2904,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
resp[2] != 0) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global"
- " enables command, cannot enable the event"
- " buffer.\n");
+ printk(KERN_WARNING PFX "Invalid return from get global"
+ " enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2516,9 +2921,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
rv = wait_for_msg_done(smi_info);
if (rv) {
- printk(KERN_WARNING
- "ipmi_si: Error getting response from set global,"
- " enables command, the event buffer is not"
+		printk(KERN_WARNING PFX "Error getting response from set"
+		       " global enables command, the event buffer is not"
" enabled.\n");
goto out;
}
@@ -2529,10 +2933,8 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
if (resp_len < 3 ||
resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
- printk(KERN_WARNING
- "ipmi_si: Invalid return from get global,"
- "enables command, not enable the event"
- " buffer.\n");
+		printk(KERN_WARNING PFX "Invalid return from set global"
+		       " enables command, cannot enable the event buffer.\n");
rv = -EINVAL;
goto out;
}
@@ -2548,54 +2950,73 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
return rv;
}
-static int type_file_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int smi_type_proc_show(struct seq_file *m, void *v)
{
- struct smi_info *smi = data;
+ struct smi_info *smi = m->private;
- return sprintf(page, "%s\n", si_to_str[smi->si_type]);
+ return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
}
-static int stat_file_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int smi_type_proc_open(struct inode *inode, struct file *file)
{
- char *out = (char *) page;
- struct smi_info *smi = data;
+ return single_open(file, smi_type_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_type_proc_ops = {
+ .open = smi_type_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
- out += sprintf(out, "interrupts_enabled: %d\n",
+static int smi_si_stats_proc_show(struct seq_file *m, void *v)
+{
+ struct smi_info *smi = m->private;
+
+ seq_printf(m, "interrupts_enabled: %d\n",
smi->irq && !smi->interrupt_disabled);
- out += sprintf(out, "short_timeouts: %u\n",
+ seq_printf(m, "short_timeouts: %u\n",
smi_get_stat(smi, short_timeouts));
- out += sprintf(out, "long_timeouts: %u\n",
+ seq_printf(m, "long_timeouts: %u\n",
smi_get_stat(smi, long_timeouts));
- out += sprintf(out, "idles: %u\n",
+ seq_printf(m, "idles: %u\n",
smi_get_stat(smi, idles));
- out += sprintf(out, "interrupts: %u\n",
+ seq_printf(m, "interrupts: %u\n",
smi_get_stat(smi, interrupts));
- out += sprintf(out, "attentions: %u\n",
+ seq_printf(m, "attentions: %u\n",
smi_get_stat(smi, attentions));
- out += sprintf(out, "flag_fetches: %u\n",
+ seq_printf(m, "flag_fetches: %u\n",
smi_get_stat(smi, flag_fetches));
- out += sprintf(out, "hosed_count: %u\n",
+ seq_printf(m, "hosed_count: %u\n",
smi_get_stat(smi, hosed_count));
- out += sprintf(out, "complete_transactions: %u\n",
+ seq_printf(m, "complete_transactions: %u\n",
smi_get_stat(smi, complete_transactions));
- out += sprintf(out, "events: %u\n",
+ seq_printf(m, "events: %u\n",
smi_get_stat(smi, events));
- out += sprintf(out, "watchdog_pretimeouts: %u\n",
+ seq_printf(m, "watchdog_pretimeouts: %u\n",
smi_get_stat(smi, watchdog_pretimeouts));
- out += sprintf(out, "incoming_messages: %u\n",
+ seq_printf(m, "incoming_messages: %u\n",
smi_get_stat(smi, incoming_messages));
+ return 0;
+}
- return out - page;
+static int smi_si_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_si_stats_proc_show, PDE_DATA(inode));
}
-static int param_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static const struct file_operations smi_si_stats_proc_ops = {
+ .open = smi_si_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int smi_params_proc_show(struct seq_file *m, void *v)
{
- struct smi_info *smi = data;
+ struct smi_info *smi = m->private;
- return sprintf(page,
+ return seq_printf(m,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi->si_type],
addr_space_to_str[smi->io.addr_type],
@@ -2607,6 +3028,18 @@ static int param_read_proc(char *page, char **start, off_t off,
smi->slave_addr);
}
+static int smi_params_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, smi_params_proc_show, PDE_DATA(inode));
+}
+
+static const struct file_operations smi_params_proc_ops = {
+ .open = smi_params_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
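
The three proc files converted to seq_file above keep their old output format, so existing readers keep working. A minimal reader, assuming interface number 0 so the files live under /proc/ipmi/0/ (adjust the number for other interfaces):

#include <stdio.h>

static void dump(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump("/proc/ipmi/0/type");	/* e.g. "kcs" */
	dump("/proc/ipmi/0/si_stats");	/* counters from smi_si_stats_proc_show() */
	dump("/proc/ipmi/0/params");
	return 0;
}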
+
/*
* oem_data_avail_to_receive_msg_avail
* @info - smi_info structure with msg_flags set
@@ -2673,7 +3106,7 @@ static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
struct ipmi_smi_msg *msg = smi_info->curr_msg;
- /* Make it a reponse */
+ /* Make it a response */
msg->rsp[0] = msg->data[0] | 4;
msg->rsp[1] = msg->data[1];
msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
@@ -2763,7 +3196,7 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
}
}
-static __devinitdata struct ipmi_default_vals
+static struct ipmi_default_vals
{
int type;
int port;
@@ -2775,7 +3208,7 @@ static __devinitdata struct ipmi_default_vals
{ .port = 0 }
};
-static __devinit void default_find_bmc(void)
+static void default_find_bmc(void)
{
struct smi_info *info;
int i;
@@ -2787,11 +3220,11 @@ static __devinit void default_find_bmc(void)
if (check_legacy_ioport(ipmi_defaults[i].port))
continue;
#endif
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = smi_info_alloc();
if (!info)
return;
- info->addr_source = NULL;
+ info->addr_source = SI_DEFAULT;
info->si_type = ipmi_defaults[i].type;
info->io_setup = port_setup;
@@ -2803,14 +3236,18 @@ static __devinit void default_find_bmc(void)
info->io.regsize = DEFAULT_REGSPACING;
info->io.regshift = 0;
- if (try_smi_init(info) == 0) {
- /* Found one... */
- printk(KERN_INFO "ipmi_si: Found default %s state"
- " machine at %s address 0x%lx\n",
- si_to_str[info->si_type],
- addr_space_to_str[info->io.addr_type],
- info->io.addr_data);
- return;
+ if (add_smi(info) == 0) {
+ if ((try_smi_init(info)) == 0) {
+ /* Found one... */
+ printk(KERN_INFO PFX "Found default %s"
+ " state machine at %s address 0x%lx\n",
+ si_to_str[info->si_type],
+ addr_space_to_str[info->io.addr_type],
+ info->io.addr_data);
+ } else
+ cleanup_one_si(info);
+ } else {
+ kfree(info);
}
}
}
@@ -2829,34 +3266,48 @@ static int is_new_interface(struct smi_info *info)
return 1;
}
-static int try_smi_init(struct smi_info *new_smi)
+static int add_smi(struct smi_info *new_smi)
{
- int rv;
- int i;
-
- if (new_smi->addr_source) {
- printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
- " machine at %s address 0x%lx, slave address 0x%x,"
- " irq %d\n",
- new_smi->addr_source,
- si_to_str[new_smi->si_type],
- addr_space_to_str[new_smi->io.addr_type],
- new_smi->io.addr_data,
- new_smi->slave_addr, new_smi->irq);
- }
+ int rv = 0;
+ printk(KERN_INFO PFX "Adding %s-specified %s state machine",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type]);
mutex_lock(&smi_infos_lock);
if (!is_new_interface(new_smi)) {
- printk(KERN_WARNING "ipmi_si: duplicate interface\n");
+ printk(KERN_CONT " duplicate interface\n");
rv = -EBUSY;
goto out_err;
}
+ printk(KERN_CONT "\n");
+
/* So we know not to free it unless we have allocated one. */
new_smi->intf = NULL;
new_smi->si_sm = NULL;
new_smi->handlers = NULL;
+ list_add_tail(&new_smi->link, &smi_infos);
+
+out_err:
+ mutex_unlock(&smi_infos_lock);
+ return rv;
+}
+
+static int try_smi_init(struct smi_info *new_smi)
+{
+ int rv = 0;
+ int i;
+
+ printk(KERN_INFO PFX "Trying %s-specified %s state"
+ " machine at %s address 0x%lx, slave address 0x%x,"
+ " irq %d\n",
+ ipmi_addr_src_to_str[new_smi->addr_source],
+ si_to_str[new_smi->si_type],
+ addr_space_to_str[new_smi->io.addr_type],
+ new_smi->io.addr_data,
+ new_smi->slave_addr, new_smi->irq);
+
switch (new_smi->si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
@@ -2879,7 +3330,8 @@ static int try_smi_init(struct smi_info *new_smi)
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
- printk(KERN_ERR "Could not allocate state machine memory\n");
+ printk(KERN_ERR PFX
+ "Could not allocate state machine memory\n");
rv = -ENOMEM;
goto out_err;
}
@@ -2889,18 +3341,14 @@ static int try_smi_init(struct smi_info *new_smi)
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io_setup(new_smi);
if (rv) {
- printk(KERN_ERR "Could not set up I/O space\n");
+ printk(KERN_ERR PFX "Could not set up I/O space\n");
goto out_err;
}
- spin_lock_init(&(new_smi->si_lock));
- spin_lock_init(&(new_smi->msg_lock));
-
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: Interface detection"
- " failed\n");
+ printk(KERN_INFO PFX "Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
@@ -2912,7 +3360,7 @@ static int try_smi_init(struct smi_info *new_smi)
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->addr_source)
- printk(KERN_INFO "ipmi_si: There appears to be no BMC"
+ printk(KERN_INFO PFX "There appears to be no BMC"
" at this location\n");
goto out_err;
}
@@ -2924,18 +3372,19 @@ static int try_smi_init(struct smi_info *new_smi)
INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
- new_smi->run_to_completion = 0;
+ new_smi->run_to_completion = false;
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
- new_smi->interrupt_disabled = 0;
+ new_smi->interrupt_disabled = true;
atomic_set(&new_smi->stop_operation, 0);
+ atomic_set(&new_smi->need_watch, 0);
new_smi->intf_num = smi_num;
smi_num++;
rv = try_enable_event_buffer(new_smi);
if (rv == 0)
- new_smi->has_event_buffer = 1;
+ new_smi->has_event_buffer = true;
/*
* Start clearing the flags before we enable interrupts or the
@@ -2954,9 +3403,8 @@ static int try_smi_init(struct smi_info *new_smi)
new_smi->pdev = platform_device_alloc("ipmi_si",
new_smi->intf_num);
if (!new_smi->pdev) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to allocate platform device\n");
+ printk(KERN_ERR PFX
+ "Unable to allocate platform device\n");
goto out_err;
}
new_smi->dev = &new_smi->pdev->dev;
@@ -2964,14 +3412,13 @@ static int try_smi_init(struct smi_info *new_smi)
rv = platform_device_add(new_smi->pdev);
if (rv) {
- printk(KERN_ERR
- "ipmi_si_intf:"
- " Unable to register system interface device:"
+ printk(KERN_ERR PFX
+ "Unable to register system interface device:"
" %d\n",
rv);
goto out_err;
}
- new_smi->dev_registered = 1;
+ new_smi->dev_registered = true;
}
rv = ipmi_register_smi(&handlers,
@@ -2981,48 +3428,37 @@ static int try_smi_init(struct smi_info *new_smi)
"bmc",
new_smi->slave_addr);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to register device: error %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to register device: error %d\n",
+ rv);
goto out_err_stop_timer;
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
- type_file_read_proc,
+ &smi_type_proc_ops,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
- stat_file_read_proc,
+ &smi_si_stats_proc_ops,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
- param_read_proc,
+ &smi_params_proc_ops,
new_smi);
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to create proc entry: %d\n",
- rv);
+ dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv);
goto out_err_stop_timer;
}
- list_add_tail(&new_smi->link, &smi_infos);
-
- mutex_unlock(&smi_infos_lock);
-
- printk(KERN_INFO "IPMI %s interface initialized\n",
- si_to_str[new_smi->si_type]);
+ dev_info(new_smi->dev, "IPMI %s interface initialized\n",
+ si_to_str[new_smi->si_type]);
return 0;
@@ -3031,11 +3467,17 @@ static int try_smi_init(struct smi_info *new_smi)
wait_for_timer_and_thread(new_smi);
out_err:
- if (new_smi->intf)
+ new_smi->interrupt_disabled = true;
+
+ if (new_smi->intf) {
ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
- if (new_smi->irq_cleanup)
+ if (new_smi->irq_cleanup) {
new_smi->irq_cleanup(new_smi);
+ new_smi->irq_cleanup = NULL;
+ }
/*
* Wait until we know that we are out of any interrupt
@@ -3048,42 +3490,46 @@ static int try_smi_init(struct smi_info *new_smi)
if (new_smi->handlers)
new_smi->handlers->cleanup(new_smi->si_sm);
kfree(new_smi->si_sm);
+ new_smi->si_sm = NULL;
}
- if (new_smi->addr_source_cleanup)
+ if (new_smi->addr_source_cleanup) {
new_smi->addr_source_cleanup(new_smi);
- if (new_smi->io_cleanup)
+ new_smi->addr_source_cleanup = NULL;
+ }
+ if (new_smi->io_cleanup) {
new_smi->io_cleanup(new_smi);
+ new_smi->io_cleanup = NULL;
+ }
- if (new_smi->dev_registered)
+ if (new_smi->dev_registered) {
platform_device_unregister(new_smi->pdev);
-
- kfree(new_smi);
-
- mutex_unlock(&smi_infos_lock);
+ new_smi->dev_registered = false;
+ }
return rv;
}
-static __devinit int init_ipmi_si(void)
+static int init_ipmi_si(void)
{
int i;
char *str;
int rv;
+ struct smi_info *e;
+ enum ipmi_addr_src type = SI_INVALID;
if (initialized)
return 0;
initialized = 1;
- /* Register the device drivers. */
- rv = driver_register(&ipmi_driver.driver);
- if (rv) {
- printk(KERN_ERR
- "init_ipmi_si: Unable to register driver: %d\n",
- rv);
- return rv;
+ if (si_tryplatform) {
+ rv = platform_driver_register(&ipmi_driver);
+ if (rv) {
+ printk(KERN_ERR PFX "Unable to register "
+ "driver: %d\n", rv);
+ return rv;
+ }
}
-
/* Parse out the si_type string into its components. */
str = si_type_str;
if (*str != '\0') {
@@ -3101,52 +3547,98 @@ static __devinit int init_ipmi_si(void)
printk(KERN_INFO "IPMI System Interface driver.\n");
- hardcode_find_bmc();
+ /* If the user gave us a device, they presumably want us to use it */
+ if (!hardcode_find_bmc())
+ return 0;
-#ifdef CONFIG_DMI
- dmi_find_bmc();
+#ifdef CONFIG_PCI
+ if (si_trypci) {
+ rv = pci_register_driver(&ipmi_pci_driver);
+ if (rv)
+ printk(KERN_ERR PFX "Unable to register "
+ "PCI driver: %d\n", rv);
+ else
+ pci_registered = true;
+ }
#endif
#ifdef CONFIG_ACPI
- acpi_find_bmc();
+ if (si_tryacpi) {
+ pnp_register_driver(&ipmi_pnp_driver);
+ pnp_registered = true;
+ }
#endif
-#ifdef CONFIG_PCI
- rv = pci_register_driver(&ipmi_pci_driver);
- if (rv)
- printk(KERN_ERR
- "init_ipmi_si: Unable to register PCI driver: %d\n",
- rv);
+#ifdef CONFIG_DMI
+ if (si_trydmi)
+ dmi_find_bmc();
+#endif
+
+#ifdef CONFIG_ACPI
+ if (si_tryacpi)
+ spmi_find_bmc();
#endif
-#ifdef CONFIG_PPC_OF
- of_register_platform_driver(&ipmi_of_platform_driver);
+#ifdef CONFIG_PARISC
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = true;
+ /* poking PC IO addresses will crash machine, don't do it */
+ si_trydefaults = 0;
#endif
+ /* We prefer devices with interrupts, but in the case of a machine
+ with multiple BMCs we assume that there will be several instances
+ of a given type so if we succeed in registering a type then also
+ try to register everything else of the same type */
+
+ mutex_lock(&smi_infos_lock);
+ list_for_each_entry(e, &smi_infos, link) {
+ /* Try to register a device if it has an IRQ and we either
+ haven't successfully registered a device yet or this
+ device has the same type as one we successfully registered */
+ if (e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+
+ /* type will only have been set if we successfully registered an si */
+ if (type) {
+ mutex_unlock(&smi_infos_lock);
+ return 0;
+ }
+
+ /* Fall back to the preferred device */
+
+ list_for_each_entry(e, &smi_infos, link) {
+ if (!e->irq && (!type || e->addr_source == type)) {
+ if (!try_smi_init(e)) {
+ type = e->addr_source;
+ }
+ }
+ }
+ mutex_unlock(&smi_infos_lock);
+
+ if (type)
+ return 0;
+
if (si_trydefaults) {
mutex_lock(&smi_infos_lock);
if (list_empty(&smi_infos)) {
/* No BMC was found, try defaults. */
mutex_unlock(&smi_infos_lock);
default_find_bmc();
- } else {
+ } else
mutex_unlock(&smi_infos_lock);
- }
}
mutex_lock(&smi_infos_lock);
if (unload_when_empty && list_empty(&smi_infos)) {
mutex_unlock(&smi_infos_lock);
-#ifdef CONFIG_PCI
- pci_unregister_driver(&ipmi_pci_driver);
-#endif
-
-#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&ipmi_of_platform_driver);
-#endif
- driver_unregister(&ipmi_driver.driver);
- printk(KERN_WARNING
- "ipmi_si: Unable to find any System Interface(s)\n");
+ cleanup_ipmi_si();
+ printk(KERN_WARNING PFX
+ "Unable to find any System Interface(s)\n");
return -ENODEV;
} else {
mutex_unlock(&smi_infos_lock);
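
The registration loop above makes two passes over the discovered interfaces: first anything with an IRQ, then, only if none of those registered, the polled ones, locking onto the first address source that succeeds. A standalone sketch of that selection order, with a made-up entry list and a try_register() stub standing in for try_smi_init():

#include <stdio.h>

enum addr_src { SRC_INVALID = 0, SRC_HARDCODED, SRC_ACPI, SRC_SMBIOS, SRC_PCI };

struct entry {
	const char *name;
	int irq;		/* 0 means polled only */
	enum addr_src src;
	int registered;
};

/* Pretend registration that always succeeds; the real try_smi_init() can fail. */
static int try_register(struct entry *e)
{
	e->registered = 1;
	return 0;
}

int main(void)
{
	struct entry entries[] = {
		{ "bmc-acpi",   10, SRC_ACPI,   0 },
		{ "bmc-smbios",  0, SRC_SMBIOS, 0 },
		{ "bmc-acpi2",  11, SRC_ACPI,   0 },
	};
	enum addr_src type = SRC_INVALID;
	size_t i, n = sizeof(entries) / sizeof(entries[0]);

	/* Pass 1: prefer entries with an IRQ; lock onto the first source that works. */
	for (i = 0; i < n; i++)
		if (entries[i].irq && (!type || entries[i].src == type))
			if (!try_register(&entries[i]))
				type = entries[i].src;

	/* Pass 2: only if nothing with an IRQ registered, try the polled entries. */
	if (!type)
		for (i = 0; i < n; i++)
			if (!entries[i].irq && (!type || entries[i].src == type))
				if (!try_register(&entries[i]))
					type = entries[i].src;

	for (i = 0; i < n; i++)
		printf("%s: %s\n", entries[i].name,
		       entries[i].registered ? "registered" : "skipped");
	return 0;
}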
@@ -3157,7 +3649,7 @@ module_init(init_ipmi_si);
static void cleanup_one_si(struct smi_info *to_clean)
{
- int rv;
+ int rv = 0;
unsigned long flags;
if (!to_clean)
@@ -3201,14 +3693,16 @@ static void cleanup_one_si(struct smi_info *to_clean)
schedule_timeout_uninterruptible(1);
}
- rv = ipmi_unregister_smi(to_clean->intf);
+ if (to_clean->intf)
+ rv = ipmi_unregister_smi(to_clean->intf);
+
if (rv) {
- printk(KERN_ERR
- "ipmi_si: Unable to unregister device: errno=%d\n",
+ printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
rv);
}
- to_clean->handlers->cleanup(to_clean->si_sm);
+ if (to_clean->handlers)
+ to_clean->handlers->cleanup(to_clean->si_sm);
kfree(to_clean->si_sm);
@@ -3223,7 +3717,7 @@ static void cleanup_one_si(struct smi_info *to_clean)
kfree(to_clean);
}
-static __exit void cleanup_ipmi_si(void)
+static void cleanup_ipmi_si(void)
{
struct smi_info *e, *tmp_e;
@@ -3231,19 +3725,24 @@ static __exit void cleanup_ipmi_si(void)
return;
#ifdef CONFIG_PCI
- pci_unregister_driver(&ipmi_pci_driver);
+ if (pci_registered)
+ pci_unregister_driver(&ipmi_pci_driver);
#endif
-
-#ifdef CONFIG_PPC_OF
- of_unregister_platform_driver(&ipmi_of_platform_driver);
+#ifdef CONFIG_ACPI
+ if (pnp_registered)
+ pnp_unregister_driver(&ipmi_pnp_driver);
+#endif
+#ifdef CONFIG_PARISC
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
#endif
+ platform_driver_unregister(&ipmi_driver);
+
mutex_lock(&smi_infos_lock);
list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
cleanup_one_si(e);
mutex_unlock(&smi_infos_lock);
-
- driver_unregister(&ipmi_driver.driver);
}
module_exit(cleanup_ipmi_si);
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c
index faed9297190..c8e77afa8b9 100644
--- a/drivers/char/ipmi/ipmi_smic_sm.c
+++ b/drivers/char/ipmi/ipmi_smic_sm.c
@@ -80,7 +80,7 @@ enum smic_states {
#define SMIC_MAX_ERROR_RETRIES 3
/* Timeouts in microseconds. */
-#define SMIC_RETRY_TIMEOUT 2000000
+#define SMIC_RETRY_TIMEOUT (2*USEC_PER_SEC)
/* SMIC Flags Register Bits */
#define SMIC_RX_DATA_READY 0x80
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index a4d57e31f71..37b8be7cba9 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -35,7 +35,7 @@
#include <linux/moduleparam.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
@@ -52,7 +52,7 @@
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#ifdef CONFIG_X86
/*
@@ -65,6 +65,7 @@
* mechanism for it at that time.
*/
#include <asm/kdebug.h>
+#include <asm/nmi.h>
#define HAVE_DIE_NMI
#endif
@@ -138,18 +139,10 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
-/* These are here until the real ones get into the watchdog.h interface. */
-#ifndef WDIOC_GETTIMEOUT
-#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
-#endif
-#ifndef WDIOC_SET_PRETIMEOUT
-#define WDIOC_SET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 21, int)
-#endif
-#ifndef WDIOC_GET_PRETIMEOUT
-#define WDIOC_GET_PRETIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 22, int)
-#endif
+#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
-static int nowayout = WATCHDOG_NOWAYOUT;
+static DEFINE_MUTEX(ipmi_watchdog_mutex);
+static bool nowayout = WATCHDOG_NOWAYOUT;
static ipmi_user_t watchdog_user;
static int watchdog_ifnum;
@@ -196,7 +189,7 @@ static void ipmi_unregister_watchdog(int ipmi_intf);
*/
static int start_now;
-static int set_param_int(const char *val, struct kernel_param *kp)
+static int set_param_timeout(const char *val, const struct kernel_param *kp)
{
char *endp;
int l;
@@ -215,10 +208,11 @@ static int set_param_int(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_int(char *buffer, struct kernel_param *kp)
-{
- return sprintf(buffer, "%i", *((int *)kp->arg));
-}
+static struct kernel_param_ops param_ops_timeout = {
+ .set = set_param_timeout,
+ .get = param_get_int,
+};
+#define param_check_timeout param_check_int
typedef int (*action_fn)(const char *intval, char *outval);
@@ -227,7 +221,7 @@ static int preaction_op(const char *inval, char *outval);
static int preop_op(const char *inval, char *outval);
static void check_parms(void);
-static int set_param_str(const char *val, struct kernel_param *kp)
+static int set_param_str(const char *val, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv = 0;
@@ -251,7 +245,7 @@ static int set_param_str(const char *val, struct kernel_param *kp)
return rv;
}
-static int get_param_str(char *buffer, struct kernel_param *kp)
+static int get_param_str(char *buffer, const struct kernel_param *kp)
{
action_fn fn = (action_fn) kp->arg;
int rv;
@@ -263,7 +257,7 @@ static int get_param_str(char *buffer, struct kernel_param *kp)
}
-static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+static int set_param_wdog_ifnum(const char *val, const struct kernel_param *kp)
{
int rv = param_set_int(val, kp);
if (rv)
@@ -276,27 +270,38 @@ static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
return 0;
}
-module_param_call(ifnum_to_use, set_param_wdog_ifnum, get_param_int,
- &ifnum_to_use, 0644);
+static struct kernel_param_ops param_ops_wdog_ifnum = {
+ .set = set_param_wdog_ifnum,
+ .get = param_get_int,
+};
+
+#define param_check_wdog_ifnum param_check_int
+
+static struct kernel_param_ops param_ops_str = {
+ .set = set_param_str,
+ .get = get_param_str,
+};
+
+module_param(ifnum_to_use, wdog_ifnum, 0644);
MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
"timer. Setting to -1 defaults to the first registered "
"interface");
-module_param_call(timeout, set_param_int, get_param_int, &timeout, 0644);
+module_param(timeout, timeout, 0644);
MODULE_PARM_DESC(timeout, "Timeout value in seconds.");
-module_param_call(pretimeout, set_param_int, get_param_int, &pretimeout, 0644);
+module_param(pretimeout, timeout, 0644);
MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
-module_param_call(action, set_param_str, get_param_str, action_op, 0644);
+module_param_cb(action, &param_ops_str, action_op, 0644);
MODULE_PARM_DESC(action, "Timeout action. One of: "
"reset, none, power_cycle, power_off.");
-module_param_call(preaction, set_param_str, get_param_str, preaction_op, 0644);
+module_param_cb(preaction, &param_ops_str, preaction_op, 0644);
MODULE_PARM_DESC(preaction, "Pretimeout action. One of: "
"pre_none, pre_smi, pre_nmi, pre_int.");
-module_param_call(preop, set_param_str, get_param_str, preop_op, 0644);
+module_param_cb(preop, &param_ops_str, preop_op, 0644);
MODULE_PARM_DESC(preop, "Pretimeout driver operation. One of: "
"preop_none, preop_panic, preop_give_data.");
@@ -304,7 +309,7 @@ module_param(start_now, int, 0444);
MODULE_PARM_DESC(start_now, "Set to 1 to start the watchdog as "
"soon as the driver is loaded.");
-module_param(nowayout, int, 0644);
+module_param(nowayout, bool, 0644);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
"(default=CONFIG_WATCHDOG_NOWAYOUT)");
@@ -504,6 +509,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -512,8 +518,8 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_smi_msg,
&panic_halt_heartbeat_recv_msg,
1);
- if (!rv)
- atomic_add(2, &panic_done_count);
+ if (rv)
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -537,16 +543,18 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
+ atomic_add(2, &panic_done_count);
rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
- if (!rv) {
- atomic_add(2, &panic_done_count);
- if (send_heartbeat_now)
- panic_halt_ipmi_heartbeat();
- } else
+ if (rv) {
+ atomic_sub(2, &panic_done_count);
printk(KERN_WARNING PFX
"Unable to extend the watchdog timeout.");
+ } else {
+ if (send_heartbeat_now)
+ panic_halt_ipmi_heartbeat();
+ }
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
}
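
The reordering above (add to panic_done_count before issuing the request, subtract only if the request fails) closes the window in which a fast reply could be handled before the counter was incremented, letting the wait loop exit early. A sketch of the same accounting pattern with hypothetical names (my_dev, my_dev_send), not code from this driver:

	#include <linux/atomic.h>

	struct my_dev;				/* hypothetical device handle */
	int my_dev_send(struct my_dev *dev);	/* hypothetical submit routine;
						   completes via two async replies */

	static atomic_t pending_replies = ATOMIC_INIT(0);

	static int send_tracked(struct my_dev *dev)
	{
		int rv;

		/* Reserve both expected replies *before* submitting, so a reply
		 * that arrives immediately is never counted before the request
		 * itself has been accounted for. */
		atomic_add(2, &pending_replies);
		rv = my_dev_send(dev);
		if (rv)
			/* Submission failed; no replies will ever arrive. */
			atomic_sub(2, &pending_replies);
		return rv;
	}
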
@@ -582,6 +590,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
+ int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@@ -602,6 +611,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
+restart:
atomic_set(&heartbeat_tofree, 2);
/*
@@ -639,7 +649,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
- if (heartbeat_recv_msg.msg.data[0] != 0) {
+ if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
+ timeout_retries++;
+ if (timeout_retries > 3) {
+			printk(KERN_ERR PFX "Unable to restore the IPMI"
+ " watchdog's settings, giving up.\n");
+ rv = -EIO;
+ goto out_unlock;
+ }
+
+ /*
+	 * The timer was not initialized, which means the BMC was
+	 * probably reset and lost its watchdog information.  Attempt
+	 * to restore the timer's settings.  Note that we still hold
+	 * the heartbeat lock, to keep a heartbeat from happening in
+	 * this process, so the timeout must be set with no heartbeat
+	 * to avoid deadlocking on this mutex.
+ */
+ rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
+ if (rv) {
+			printk(KERN_ERR PFX "Unable to send the command to"
+ " set the watchdog's settings, giving up.\n");
+ goto out_unlock;
+ }
+
+ /* We might need a new heartbeat, so do it now */
+ goto restart;
+ } else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@@ -648,6 +684,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
+out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
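
The retry added above is, in effect, a bounded loop: send the heartbeat, and if the BMC reports that its timer is uninitialized, restore the settings and try again, giving up after a few attempts. An illustrative restatement with hypothetical helpers (my_heartbeat, my_restore_settings, MY_NOT_INIT), not code from this driver:

	#include <linux/errno.h>

	int my_heartbeat(void);		/* hypothetical: 0, a -errno, or the
					   positive "not initialized" code */
	int my_restore_settings(void);	/* hypothetical re-initialization */

	#define MY_NOT_INIT	0x80	/* stand-in completion code */

	static int heartbeat_with_retry(void)
	{
		int tries = 0;
		int rv;

		for (;;) {
			rv = my_heartbeat();
			if (rv != MY_NOT_INIT)
				return rv;	/* success, or a hard error */
			if (++tries > 3)
				return -EIO;	/* give up, as the patch does */
			rv = my_restore_settings();
			if (rv)
				return rv;	/* could not re-initialize */
		}
	}
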
@@ -659,7 +696,7 @@ static struct watchdog_info ident = {
.identity = "IPMI"
};
-static int ipmi_ioctl(struct inode *inode, struct file *file,
+static int ipmi_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
@@ -684,7 +721,6 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
return -EFAULT;
return 0;
- case WDIOC_SET_PRETIMEOUT:
case WDIOC_SETPRETIMEOUT:
i = copy_from_user(&val, argp, sizeof(int));
if (i)
@@ -692,7 +728,6 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
pretimeout = val;
return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
- case WDIOC_GET_PRETIMEOUT:
case WDIOC_GETPRETIMEOUT:
i = copy_to_user(argp, &pretimeout, sizeof(pretimeout));
if (i)
@@ -730,6 +765,19 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
}
}
+static long ipmi_unlocked_ioctl(struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+
+ mutex_lock(&ipmi_watchdog_mutex);
+ ret = ipmi_ioctl(file, cmd, arg);
+ mutex_unlock(&ipmi_watchdog_mutex);
+
+ return ret;
+}
+
static ssize_t ipmi_write(struct file *file,
const char __user *buf,
size_t len,
@@ -819,7 +867,6 @@ static int ipmi_open(struct inode *ino, struct file *filep)
if (test_and_set_bit(0, &ipmi_wdog_open))
return -EBUSY;
- cycle_kernel_lock();
/*
* Don't start the timer now, let it start on the
@@ -880,10 +927,11 @@ static const struct file_operations ipmi_wdog_fops = {
.read = ipmi_read,
.poll = ipmi_poll,
.write = ipmi_write,
- .ioctl = ipmi_ioctl,
+ .unlocked_ioctl = ipmi_unlocked_ioctl,
.open = ipmi_open,
.release = ipmi_close,
.fasync = ipmi_fasync,
+ .llseek = no_llseek,
};
static struct miscdevice ipmi_wdog_miscdev = {
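
On the user-space side the only visible change is the removal of the driver-private WDIOC_SET_PRETIMEOUT/WDIOC_GET_PRETIMEOUT aliases earlier in this patch; clients should use the standard watchdog ioctls from <linux/watchdog.h>. A minimal illustrative client (device path and values are examples; ioctl error handling omitted for brevity):

	/* Illustrative user-space watchdog client; not part of this patch. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int timeout = 60, pretimeout = 10;
		int fd = open("/dev/watchdog", O_WRONLY);

		if (fd < 0) {
			perror("open /dev/watchdog");
			return 1;
		}
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);		/* seconds */
		ioctl(fd, WDIOC_SETPRETIMEOUT, &pretimeout);	/* standard name */
		ioctl(fd, WDIOC_KEEPALIVE, 0);			/* heartbeat */

		/* Magic close: an orderly stop, honoured unless nowayout is set. */
		write(fd, "V", 1);
		close(fd);
		return 0;
	}
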
@@ -895,11 +943,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
- if (msg->msg.data[0] != 0) {
+ if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
+ msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
+ printk(KERN_INFO PFX "response: The IPMI controller appears"
+ " to have been reset, will attempt to reinitialize"
+ " the watchdog timer\n");
+ else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
- }
ipmi_free_recv_msg(msg);
}
@@ -1051,17 +1103,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
#ifdef HAVE_DIE_NMI
static int
-ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
+ipmi_nmi(unsigned int val, struct pt_regs *regs)
{
- struct die_args *args = data;
-
- if (val != DIE_NMI)
- return NOTIFY_OK;
-
- /* Hack, if it's a memory or I/O error, ignore it. */
- if (args->err & 0xc0)
- return NOTIFY_OK;
-
/*
* If we get here, it's an NMI that's not a memory or I/O
* error. We can't truly tell if it's from IPMI or not
@@ -1071,15 +1114,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
if (testing_nmi) {
testing_nmi = 2;
- return NOTIFY_STOP;
+ return NMI_HANDLED;
}
/* If we are not expecting a timeout, ignore it. */
if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
- return NOTIFY_OK;
+ return NMI_DONE;
if (preaction_val != WDOG_PRETIMEOUT_NMI)
- return NOTIFY_OK;
+ return NMI_DONE;
/*
* If no one else handled the NMI, we assume it was the IPMI
@@ -1094,12 +1137,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
panic(PFX "pre-timeout");
}
- return NOTIFY_STOP;
+ return NMI_HANDLED;
}
-
-static struct notifier_block ipmi_nmi_handler = {
- .notifier_call = ipmi_nmi
-};
#endif
static int wdog_reboot_handler(struct notifier_block *this,
@@ -1115,7 +1154,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
if (code == SYS_POWER_OFF || code == SYS_HALT) {
/* Disable the WDT if we are shutting down. */
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
/* Set a long timer to let the reboot happen, but
reboot if it hangs, but only if the watchdog
@@ -1123,7 +1162,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
- panic_halt_ipmi_set_timeout();
+ ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
}
}
return NOTIFY_OK;
@@ -1264,7 +1303,8 @@ static void check_parms(void)
}
}
if (do_nmi && !nmi_handler_registered) {
- rv = register_die_notifier(&ipmi_nmi_handler);
+ rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0,
+ "ipmi");
if (rv) {
printk(KERN_WARNING PFX
"Can't register nmi handler\n");
@@ -1272,7 +1312,7 @@ static void check_parms(void)
} else
nmi_handler_registered = 1;
} else if (!do_nmi && nmi_handler_registered) {
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
nmi_handler_registered = 0;
}
#endif
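
The NMI hook now uses the x86 NMI handler API (register_nmi_handler()/unregister_nmi_handler(), returning NMI_HANDLED or NMI_DONE) instead of the die notifier chain. A hypothetical skeleton of that registration pattern, with invented names (my_nmi, expecting_nmi, "my_driver"):

	#include <linux/module.h>
	#include <linux/types.h>
	#include <asm/nmi.h>

	static bool expecting_nmi;	/* set when the driver has armed an NMI */

	static int my_nmi(unsigned int type, struct pt_regs *regs)
	{
		if (!expecting_nmi)
			return NMI_DONE;	/* not ours; let other handlers run */

		/* ... react to the expected NMI here ... */
		return NMI_HANDLED;		/* claim it; stop further processing */
	}

	static int __init my_init(void)
	{
		/* Unknown-source NMIs are delivered to NMI_UNKNOWN handlers. */
		return register_nmi_handler(NMI_UNKNOWN, my_nmi, 0, "my_driver");
	}

	static void __exit my_exit(void)
	{
		unregister_nmi_handler(NMI_UNKNOWN, "my_driver");
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");
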
@@ -1310,7 +1350,7 @@ static int __init ipmi_wdog_init(void)
if (rv) {
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,
&wdog_panic_notifier);
@@ -1331,7 +1371,7 @@ static void __exit ipmi_wdog_exit(void)
#ifdef HAVE_DIE_NMI
if (nmi_handler_registered)
- unregister_die_notifier(&ipmi_nmi_handler);
+ unregister_nmi_handler(NMI_UNKNOWN, "ipmi");
#endif
atomic_notifier_chain_unregister(&panic_notifier_list,