| author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 17:19:27 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-28 17:19:28 -0700 |
| commit | 532bfc851a7475fb6a36c1e953aa395798a7cca7 (patch) | |
| tree | a7892e5a31330dd59f31959efbe9fda1803784fd /drivers | |
| parent | 0195c00244dc2e9f522475868fa278c473ba7339 (diff) | |
| parent | 8da00edc1069f01c34510fa405dc15d96c090a3f (diff) | |
Merge branch 'akpm' (Andrew's patch-bomb)
Merge third batch of patches from Andrew Morton:
- Some MM stragglers
- core SMP library cleanups (on_each_cpu_mask)
- Some IPI optimisations
- kexec
- kdump
- IPMI
- the radix-tree iterator work (see the iterator sketch after the commit list)
- various other misc bits.
"That'll do for -rc1. I still have ~10 patches for 3.4, will send
those along when they've baked a little more."
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (35 commits)
backlight: fix typo in tosa_lcd.c
crc32: add help text for the algorithm select option
mm: move hugepage test examples to tools/testing/selftests/vm
mm: move slabinfo.c to tools/vm
mm: move page-types.c from Documentation to tools/vm
selftests/Makefile: make `run_tests' depend on `all'
selftests: launch individual selftests from the main Makefile
radix-tree: use iterators in find_get_pages* functions
radix-tree: rewrite gang lookup using iterator
radix-tree: introduce bit-optimized iterator
fs/proc/namespaces.c: prevent crash when ns_entries[] is empty
nbd: rename the nbd_device variable from lo to nbd
pidns: add reboot_pid_ns() to handle the reboot syscall
sysctl: use bitmap library functions
ipmi: use locks on watchdog timeout set on reboot
ipmi: simplify locking
ipmi: fix message handling during panics
ipmi: use a tasklet for handling received messages
ipmi: increase KCS timeouts
ipmi: decrease the IPMI message transaction time in interrupt mode
...
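Of the items above, the radix-tree iterator work is the most API-visible: gang lookups and the `find_get_pages*` family now walk the tree with a stateful iterator instead of restarting a descent per index. A minimal sketch of the pattern, assuming the 3.4-era `include/linux/radix-tree.h` API this merge introduces; the `count_items()` helper is hypothetical:

```c
#include <linux/radix-tree.h>

/* Hypothetical helper: count populated slots using the new
 * bit-optimized iterator instead of radix_tree_gang_lookup(). */
static unsigned long count_items(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned long count = 0;

	/* Walk every occupied slot from index 0 upward; the iterator
	 * keeps its position in 'iter' between chunks. */
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		if (radix_tree_deref_slot(slot))
			count++;
	}
	return count;
}
```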
Diffstat (limited to 'drivers')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/block/nbd.c | 295 |
| -rw-r--r-- | drivers/char/ipmi/ipmi_kcs_sm.c | 4 |
| -rw-r--r-- | drivers/char/ipmi/ipmi_msghandler.c | 242 |
| -rw-r--r-- | drivers/char/ipmi/ipmi_si_intf.c | 72 |
| -rw-r--r-- | drivers/char/ipmi/ipmi_watchdog.c | 21 |
| -rw-r--r-- | drivers/video/backlight/tosa_lcd.c | 2 |

6 files changed, 320 insertions(+), 316 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c7ba11f9b20..061427a75d3 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -38,7 +38,7 @@
 
 #include <linux/nbd.h>
 
-#define LO_MAGIC 0x68797548
+#define NBD_MAGIC 0x68797548
 
 #ifdef NDEBUG
 #define dprintk(flags, fmt...)
@@ -115,7 +115,7 @@ static void nbd_end_request(struct request *req)
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
-static void sock_shutdown(struct nbd_device *lo, int lock)
+static void sock_shutdown(struct nbd_device *nbd, int lock)
 {
 	/* Forcibly shutdown the socket causing all listeners
 	 * to error
@@ -124,14 +124,14 @@ static void sock_shutdown(struct nbd_device *lo, int lock)
 	 * there should be a more generic interface rather than
 	 * calling socket ops directly here */
 	if (lock)
-		mutex_lock(&lo->tx_lock);
-	if (lo->sock) {
-		dev_warn(disk_to_dev(lo->disk), "shutting down socket\n");
-		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
-		lo->sock = NULL;
+		mutex_lock(&nbd->tx_lock);
+	if (nbd->sock) {
+		dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n");
+		kernel_sock_shutdown(nbd->sock, SHUT_RDWR);
+		nbd->sock = NULL;
 	}
 	if (lock)
-		mutex_unlock(&lo->tx_lock);
+		mutex_unlock(&nbd->tx_lock);
 }
 
 static void nbd_xmit_timeout(unsigned long arg)
@@ -146,17 +146,17 @@ static void nbd_xmit_timeout(unsigned long arg)
 /*
  *  Send or receive packet.
  */
-static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
+static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size,
 		int msg_flags)
 {
-	struct socket *sock = lo->sock;
+	struct socket *sock = nbd->sock;
 	int result;
 	struct msghdr msg;
 	struct kvec iov;
 	sigset_t blocked, oldset;
 
 	if (unlikely(!sock)) {
-		dev_err(disk_to_dev(lo->disk),
+		dev_err(disk_to_dev(nbd->disk),
 			"Attempted %s on closed socket in sock_xmit\n",
 			(send ? "send" : "recv"));
 		return -EINVAL;
@@ -180,15 +180,15 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
 		if (send) {
 			struct timer_list ti;
 
-			if (lo->xmit_timeout) {
+			if (nbd->xmit_timeout) {
 				init_timer(&ti);
 				ti.function = nbd_xmit_timeout;
 				ti.data = (unsigned long)current;
-				ti.expires = jiffies + lo->xmit_timeout;
+				ti.expires = jiffies + nbd->xmit_timeout;
 				add_timer(&ti);
 			}
 			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
-			if (lo->xmit_timeout)
+			if (nbd->xmit_timeout)
 				del_timer_sync(&ti);
 		} else
 			result = kernel_recvmsg(sock, &msg, &iov, 1, size,
@@ -200,7 +200,7 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
 				task_pid_nr(current), current->comm,
 				dequeue_signal_lock(current, &current->blocked, &info));
 			result = -EINTR;
-			sock_shutdown(lo, !send);
+			sock_shutdown(nbd, !send);
 			break;
 		}
@@ -218,18 +218,19 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
 	return result;
 }
 
-static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
+static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec,
 				 int flags)
 {
 	int result;
 	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
+	result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset,
+			   bvec->bv_len, flags);
 	kunmap(bvec->bv_page);
 	return result;
 }
 
 /* always call with the tx_lock held */
-static int nbd_send_req(struct nbd_device *lo, struct request *req)
+static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 {
 	int result, flags;
 	struct nbd_request request;
@@ -242,14 +243,14 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 	memcpy(request.handle, &req, sizeof(req));
 
 	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
-			lo->disk->disk_name, req,
+			nbd->disk->disk_name, req,
 			nbdcmd_to_ascii(nbd_cmd(req)),
 			(unsigned long long)blk_rq_pos(req) << 9,
 			blk_rq_bytes(req));
-	result = sock_xmit(lo, 1, &request, sizeof(request),
+	result = sock_xmit(nbd, 1, &request, sizeof(request),
 			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
 	if (result <= 0) {
-		dev_err(disk_to_dev(lo->disk),
+		dev_err(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
 		goto error_out;
 	}
@@ -266,10 +267,10 @@ static int nbd_send_req(struct nbd_device *lo, struct request *req)
 			if (!rq_iter_last(req, iter))
 				flags = MSG_MORE;
 			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					lo->disk->disk_name, req, bvec->bv_len);
-			result = sock_send_bvec(lo, bvec, flags);
+					nbd->disk->disk_name, req, bvec->bv_len);
+			result = sock_send_bvec(nbd, bvec, flags);
 			if (result <= 0) {
-				dev_err(disk_to_dev(lo->disk),
+				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
 					result);
 				goto error_out;
@@ -282,25 +283,25 @@ error_out:
 	return -EIO;
 }
 
-static struct request *nbd_find_request(struct nbd_device *lo,
+static struct request *nbd_find_request(struct nbd_device *nbd,
 					struct request *xreq)
 {
 	struct request *req, *tmp;
 	int err;
 
-	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
+	err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq);
 	if (unlikely(err))
 		goto out;
 
-	spin_lock(&lo->queue_lock);
-	list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
+	spin_lock(&nbd->queue_lock);
+	list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) {
 		if (req != xreq)
 			continue;
 		list_del_init(&req->queuelist);
-		spin_unlock(&lo->queue_lock);
+		spin_unlock(&nbd->queue_lock);
 		return req;
 	}
-	spin_unlock(&lo->queue_lock);
+	spin_unlock(&nbd->queue_lock);
 
 	err = -ENOENT;
@@ -308,78 +309,78 @@ out:
 	return ERR_PTR(err);
 }
 
-static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
+static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec)
 {
 	int result;
 	void *kaddr = kmap(bvec->bv_page);
-	result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
+	result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len,
 			MSG_WAITALL);
 	kunmap(bvec->bv_page);
 	return result;
 }
 
 /* NULL returned = something went wrong, inform userspace */
-static struct request *nbd_read_stat(struct nbd_device *lo)
+static struct request *nbd_read_stat(struct nbd_device *nbd)
 {
 	int result;
 	struct nbd_reply reply;
 	struct request *req;
 
 	reply.magic = 0;
-	result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
+	result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL);
 	if (result <= 0) {
-		dev_err(disk_to_dev(lo->disk),
+		dev_err(disk_to_dev(nbd->disk),
 			"Receive control failed (result %d)\n", result);
 		goto harderror;
 	}
 
 	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
-		dev_err(disk_to_dev(lo->disk), "Wrong magic (0x%lx)\n",
+		dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
 				(unsigned long)ntohl(reply.magic));
 		result = -EPROTO;
 		goto harderror;
 	}
 
-	req = nbd_find_request(lo, *(struct request **)reply.handle);
+	req = nbd_find_request(nbd, *(struct request **)reply.handle);
 	if (IS_ERR(req)) {
 		result = PTR_ERR(req);
 		if (result != -ENOENT)
 			goto harderror;
 
-		dev_err(disk_to_dev(lo->disk), "Unexpected reply (%p)\n",
+		dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n",
 			reply.handle);
 		result = -EBADR;
 		goto harderror;
 	}
 
 	if (ntohl(reply.error)) {
-		dev_err(disk_to_dev(lo->disk), "Other side returned error (%d)\n",
+		dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
 			ntohl(reply.error));
 		req->errors++;
 		return req;
 	}
 
 	dprintk(DBG_RX, "%s: request %p: got reply\n",
-			lo->disk->disk_name, req);
+			nbd->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
 		struct bio_vec *bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(lo, bvec);
+			result = sock_recv_bvec(nbd, bvec);
 			if (result <= 0) {
-				dev_err(disk_to_dev(lo->disk), "Receive data failed (result %d)\n",
+				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
 				req->errors++;
 				return req;
 			}
 			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				lo->disk->disk_name, req, bvec->bv_len);
+				nbd->disk->disk_name, req, bvec->bv_len);
 		}
 	}
 	return req;
 harderror:
-	lo->harderror = result;
+	nbd->harderror = result;
 	return NULL;
 }
@@ -397,48 +398,48 @@ static struct device_attribute pid_attr = {
 	.show = pid_show,
 };
 
-static int nbd_do_it(struct nbd_device *lo)
+static int nbd_do_it(struct nbd_device *nbd)
 {
 	struct request *req;
 	int ret;
 
-	BUG_ON(lo->magic != LO_MAGIC);
+	BUG_ON(nbd->magic != NBD_MAGIC);
 
-	lo->pid = task_pid_nr(current);
-	ret = device_create_file(disk_to_dev(lo->disk), &pid_attr);
+	nbd->pid = task_pid_nr(current);
+	ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
 	if (ret) {
-		dev_err(disk_to_dev(lo->disk), "device_create_file failed!\n");
-		lo->pid = 0;
+		dev_err(disk_to_dev(nbd->disk), "device_create_file failed!\n");
+		nbd->pid = 0;
 		return ret;
 	}
 
-	while ((req = nbd_read_stat(lo)) != NULL)
+	while ((req = nbd_read_stat(nbd)) != NULL)
 		nbd_end_request(req);
 
-	device_remove_file(disk_to_dev(lo->disk), &pid_attr);
-	lo->pid = 0;
+	device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
+	nbd->pid = 0;
 	return 0;
 }
 
-static void nbd_clear_que(struct nbd_device *lo)
+static void nbd_clear_que(struct nbd_device *nbd)
 {
 	struct request *req;
 
-	BUG_ON(lo->magic != LO_MAGIC);
+	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	/*
-	 * Because we have set lo->sock to NULL under the tx_lock, all
+	 * Because we have set nbd->sock to NULL under the tx_lock, all
 	 * modifications to the list must have completed by now. For
 	 * the same reason, the active_req must be NULL.
 	 *
 	 * As a consequence, we don't need to take the spin lock while
 	 * purging the list here.
 	 */
-	BUG_ON(lo->sock);
-	BUG_ON(lo->active_req);
+	BUG_ON(nbd->sock);
+	BUG_ON(nbd->active_req);
 
-	while (!list_empty(&lo->queue_head)) {
-		req = list_entry(lo->queue_head.next, struct request,
+	while (!list_empty(&nbd->queue_head)) {
+		req = list_entry(nbd->queue_head.next, struct request,
 				queuelist);
 		list_del_init(&req->queuelist);
 		req->errors++;
@@ -447,7 +448,7 @@ static void nbd_clear_que(struct nbd_device *lo)
 	}
 }
 
-static void nbd_handle_req(struct nbd_device *lo, struct request *req)
+static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 {
 	if (req->cmd_type != REQ_TYPE_FS)
 		goto error_out;
@@ -455,8 +456,8 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 	nbd_cmd(req) = NBD_CMD_READ;
 	if (rq_data_dir(req) == WRITE) {
 		nbd_cmd(req) = NBD_CMD_WRITE;
-		if (lo->flags & NBD_READ_ONLY) {
-			dev_err(disk_to_dev(lo->disk),
+		if (nbd->flags & NBD_READ_ONLY) {
+			dev_err(disk_to_dev(nbd->disk),
 				"Write on read-only\n");
 			goto error_out;
 		}
@@ -464,29 +465,29 @@ static void nbd_handle_req(struct nbd_device *lo, struct request *req)
 
 	req->errors = 0;
 
-	mutex_lock(&lo->tx_lock);
-	if (unlikely(!lo->sock)) {
-		mutex_unlock(&lo->tx_lock);
-		dev_err(disk_to_dev(lo->disk),
+	mutex_lock(&nbd->tx_lock);
+	if (unlikely(!nbd->sock)) {
+		mutex_unlock(&nbd->tx_lock);
+		dev_err(disk_to_dev(nbd->disk),
 			"Attempted send on closed socket\n");
 		goto error_out;
 	}
 
-	lo->active_req = req;
+	nbd->active_req = req;
 
-	if (nbd_send_req(lo, req) != 0) {
-		dev_err(disk_to_dev(lo->disk), "Request send failed\n");
+	if (nbd_send_req(nbd, req) != 0) {
+		dev_err(disk_to_dev(nbd->disk), "Request send failed\n");
 		req->errors++;
 		nbd_end_request(req);
 	} else {
-		spin_lock(&lo->queue_lock);
-		list_add(&req->queuelist, &lo->queue_head);
-		spin_unlock(&lo->queue_lock);
+		spin_lock(&nbd->queue_lock);
+		list_add(&req->queuelist, &nbd->queue_head);
+		spin_unlock(&nbd->queue_lock);
 	}
 
-	lo->active_req = NULL;
-	mutex_unlock(&lo->tx_lock);
-	wake_up_all(&lo->active_wq);
+	nbd->active_req = NULL;
+	mutex_unlock(&nbd->tx_lock);
+	wake_up_all(&nbd->active_wq);
 
 	return;
@@ -497,28 +498,28 @@ error_out:
 
 static int nbd_thread(void *data)
 {
-	struct nbd_device *lo = data;
+	struct nbd_device *nbd = data;
 	struct request *req;
 
 	set_user_nice(current, -20);
-	while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+	while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) {
 		/* wait for something to do */
-		wait_event_interruptible(lo->waiting_wq,
+		wait_event_interruptible(nbd->waiting_wq,
 					 kthread_should_stop() ||
-					 !list_empty(&lo->waiting_queue));
+					 !list_empty(&nbd->waiting_queue));
 
 		/* extract request */
-		if (list_empty(&lo->waiting_queue))
+		if (list_empty(&nbd->waiting_queue))
 			continue;
 
-		spin_lock_irq(&lo->queue_lock);
-		req = list_entry(lo->waiting_queue.next, struct request,
+		spin_lock_irq(&nbd->queue_lock);
+		req = list_entry(nbd->waiting_queue.next, struct request,
 				 queuelist);
 		list_del_init(&req->queuelist);
-		spin_unlock_irq(&lo->queue_lock);
+		spin_unlock_irq(&nbd->queue_lock);
 
 		/* handle request */
-		nbd_handle_req(lo, req);
+		nbd_handle_req(nbd, req);
 	}
 	return 0;
 }
@@ -526,7 +527,7 @@ static int nbd_thread(void *data)
 /*
  * We always wait for result of write, for now. It would be nice to make it optional
  * in future
- * if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
+ * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
  *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
  */
@@ -535,19 +536,19 @@ static void do_nbd_request(struct request_queue *q)
 	struct request *req;
 
 	while ((req = blk_fetch_request(q)) != NULL) {
-		struct nbd_device *lo;
+		struct nbd_device *nbd;
 
 		spin_unlock_irq(q->queue_lock);
 
 		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
 				req->rq_disk->disk_name, req, req->cmd_type);
 
-		lo = req->rq_disk->private_data;
+		nbd = req->rq_disk->private_data;
 
-		BUG_ON(lo->magic != LO_MAGIC);
+		BUG_ON(nbd->magic != NBD_MAGIC);
 
-		if (unlikely(!lo->sock)) {
-			dev_err(disk_to_dev(lo->disk),
+		if (unlikely(!nbd->sock)) {
+			dev_err(disk_to_dev(nbd->disk),
 				"Attempted send on closed socket\n");
 			req->errors++;
 			nbd_end_request(req);
@@ -555,11 +556,11 @@ static void do_nbd_request(struct request_queue *q)
 			continue;
 		}
 
-		spin_lock_irq(&lo->queue_lock);
-		list_add_tail(&req->queuelist, &lo->waiting_queue);
-		spin_unlock_irq(&lo->queue_lock);
+		spin_lock_irq(&nbd->queue_lock);
+		list_add_tail(&req->queuelist, &nbd->waiting_queue);
+		spin_unlock_irq(&nbd->queue_lock);
 
-		wake_up(&lo->waiting_wq);
+		wake_up(&nbd->waiting_wq);
 
 		spin_lock_irq(q->queue_lock);
 	}
@@ -567,32 +568,32 @@ static void do_nbd_request(struct request_queue *q)
 
 /* Must be called with tx_lock held */
 
-static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
+static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		       unsigned int cmd, unsigned long arg)
 {
 	switch (cmd) {
 	case NBD_DISCONNECT: {
 		struct request sreq;
 
-		dev_info(disk_to_dev(lo->disk), "NBD_DISCONNECT\n");
+		dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
 
 		blk_rq_init(NULL, &sreq);
 		sreq.cmd_type = REQ_TYPE_SPECIAL;
 		nbd_cmd(&sreq) = NBD_CMD_DISC;
-		if (!lo->sock)
+		if (!nbd->sock)
 			return -EINVAL;
-		nbd_send_req(lo, &sreq);
+		nbd_send_req(nbd, &sreq);
 		return 0;
 	}
 
 	case NBD_CLEAR_SOCK: {
 		struct file *file;
 
-		lo->sock = NULL;
-		file = lo->file;
-		lo->file = NULL;
-		nbd_clear_que(lo);
-		BUG_ON(!list_empty(&lo->queue_head));
+		nbd->sock = NULL;
+		file = nbd->file;
+		nbd->file = NULL;
+		nbd_clear_que(nbd);
+		BUG_ON(!list_empty(&nbd->queue_head));
 		if (file)
 			fput(file);
 		return 0;
@@ -600,14 +601,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
 	case NBD_SET_SOCK: {
 		struct file *file;
-		if (lo->file)
+		if (nbd->file)
 			return -EBUSY;
 		file = fget(arg);
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
 			if (S_ISSOCK(inode->i_mode)) {
-				lo->file = file;
-				lo->sock = SOCKET_I(inode);
+				nbd->file = file;
+				nbd->sock = SOCKET_I(inode);
 				if (max_part > 0)
 					bdev->bd_invalidated = 1;
 				return 0;
@@ -619,29 +620,29 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
 	}
 
 	case NBD_SET_BLKSIZE:
-		lo->blksize = arg;
-		lo->bytesize &= ~(lo->blksize-1);
-		bdev->bd_inode->i_size = lo->bytesize;
-		set_blocksize(bdev, lo->blksize);
-		set_capacity(lo->disk, lo->bytesize >> 9);
+		nbd->blksize = arg;
+		nbd->bytesize &= ~(nbd->blksize-1);
+		bdev->bd_inode->i_size = nbd->bytesize;
+		set_blocksize(bdev, nbd->blksize);
+		set_capacity(nbd->disk, nbd->bytesize >> 9);
 		return 0;
 
 	case NBD_SET_SIZE:
-		lo->bytesize = arg & ~(lo->blksize-1);
-		bdev->bd_inode->i_size = lo->bytesize;
-		set_blocksize(bdev, lo->blksize);
-		set_capacity(lo->disk, lo->bytesize >> 9);
+		nbd->bytesize = arg & ~(nbd->blksize-1);
+		bdev->bd_inode->i_size = nbd->bytesize;
+		set_blocksize(bdev, nbd->blksize);
+		set_capacity(nbd->disk, nbd->bytesize >> 9);
 		return 0;
 
 	case NBD_SET_TIMEOUT:
-		lo->xmit_timeout = arg * HZ;
+		nbd->xmit_timeout = arg * HZ;
 		return 0;
 
 	case NBD_SET_SIZE_BLOCKS:
-		lo->bytesize = ((u64) arg) * lo->blksize;
-		bdev->bd_inode->i_size = lo->bytesize;
-		set_blocksize(bdev, lo->blksize);
-		set_capacity(lo->disk, lo->bytesize >> 9);
+		nbd->bytesize = ((u64) arg) * nbd->blksize;
+		bdev->bd_inode->i_size = nbd->bytesize;
+		set_blocksize(bdev, nbd->blksize);
+		set_capacity(nbd->disk, nbd->bytesize >> 9);
 		return 0;
 
 	case NBD_DO_IT: {
@@ -649,38 +650,38 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
 		struct file *file;
 		int error;
 
-		if (lo->pid)
+		if (nbd->pid)
 			return -EBUSY;
-		if (!lo->file)
+		if (!nbd->file)
 			return -EINVAL;
 
-		mutex_unlock(&lo->tx_lock);
+		mutex_unlock(&nbd->tx_lock);
 
-		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+		thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name);
 		if (IS_ERR(thread)) {
-			mutex_lock(&lo->tx_lock);
+			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
 		}
 		wake_up_process(thread);
-		error = nbd_do_it(lo);
+		error = nbd_do_it(nbd);
 		kthread_stop(thread);
 
-		mutex_lock(&lo->tx_lock);
+		mutex_lock(&nbd->tx_lock);
 		if (error)
 			return error;
-		sock_shutdown(lo, 0);
-		file = lo->file;
-		lo->file = NULL;
-		nbd_clear_que(lo);
-		dev_warn(disk_to_dev(lo->disk), "queue cleared\n");
+		sock_shutdown(nbd, 0);
+		file = nbd->file;
+		nbd->file = NULL;
+		nbd_clear_que(nbd);
+		dev_warn(disk_to_dev(nbd->disk), "queue cleared\n");
 		if (file)
 			fput(file);
-		lo->bytesize = 0;
+		nbd->bytesize = 0;
 		bdev->bd_inode->i_size = 0;
-		set_capacity(lo->disk, 0);
+		set_capacity(nbd->disk, 0);
 		if (max_part > 0)
 			ioctl_by_bdev(bdev, BLKRRPART, 0);
-		return lo->harderror;
+		return nbd->harderror;
 	}
 
 	case NBD_CLEAR_QUE:
@@ -688,14 +689,14 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
 		/*
 		 * This is for compatibility only.  The queue is always cleared
 		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
 		 */
-		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
+		BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head));
 		return 0;
 
 	case NBD_PRINT_DEBUG:
-		dev_info(disk_to_dev(lo->disk),
+		dev_info(disk_to_dev(nbd->disk),
 			"next = %p, prev = %p, head = %p\n",
-			lo->queue_head.next, lo->queue_head.prev,
-			&lo->queue_head);
+			nbd->queue_head.next, nbd->queue_head.prev,
+			&nbd->queue_head);
 		return 0;
 	}
 	return -ENOTTY;
@@ -704,21 +705,21 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
 static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 		     unsigned int cmd, unsigned long arg)
 {
-	struct nbd_device *lo = bdev->bd_disk->private_data;
+	struct nbd_device *nbd = bdev->bd_disk->private_data;
 	int error;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	BUG_ON(lo->magic != LO_MAGIC);
+	BUG_ON(nbd->magic != NBD_MAGIC);
 
 	/* Anyone capable of this syscall can do *real bad* things */
 	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
-		lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+		nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
 
-	mutex_lock(&lo->tx_lock);
-	error = __nbd_ioctl(bdev, lo, cmd, arg);
-	mutex_unlock(&lo->tx_lock);
+	mutex_lock(&nbd->tx_lock);
+	error = __nbd_ioctl(bdev, nbd, cmd, arg);
+	mutex_unlock(&nbd->tx_lock);
 
 	return error;
 }
@@ -804,7 +805,7 @@ static int __init nbd_init(void)
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = nbd_dev[i].disk;
 		nbd_dev[i].file = NULL;
-		nbd_dev[i].magic = LO_MAGIC;
+		nbd_dev[i].magic = NBD_MAGIC;
 		nbd_dev[i].flags = 0;
 		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
 		spin_lock_init(&nbd_dev[i].queue_lock);
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c
index cf82fedae09..e53fc24c6af 100644
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -118,8 +118,8 @@ enum kcs_states {
 #define MAX_KCS_WRITE_SIZE IPMI_MAX_MSG_LENGTH
 
 /* Timeouts in microseconds. */
-#define IBF_RETRY_TIMEOUT 1000000
-#define OBF_RETRY_TIMEOUT 1000000
+#define IBF_RETRY_TIMEOUT 5000000
+#define OBF_RETRY_TIMEOUT 5000000
 #define MAX_ERROR_RETRIES 10
 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index c90e9390b78..2c29942b132 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -45,6 +45,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
+#include <linux/interrupt.h>
 
 #define PFX "IPMI message handler: "
 
@@ -52,6 +53,8 @@
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
+static void smi_recv_tasklet(unsigned long);
+static void handle_new_recv_msgs(ipmi_smi_t intf);
 
 static int initialized;
@@ -354,12 +357,15 @@ struct ipmi_smi {
 	int curr_seq;
 
 	/*
-	 * Messages that were delayed for some reason (out of memory,
-	 * for instance), will go in here to be processed later in a
-	 * periodic timer interrupt.
+	 * Messages queued for delivery.  If delivery fails (out of memory
+	 * for instance), They will stay in here to be processed later in a
+	 * periodic timer interrupt.  The tasklet is for handling received
+	 * messages directly from the handler.
 	 */
 	spinlock_t       waiting_msgs_lock;
 	struct list_head waiting_msgs;
+	atomic_t         watchdog_pretimeouts_to_deliver;
+	struct tasklet_struct recv_tasklet;
 
 	/*
 	 * The list of command receivers that are registered for commands
@@ -492,6 +498,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
 	struct cmd_rcvr  *rcvr, *rcvr2;
 	struct list_head list;
 
+	tasklet_kill(&intf->recv_tasklet);
+
 	free_smi_msg_list(&intf->waiting_msgs);
 	free_recv_msg_list(&intf->waiting_events);
@@ -2785,12 +2793,17 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
 	return;
 }
 
-void ipmi_poll_interface(ipmi_user_t user)
+static void ipmi_poll(ipmi_smi_t intf)
 {
-	ipmi_smi_t intf = user->intf;
-
 	if (intf->handlers->poll)
 		intf->handlers->poll(intf->send_info);
+	/* In case something came in */
+	handle_new_recv_msgs(intf);
+}
+
+void ipmi_poll_interface(ipmi_user_t user)
+{
+	ipmi_poll(user->intf);
 }
 EXPORT_SYMBOL(ipmi_poll_interface);
@@ -2859,6 +2872,10 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 #endif
 	spin_lock_init(&intf->waiting_msgs_lock);
 	INIT_LIST_HEAD(&intf->waiting_msgs);
+	tasklet_init(&intf->recv_tasklet,
+		     smi_recv_tasklet,
+		     (unsigned long) intf);
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->events_lock);
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
@@ -3621,11 +3638,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf,
 }
 
 /*
- * Handle a new message.  Return 1 if the message should be requeued,
+ * Handle a received message.  Return 1 if the message should be requeued,
  * 0 if the message should be freed, or -1 if the message should not
  * be freed or requeued.
  */
-static int handle_new_recv_msg(ipmi_smi_t intf,
+static int handle_one_recv_msg(ipmi_smi_t intf,
 			       struct ipmi_smi_msg *msg)
 {
 	int requeue;
@@ -3783,12 +3800,72 @@ static int handle_new_recv_msg(ipmi_smi_t intf,
 	return requeue;
 }
 
+/*
+ * If there are messages in the queue or pretimeouts, handle them.
+ */
+static void handle_new_recv_msgs(ipmi_smi_t intf)
+{
+	struct ipmi_smi_msg  *smi_msg;
+	unsigned long        flags = 0;
+	int                  rv;
+	int                  run_to_completion = intf->run_to_completion;
+
+	/* See if any waiting messages need to be processed. */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+	while (!list_empty(&intf->waiting_msgs)) {
+		smi_msg = list_entry(intf->waiting_msgs.next,
+				     struct ipmi_smi_msg, link);
+		list_del(&smi_msg->link);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		rv = handle_one_recv_msg(intf, smi_msg);
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+		if (rv == 0) {
+			/* Message handled */
+			ipmi_free_smi_msg(smi_msg);
+		} else if (rv < 0) {
+			/* Fatal error on the message, del but don't free. */
+		} else {
+			/*
+			 * To preserve message order, quit if we
+			 * can't handle a message.
+			 */
+			list_add(&smi_msg->link, &intf->waiting_msgs);
+			break;
+		}
+	}
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
+	/*
+	 * If the pretimout count is non-zero, decrement one from it and
+	 * deliver pretimeouts to all the users.
+	 */
+	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
+		ipmi_user_t user;
+
+		rcu_read_lock();
+		list_for_each_entry_rcu(user, &intf->users, link) {
+			if (user->handler->ipmi_watchdog_pretimeout)
+				user->handler->ipmi_watchdog_pretimeout(
+					user->handler_data);
+		}
+		rcu_read_unlock();
+	}
+}
+
+static void smi_recv_tasklet(unsigned long val)
+{
+	handle_new_recv_msgs((ipmi_smi_t) val);
+}
+
 /* Handle a new message from the lower layer. */
 void ipmi_smi_msg_received(ipmi_smi_t intf,
 			   struct ipmi_smi_msg *msg)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	int           rv;
 	int           run_to_completion;
@@ -3842,31 +3919,11 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 	run_to_completion = intf->run_to_completion;
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-	if (!list_empty(&intf->waiting_msgs)) {
-		list_add_tail(&msg->link, &intf->waiting_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-		goto out;
-	}
+	list_add_tail(&msg->link, &intf->waiting_msgs);
 	if (!run_to_completion)
 		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 
-	rv = handle_new_recv_msg(intf, msg);
-	if (rv > 0) {
-		/*
-		 * Could not handle the message now, just add it to a
-		 * list to handle later.
-		 */
-		run_to_completion = intf->run_to_completion;
-		if (!run_to_completion)
-			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-		list_add_tail(&msg->link, &intf->waiting_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-	} else if (rv == 0) {
-		ipmi_free_smi_msg(msg);
-	}
-
+	tasklet_schedule(&intf->recv_tasklet);
 out:
 	return;
 }
@@ -3874,16 +3931,8 @@ EXPORT_SYMBOL(ipmi_smi_msg_received);
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
-	ipmi_user_t user;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(user, &intf->users, link) {
-		if (!user->handler->ipmi_watchdog_pretimeout)
-			continue;
-
-		user->handler->ipmi_watchdog_pretimeout(user->handler_data);
-	}
-	rcu_read_unlock();
+	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
+	tasklet_schedule(&intf->recv_tasklet);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -3997,28 +4046,12 @@ static void ipmi_timeout_handler(long timeout_period)
 	ipmi_smi_t           intf;
 	struct list_head     timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
-	struct ipmi_smi_msg  *smi_msg, *smi_msg2;
 	unsigned long        flags;
 	int                  i;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		/* See if any waiting messages need to be processed. */
-		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-		list_for_each_entry_safe(smi_msg, smi_msg2,
-					 &intf->waiting_msgs, link) {
-			if (!handle_new_recv_msg(intf, smi_msg)) {
-				list_del(&smi_msg->link);
-				ipmi_free_smi_msg(smi_msg);
-			} else {
-				/*
-				 * To preserve message order, quit if we
-				 * can't handle a message.
-				 */
-				break;
-			}
-		}
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		tasklet_schedule(&intf->recv_tasklet);
 
 		/*
 		 * Go through the seq table and find any messages that
@@ -4172,12 +4205,48 @@ EXPORT_SYMBOL(ipmi_free_recv_msg);
 
 #ifdef CONFIG_IPMI_PANIC_EVENT
 
+static atomic_t panic_done_count = ATOMIC_INIT(0);
+
 static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
 {
+	atomic_dec(&panic_done_count);
 }
 
 static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
 {
+	atomic_dec(&panic_done_count);
+}
+
+/*
+ * Inside a panic, send a message and wait for a response.
+ */
+static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
+					struct ipmi_addr *addr,
+					struct kernel_ipmi_msg *msg)
+{
+	struct ipmi_smi_msg  smi_msg;
+	struct ipmi_recv_msg recv_msg;
+	int rv;
+
+	smi_msg.done = dummy_smi_done_handler;
+	recv_msg.done = dummy_recv_done_handler;
+	atomic_add(2, &panic_done_count);
+	rv = i_ipmi_request(NULL,
+			    intf,
+			    addr,
+			    0,
+			    msg,
+			    intf,
+			    &smi_msg,
+			    &recv_msg,
+			    0,
+			    intf->channels[0].address,
+			    intf->channels[0].lun,
+			    0, 1); /* Don't retry, and don't wait. */
+	if (rv)
+		atomic_sub(2, &panic_done_count);
+	while (atomic_read(&panic_done_count) != 0)
+		ipmi_poll(intf);
 }
 
 #ifdef CONFIG_IPMI_PANIC_STRING
@@ -4216,8 +4285,6 @@ static void send_panic_events(char *str)
 	unsigned char                     data[16];
 	struct ipmi_system_interface_addr *si;
 	struct ipmi_addr                  addr;
-	struct ipmi_smi_msg               smi_msg;
-	struct ipmi_recv_msg              recv_msg;
 
 	si = (struct ipmi_system_interface_addr *) &addr;
 	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -4245,9 +4312,6 @@ static void send_panic_events(char *str)
 		data[7] = str[2];
 	}
 
-	smi_msg.done = dummy_smi_done_handler;
-	recv_msg.done = dummy_recv_done_handler;
-
 	/* For every registered interface, send the event. */
 	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 		if (!intf->handlers)
@@ -4257,18 +4321,7 @@ static void send_panic_events(char *str)
 		intf->run_to_completion = 1;
 		/* Send the event announcing the panic. */
 		intf->handlers->set_run_to_completion(intf->send_info, 1);
-		i_ipmi_request(NULL,
-			       intf,
-			       &addr,
-			       0,
-			       &msg,
-			       intf,
-			       &smi_msg,
-			       &recv_msg,
-			       0,
-			       intf->channels[0].address,
-			       intf->channels[0].lun,
-			       0, 1); /* Don't retry, and don't wait. */
+		ipmi_panic_request_and_wait(intf, &addr, &msg);
 	}
 
 #ifdef CONFIG_IPMI_PANIC_STRING
@@ -4316,18 +4369,7 @@ static void send_panic_events(char *str)
 			msg.data = NULL;
 			msg.data_len = 0;
 			intf->null_user_handler = device_id_fetcher;
-			i_ipmi_request(NULL,
-				       intf,
-				       &addr,
-				       0,
-				       &msg,
-				       intf,
-				       &smi_msg,
-				       &recv_msg,
-				       0,
-				       intf->channels[0].address,
-				       intf->channels[0].lun,
-				       0, 1); /* Don't retry, and don't wait. */
+			ipmi_panic_request_and_wait(intf, &addr, &msg);
 
 			if (intf->local_event_generator) {
 				/* Request the event receiver from the local MC. */
@@ -4336,18 +4378,7 @@ static void send_panic_events(char *str)
 				msg.data = NULL;
 				msg.data_len = 0;
 				intf->null_user_handler = event_receiver_fetcher;
-				i_ipmi_request(NULL,
-					       intf,
-					       &addr,
-					       0,
-					       &msg,
-					       intf,
-					       &smi_msg,
-					       &recv_msg,
-					       0,
-					       intf->channels[0].address,
-					       intf->channels[0].lun,
-					       0, 1); /* no retry, and no wait. */
+				ipmi_panic_request_and_wait(intf, &addr, &msg);
 			}
 			intf->null_user_handler = NULL;
@@ -4404,18 +4435,7 @@ static void send_panic_events(char *str)
 			strncpy(data+5, p, 11);
 			p += size;
 
-			i_ipmi_request(NULL,
-				       intf,
-				       &addr,
-				       0,
-				       &msg,
-				       intf,
-				       &smi_msg,
-				       &recv_msg,
-				       0,
-				       intf->channels[0].address,
-				       intf->channels[0].lun,
-				       0, 1); /* no retry, and no wait. */
+			ipmi_panic_request_and_wait(intf, &addr, &msg);
 		}
 	}
 #endif /* CONFIG_IPMI_PANIC_STRING */
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index f9fdc114b31..1e638fff40e 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -170,7 +170,6 @@ struct smi_info {
 	struct si_sm_handlers  *handlers;
 	enum si_type           si_type;
 	spinlock_t             si_lock;
-	spinlock_t             msg_lock;
 	struct list_head       xmit_msgs;
 	struct list_head       hp_xmit_msgs;
 	struct ipmi_smi_msg    *curr_msg;
@@ -319,16 +318,8 @@ static int register_xaction_notifier(struct notifier_block *nb)
 static void deliver_recv_msg(struct smi_info *smi_info,
 			     struct ipmi_smi_msg *msg)
 {
-	/* Deliver the message to the upper layer with the lock
-	   released. */
-
-	if (smi_info->run_to_completion) {
-		ipmi_smi_msg_received(smi_info->intf, msg);
-	} else {
-		spin_unlock(&(smi_info->si_lock));
-		ipmi_smi_msg_received(smi_info->intf, msg);
-		spin_lock(&(smi_info->si_lock));
-	}
+	/* Deliver the message to the upper layer. */
+	ipmi_smi_msg_received(smi_info->intf, msg);
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -357,13 +348,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 	struct timeval t;
 #endif
 
-	/*
-	 * No need to save flags, we aleady have interrupts off and we
-	 * already hold the SMI lock.
-	 */
-	if (!smi_info->run_to_completion)
-		spin_lock(&(smi_info->msg_lock));
-
 	/* Pick the high priority queue first. */
 	if (!list_empty(&(smi_info->hp_xmit_msgs))) {
 		entry = smi_info->hp_xmit_msgs.next;
@@ -401,9 +385,6 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 		rv = SI_SM_CALL_WITHOUT_DELAY;
 	}
  out:
-	if (!smi_info->run_to_completion)
-		spin_unlock(&(smi_info->msg_lock));
-
 	return rv;
 }
@@ -480,9 +461,7 @@ static void handle_flags(struct smi_info *smi_info)
 		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
-		spin_unlock(&(smi_info->si_lock));
 		ipmi_smi_watchdog_pretimeout(smi_info->intf);
-		spin_lock(&(smi_info->si_lock));
 	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 		/* Messages available. */
 		smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -888,19 +867,6 @@ static void sender(void *send_info,
 	printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
-	/*
-	 * last_timeout_jiffies is updated here to avoid
-	 * smi_timeout() handler passing very large time_diff
-	 * value to smi_event_handler() that causes
-	 * the send command to abort.
-	 */
-	smi_info->last_timeout_jiffies = jiffies;
-
-	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
-	if (smi_info->thread)
-		wake_up_process(smi_info->thread);
-
 	if (smi_info->run_to_completion) {
 		/*
 		 * If we are running to completion, then throw it in
@@ -923,16 +889,29 @@ static void sender(void *send_info,
 		return;
 	}
 
-	spin_lock_irqsave(&smi_info->msg_lock, flags);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
 	if (priority > 0)
 		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
 	else
 		list_add_tail(&msg->link, &smi_info->xmit_msgs);
-	spin_unlock_irqrestore(&smi_info->msg_lock, flags);
 
-	spin_lock_irqsave(&smi_info->si_lock, flags);
-	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		/*
+		 * last_timeout_jiffies is updated here to avoid
+		 * smi_timeout() handler passing very large time_diff
+		 * value to smi_event_handler() that causes
+		 * the send command to abort.
+		 */
+		smi_info->last_timeout_jiffies = jiffies;
+
+		mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
 		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -1033,16 +1012,19 @@ static int ipmi_thread(void *data)
 static void poll(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
-	unsigned long flags;
+	unsigned long flags = 0;
+	int run_to_completion = smi_info->run_to_completion;
 
 	/*
 	 * Make sure there is some delay in the poll loop so we can
 	 * drive time forward and timeout things.
 	 */
 	udelay(10);
-	spin_lock_irqsave(&smi_info->si_lock, flags);
+	if (!run_to_completion)
+		spin_lock_irqsave(&smi_info->si_lock, flags);
 	smi_event_handler(smi_info, 10);
-	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
 static void request_events(void *send_info)
@@ -1679,10 +1661,8 @@ static struct smi_info *smi_info_alloc(void)
 {
 	struct smi_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 
-	if (info) {
+	if (info)
 		spin_lock_init(&info->si_lock);
-		spin_lock_init(&info->msg_lock);
-	}
 	return info;
 }
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 020a6aec2d8..7ed356e5203 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -520,6 +520,7 @@ static void panic_halt_ipmi_heartbeat(void)
 	msg.cmd = IPMI_WDOG_RESET_TIMER;
 	msg.data = NULL;
 	msg.data_len = 0;
+	atomic_add(2, &panic_done_count);
 	rv = ipmi_request_supply_msgs(watchdog_user,
 				      (struct ipmi_addr *) &addr,
 				      0,
@@ -528,8 +529,8 @@ static void panic_halt_ipmi_heartbeat(void)
 				      &panic_halt_heartbeat_smi_msg,
 				      &panic_halt_heartbeat_recv_msg,
 				      1);
-	if (!rv)
-		atomic_add(2, &panic_done_count);
+	if (rv)
+		atomic_sub(2, &panic_done_count);
 }
 
 static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -553,16 +554,18 @@ static void panic_halt_ipmi_set_timeout(void)
 	/* Wait for the messages to be free. */
 	while (atomic_read(&panic_done_count) != 0)
 		ipmi_poll_interface(watchdog_user);
+	atomic_add(2, &panic_done_count);
 	rv = i_ipmi_set_timeout(&panic_halt_smi_msg,
 				&panic_halt_recv_msg,
 				&send_heartbeat_now);
-	if (!rv) {
-		atomic_add(2, &panic_done_count);
-		if (send_heartbeat_now)
-			panic_halt_ipmi_heartbeat();
-	} else
+	if (rv) {
+		atomic_sub(2, &panic_done_count);
 		printk(KERN_WARNING PFX
 		       "Unable to extend the watchdog timeout.");
+	} else {
+		if (send_heartbeat_now)
+			panic_halt_ipmi_heartbeat();
+	}
 
 	while (atomic_read(&panic_done_count) != 0)
 		ipmi_poll_interface(watchdog_user);
 }
@@ -1164,7 +1167,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
 		if (code == SYS_POWER_OFF || code == SYS_HALT) {
 			/* Disable the WDT if we are shutting down. */
 			ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
-			panic_halt_ipmi_set_timeout();
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
 		} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
 			/* Set a long timer to let the reboot happens, but
 			   reboot if it hangs, but only if the watchdog
@@ -1172,7 +1175,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
 			timeout = 120;
 			pretimeout = 0;
 			ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
-			panic_halt_ipmi_set_timeout();
+			ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
 		}
 	}
 	return NOTIFY_OK;
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index a2161f631a8..2231aec2391 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi)
 }
 #else
 #define tosa_lcd_suspend	NULL
-#define tosa_lcd_reume	NULL
+#define tosa_lcd_resume	NULL
 #endif
 
 static struct spi_driver tosa_lcd_driver = {
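The ipmi_msghandler.c change above is the core of the series: `ipmi_smi_msg_received()` no longer processes messages inline but only queues them and schedules a tasklet, and the same tasklet also delivers watchdog pretimeouts counted in an `atomic_t`. A stripped-down sketch of that deferral pattern, with hypothetical `mini_*` names standing in for the driver's real types:

```c
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct mini_msg {
	struct list_head link;
	/* payload ... */
};

struct mini_intf {
	spinlock_t lock;
	struct list_head msgs;
	struct tasklet_struct recv_tasklet;
};

/* Runs in softirq context; drains the queue, handling each message
 * outside the lock, the way handle_new_recv_msgs() does. */
static void mini_recv_tasklet(unsigned long data)
{
	struct mini_intf *intf = (struct mini_intf *)data;
	struct mini_msg *msg;
	unsigned long flags;

	spin_lock_irqsave(&intf->lock, flags);
	while (!list_empty(&intf->msgs)) {
		msg = list_first_entry(&intf->msgs, struct mini_msg, link);
		list_del(&msg->link);
		spin_unlock_irqrestore(&intf->lock, flags);
		/* ... process 'msg' here, lock released ... */
		spin_lock_irqsave(&intf->lock, flags);
	}
	spin_unlock_irqrestore(&intf->lock, flags);
}

/* Interrupt path: queue the message and defer, never handle inline. */
static void mini_msg_received(struct mini_intf *intf, struct mini_msg *msg)
{
	unsigned long flags;

	spin_lock_irqsave(&intf->lock, flags);
	list_add_tail(&msg->link, &intf->msgs);
	spin_unlock_irqrestore(&intf->lock, flags);
	tasklet_schedule(&intf->recv_tasklet);
}

static void mini_init(struct mini_intf *intf)
{
	spin_lock_init(&intf->lock);
	INIT_LIST_HEAD(&intf->msgs);
	tasklet_init(&intf->recv_tasklet, mini_recv_tasklet,
		     (unsigned long)intf);
}
```

This is why ipmi_si_intf.c can drop the unlock/relock dance in `deliver_recv_msg()` and `handle_flags()`: the upper layer no longer does any real work in the caller's context, so it is safe to call with `si_lock` held.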
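Both ipmi_msghandler.c and ipmi_watchdog.c now follow the same panic-time discipline: bump the done-counter by the number of expected completions *before* issuing the request, undo the bump if the submit fails, and busy-poll until the completion handlers count it back to zero (interrupts cannot be relied on during a panic). A self-contained sketch of the pattern, with `submit_request()` and `poll_hardware()` as hypothetical stand-ins for `i_ipmi_request()` and the interface poll hook:

```c
#include <linux/atomic.h>

static atomic_t done_count = ATOMIC_INIT(0);

/* Hypothetical submit hook; returns 0 on success and promises to
 * invoke both callbacks exactly once when the exchange finishes. */
static int submit_request(void (*smi_done)(void), void (*recv_done)(void));
/* Hypothetical stand-in for intf->handlers->poll(). */
static void poll_hardware(void);

/* One completion for the SMI message, one for the receive message,
 * hence the count of 2 below. */
static void on_smi_done(void)  { atomic_dec(&done_count); }
static void on_recv_done(void) { atomic_dec(&done_count); }

static void panic_request_and_wait(void)
{
	/* Count the completions before submitting, so a completion that
	 * fires immediately cannot drive the counter negative. */
	atomic_add(2, &done_count);
	if (submit_request(on_smi_done, on_recv_done))
		atomic_sub(2, &done_count);	/* nothing will ever complete */
	while (atomic_read(&done_count) != 0)
		poll_hardware();	/* drive the state machine by hand */
}
```

The watchdog hunks fix the old ordering bug, where the counter was only incremented after a successful submit, leaving a window in which a fast completion decremented a counter that had never been raised; the reboot-notifier hunks additionally switch to `ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB)` so that the normal, lock-using path is taken on an orderly reboot instead of the panic path.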