Diffstat (limited to 'drivers/message/i2o/i2o_config.c')
-rw-r--r--  drivers/message/i2o/i2o_config.c | 412
1 file changed, 212 insertions(+), 200 deletions(-)
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7abebb1..04bd3b6de40 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -10,16 +10,16 @@
  *	Added basic ioctl() support
  *	Deepak Saxena (06/07/1999):
  *		Added software download ioctl (still testing)
- *	Auvo Häkkinen (09/10/1999):
+ *	Auvo Häkkinen (09/10/1999):
  *		Changes to i2o_cfg_reply(), ioctl_parms()
  *		Added ioct_validate()
- *	Taneli Vähäkangas (09/30/1999):
+ *	Taneli Vähäkangas (09/30/1999):
  *		Fixed ioctl_swdl()
- *	Taneli Vähäkangas (10/04/1999):
+ *	Taneli Vähäkangas (10/04/1999):
  *		Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
  *	Deepak Saxena (11/18/1999):
  *		Added event managmenet support
- *	Alan Cox <alan@redhat.com>:
+ *	Alan Cox <alan@lxorguk.ukuu.org.uk>:
  *		2.4 rewrite ported to 2.5
  *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
  *		Added pass-thru support for Adaptec's raidutils
@@ -31,8 +31,9 @@
  */
 
 #include <linux/miscdevice.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
 #include <linux/compat.h>
+#include <linux/slab.h>
 
 #include <asm/uaccess.h>
 
@@ -40,8 +41,8 @@
 
 #define SG_TABLESIZE		30
 
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
-			 unsigned long arg);
+static DEFINE_MUTEX(i2o_cfg_mutex);
+static long i2o_cfg_ioctl(struct file *, unsigned int, unsigned long);
 
 static spinlock_t i2o_config_lock;
 
@@ -111,11 +112,11 @@ static int i2o_cfg_gethrt(unsigned long arg)
 
 	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
 
-	/* We did a get user...so assuming mem is ok...is this bad? */
-	put_user(len, kcmd.reslen);
-	if (len > reslen)
+	if (put_user(len, kcmd.reslen))
+		ret = -EFAULT;
+	else if (len > reslen)
 		ret = -ENOBUFS;
-	if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
+	else if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
 		ret = -EFAULT;
 
 	return ret;
@@ -147,8 +148,9 @@ static int i2o_cfg_getlct(unsigned long arg)
 	lct = (i2o_lct *) c->lct;
 	len = (unsigned int)lct->table_size << 2;
 
-	put_user(len, kcmd.reslen);
-	if (len > reslen)
+	if (put_user(len, kcmd.reslen))
+		ret = -EFAULT;
+	else if (len > reslen)
 		ret = -ENOBUFS;
 	else if (copy_to_user(kcmd.resbuf, lct, len))
 		ret = -EFAULT;
@@ -186,20 +188,22 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
 	if (!dev)
 		return -ENXIO;
 
-	ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL);
-	if (!ops)
-		return -ENOMEM;
+	/*
+	 * Stop users being able to try and allocate arbitrary amounts
+	 * of DMA space. 64K is way more than sufficient for this.
+	 */
+	if (kcmd.oplen > 65536)
+		return -EMSGSIZE;
 
-	if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
-		kfree(ops);
-		return -EFAULT;
-	}
+	ops = memdup_user(kcmd.opbuf, kcmd.oplen);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
 
 	/*
 	 * It's possible to have a _very_ large table
 	 * and that the user asks for all of it at once...
 	 */
-	res = (u8 *) kmalloc(65536, GFP_KERNEL);
+	res = kmalloc(65536, GFP_KERNEL);
 	if (!res) {
 		kfree(ops);
 		return -ENOMEM;
 	}
@@ -213,8 +217,9 @@ static int i2o_cfg_parms(unsigned long arg, unsigned int type)
 		return -EAGAIN;
 	}
 
-	put_user(len, kcmd.reslen);
-	if (len > reslen)
+	if (put_user(len, kcmd.reslen))
+		ret = -EFAULT;
+	else if (len > reslen)
 		ret = -ENOBUFS;
 	else if (copy_to_user(kcmd.resbuf, res, len))
 		ret = -EFAULT;
@@ -230,8 +235,7 @@ static int i2o_cfg_swdl(unsigned long arg)
 	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
 	unsigned char maxfrag = 0, curfrag = 1;
 	struct i2o_dma buffer;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	unsigned int status = 0, swlen = 0, fragsize = 8192;
 	struct i2o_controller *c;
 
@@ -257,31 +261,38 @@ static int i2o_cfg_swdl(unsigned long arg)
 	if (!c)
 		return -ENXIO;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -EBUSY;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
-		i2o_msg_nop(c, m);
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
+		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
 
-	__copy_from_user(buffer.virt, kxfer.buf, fragsize);
+	if (__copy_from_user(buffer.virt, kxfer.buf, fragsize)) {
+		i2o_msg_nop(c, msg);
+		i2o_dma_free(&c->pdev->dev, &buffer);
+		return -EFAULT;
+	}
 
-	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
-	writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_config_driver.context, &msg->u.head[2]);
-	writel(0, &msg->u.head[3]);
-	writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
-	       (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
-	writel(swlen, &msg->body[1]);
-	writel(kxfer.sw_id, &msg->body[2]);
-	writel(0xD0000000 | fragsize, &msg->body[3]);
-	writel(buffer.phys, &msg->body[4]);
+	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
+			ADAPTER_TID);
+	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+	msg->u.head[3] = cpu_to_le32(0);
+	msg->body[0] =
+	    cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
+							sw_type) << 16) |
+			(((u32) maxfrag) << 8) | (((u32) curfrag)));
+	msg->body[1] = cpu_to_le32(swlen);
+	msg->body[2] = cpu_to_le32(kxfer.sw_id);
+	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+	msg->body[4] = cpu_to_le32(buffer.phys);
 
 	osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
-	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
 
 	if (status != -ETIMEDOUT)
 		i2o_dma_free(&c->pdev->dev, &buffer);
 
@@ -302,58 +313,57 @@ static int i2o_cfg_swul(unsigned long arg)
 	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
 	unsigned char maxfrag = 0, curfrag = 1;
 	struct i2o_dma buffer;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	unsigned int status = 0, swlen = 0, fragsize = 8192;
 	struct i2o_controller *c;
 	int ret = 0;
 
 	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
-		goto return_fault;
+		return -EFAULT;
 
 	if (get_user(swlen, kxfer.swlen) < 0)
-		goto return_fault;
+		return -EFAULT;
 
 	if (get_user(maxfrag, kxfer.maxfrag) < 0)
-		goto return_fault;
+		return -EFAULT;
 
 	if (get_user(curfrag, kxfer.curfrag) < 0)
-		goto return_fault;
+		return -EFAULT;
 
 	if (curfrag == maxfrag)
 		fragsize = swlen - (maxfrag - 1) * 8192;
 
 	if (!kxfer.buf)
-		goto return_fault;
+		return -EFAULT;
 
 	c = i2o_find_iop(kxfer.iop);
 	if (!c)
 		return -ENXIO;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -EBUSY;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
-		i2o_msg_nop(c, m);
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize)) {
+		i2o_msg_nop(c, msg);
 		return -ENOMEM;
 	}
 
-	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
-	writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_config_driver.context, &msg->u.head[2]);
-	writel(0, &msg->u.head[3]);
-	writel((u32) kxfer.flags << 24 | (u32) kxfer.
-	       sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag,
-	       &msg->body[0]);
-	writel(swlen, &msg->body[1]);
-	writel(kxfer.sw_id, &msg->body[2]);
-	writel(0xD0000000 | fragsize, &msg->body[3]);
-	writel(buffer.phys, &msg->body[4]);
+	msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
+	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+	msg->u.head[3] = cpu_to_le32(0);
+	msg->body[0] =
+	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
+			sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
+	msg->body[1] = cpu_to_le32(swlen);
+	msg->body[2] = cpu_to_le32(kxfer.sw_id);
+	msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+	msg->body[4] = cpu_to_le32(buffer.phys);
 
 	osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
-	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+	status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
 
 	if (status != I2O_POST_WAIT_OK) {
 		if (status != -ETIMEDOUT)
@@ -368,20 +378,15 @@ static int i2o_cfg_swul(unsigned long arg)
 
 	i2o_dma_free(&c->pdev->dev, &buffer);
 
-      return_ret:
 	return ret;
-      return_fault:
-	ret = -EFAULT;
-	goto return_ret;
-};
+}
 
 static int i2o_cfg_swdel(unsigned long arg)
 {
 	struct i2o_controller *c;
 	struct i2o_sw_xfer kxfer;
 	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	unsigned int swlen;
 	int token;
 
@@ -395,21 +400,21 @@ static int i2o_cfg_swdel(unsigned long arg)
 	if (!c)
 		return -ENXIO;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -EBUSY;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
-	       &msg->u.head[1]);
-	writel(i2o_config_driver.context, &msg->u.head[2]);
-	writel(0, &msg->u.head[3]);
-	writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
-	       &msg->body[0]);
-	writel(swlen, &msg->body[1]);
-	writel(kxfer.sw_id, &msg->body[2]);
+	msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
+	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+	msg->u.head[3] = cpu_to_le32(0);
+	msg->body[0] =
+	    cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
+	msg->body[1] = cpu_to_le32(swlen);
+	msg->body[2] = cpu_to_le32(kxfer.sw_id);
 
-	token = i2o_msg_post_wait(c, m, 10);
+	token = i2o_msg_post_wait(c, msg, 10);
 
 	if (token != I2O_POST_WAIT_OK) {
 		osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +428,24 @@ static int i2o_cfg_validate(unsigned long arg)
 {
 	int token;
 	int iop = (int)arg;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	struct i2o_controller *c;
 
 	c = i2o_find_iop(iop);
 	if (!c)
 		return -ENXIO;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -EBUSY;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
-	       &msg->u.head[1]);
-	writel(i2o_config_driver.context, &msg->u.head[2]);
-	writel(0, &msg->u.head[3]);
+	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
+	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+	msg->u.head[3] = cpu_to_le32(0);
 
-	token = i2o_msg_post_wait(c, m, 10);
+	token = i2o_msg_post_wait(c, msg, 10);
 
 	if (token != I2O_POST_WAIT_OK) {
 		osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +458,7 @@ static int i2o_cfg_validate(unsigned long arg)
 
 static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
 {
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
 	struct i2o_evt_id kdesc;
 	struct i2o_controller *c;
@@ -474,18 +477,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
 	if (!d)
 		return -ENODEV;
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-	if (m == I2O_QUEUE_EMPTY)
-		return -EBUSY;
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
-	writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
-	       &msg->u.head[1]);
-	writel(i2o_config_driver.context, &msg->u.head[2]);
-	writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
-	writel(kdesc.evt_mask, &msg->body[0]);
+	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+	msg->u.head[1] =
+	    cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
+			kdesc.tid);
+	msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+	msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
+	msg->body[0] = cpu_to_le32(kdesc.evt_mask);
 
-	i2o_msg_post(c, m);
+	i2o_msg_post(c, msg);
 
 	return 0;
 }
@@ -517,7 +521,6 @@ static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
 	return 0;
 }
 
-#ifdef CONFIG_I2O_EXT_ADAPTEC
 #ifdef CONFIG_COMPAT
 static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			      unsigned long arg)
@@ -537,7 +540,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 	u32 sg_index = 0;
 	i2o_status_block *sb;
 	struct i2o_message *msg;
-	u32 m;
 	unsigned int iop;
 
 	cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,8 +555,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		return -ENXIO;
 	}
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-
 	sb = c->status_block.virt;
 
 	if (get_user(size, &user_msg[0])) {
@@ -572,32 +572,34 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 
 	size <<= 2;		// Convert to bytes
 
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	rcode = -EFAULT;
 	/* Copy in the user's I2O command */
 	if (copy_from_user(msg, user_msg, size)) {
 		osm_warn("unable to copy user message\n");
-		return -EFAULT;
+		goto out;
 	}
 	i2o_dump_message(msg);
 
 	if (get_user(reply_size, &user_reply[0]) < 0)
-		return -EFAULT;
+		goto out;
 
 	reply_size >>= 16;
 	reply_size <<= 2;
 
-	reply = kmalloc(reply_size, GFP_KERNEL);
+	rcode = -ENOMEM;
+	reply = kzalloc(reply_size, GFP_KERNEL);
 	if (!reply) {
 		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
 		       c->name);
-		return -ENOMEM;
+		goto out;
 	}
-	memset(reply, 0, reply_size);
 
 	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
 
-	writel(i2o_config_driver.context, &msg->u.s.icntxt);
-	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
 	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
 	if (sg_offset) {
 		struct sg_simple_element *sg;
@@ -631,17 +633,16 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 				goto cleanup;
 			}
 			sg_size = sg[i].flag_count & 0xffffff;
-			p = &(sg_list[sg_index++]);
+			p = &(sg_list[sg_index]);
 			/* Allocate memory for the transfer */
-			if (i2o_dma_alloc
-			    (&c->pdev->dev, p, sg_size,
-			     PCI_DMA_BIDIRECTIONAL)) {
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
 				rcode = -ENOMEM;
 				goto sg_list_cleanup;
 			}
+			sg_index++;
 			/* Copy in the user's SG buffer if necessary */
 			if (sg[i].
 			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
@@ -662,12 +663,15 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		}
 	}
 
-	rcode = i2o_msg_post_wait(c, m, 60);
-	if (rcode)
+	rcode = i2o_msg_post_wait(c, msg, 60);
+	msg = NULL;
+	if (rcode) {
+		reply[4] = ((u32) rcode) << 24;
 		goto sg_list_cleanup;
+	}
 
 	if (sg_offset) {
-		u32 msg[I2O_OUTBOUND_MSG_FRAME_SIZE];
+		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -675,7 +679,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		int sg_size;
 
 		// re-acquire the original message to handle correctly the sg copy operation
-		memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
 		// get user msg size in u32s
 		if (get_user(size, &user_msg[0])) {
 			rcode = -EFAULT;
@@ -683,8 +687,13 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		}
 		size = size >> 16;
 		size *= 4;
+		if (size > sizeof(rmsg)) {
+			rcode = -EINVAL;
+			goto sg_list_cleanup;
+		}
+
 		/* Copy in the user's I2O command */
-		if (copy_from_user(msg, user_msg, size)) {
+		if (copy_from_user(rmsg, user_msg, size)) {
 			rcode = -EFAULT;
 			goto sg_list_cleanup;
 		}
@@ -692,7 +701,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
 
 		// TODO 64bit fix
-		sg = (struct sg_simple_element *)(msg + sg_offset);
+		sg = (struct sg_simple_element *)(rmsg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if (!
@@ -714,6 +723,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 		}
 	}
 
+sg_list_cleanup:
 	/* Copy back the reply to user space */
 	if (reply_size) {
 		// we wrote our own values for context - now restore the user supplied ones
@@ -722,7 +732,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			       "%s: Could not copy message context FROM user\n",
 			       c->name);
 			rcode = -EFAULT;
-			goto sg_list_cleanup;
 		}
 		if (copy_to_user(user_reply, reply, reply_size)) {
 			printk(KERN_WARNING
@@ -730,13 +739,14 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
 			rcode = -EFAULT;
 		}
 	}
-
-      sg_list_cleanup:
 	for (i = 0; i < sg_index; i++)
 		i2o_dma_free(&c->pdev->dev, &sg_list[i]);
 
-      cleanup:
+cleanup:
 	kfree(reply);
+out:
+	if (msg)
+		i2o_msg_nop(c, msg);
 	return rcode;
 }
 
@@ -744,24 +754,25 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
 				 unsigned long arg)
 {
 	int ret;
-	lock_kernel();
 	switch (cmd) {
 	case I2OGETIOPS:
-		ret = i2o_cfg_ioctl(NULL, file, cmd, arg);
+		ret = i2o_cfg_ioctl(file, cmd, arg);
 		break;
 	case I2OPASSTHRU32:
+		mutex_lock(&i2o_cfg_mutex);
 		ret = i2o_cfg_passthru32(file, cmd, arg);
+		mutex_unlock(&i2o_cfg_mutex);
 		break;
 	default:
 		ret = -ENOIOCTLCMD;
 		break;
 	}
-	unlock_kernel();
 	return ret;
 }
 
 #endif
 
+#ifdef CONFIG_I2O_EXT_ADAPTEC
 static int i2o_cfg_passthru(unsigned long arg)
 {
 	struct i2o_cmd_passthru __user *cmd =
@@ -773,15 +784,13 @@ static int i2o_cfg_passthru(unsigned long arg)
 	u32 size = 0;
 	u32 reply_size = 0;
 	u32 rcode = 0;
-	void *sg_list[SG_TABLESIZE];
+	struct i2o_dma sg_list[SG_TABLESIZE];
 	u32 sg_offset = 0;
 	u32 sg_count = 0;
 	int sg_index = 0;
 	u32 i = 0;
-	void *p = NULL;
 	i2o_status_block *sb;
-	struct i2o_message __iomem *msg;
-	u32 m;
+	struct i2o_message *msg;
 	unsigned int iop;
 
 	if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,8 +802,6 @@ static int i2o_cfg_passthru(unsigned long arg)
 		return -ENXIO;
 	}
 
-	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
-
 	sb = c->status_block.virt;
 
 	if (get_user(size, &user_msg[0]))
@@ -810,32 +817,35 @@ static int i2o_cfg_passthru(unsigned long arg)
 
 	size <<= 2;		// Convert to bytes
 
+	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	rcode = -EFAULT;
 	/* Copy in the user's I2O command */
 	if (copy_from_user(msg, user_msg, size))
-		return -EFAULT;
+		goto out;
 
 	if (get_user(reply_size, &user_reply[0]) < 0)
-		return -EFAULT;
+		goto out;
 
 	reply_size >>= 16;
 	reply_size <<= 2;
 
-	reply = kmalloc(reply_size, GFP_KERNEL);
+	reply = kzalloc(reply_size, GFP_KERNEL);
 	if (!reply) {
 		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
 		       c->name);
-		return -ENOMEM;
+		rcode = -ENOMEM;
+		goto out;
 	}
-	memset(reply, 0, reply_size);
 
 	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
 
-	writel(i2o_config_driver.context, &msg->u.s.icntxt);
-	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
-
 	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
 	if (sg_offset) {
 		struct sg_simple_element *sg;
+		struct i2o_dma *p;
 
 		if (sg_offset * 4 >= size) {
 			rcode = -EFAULT;
@@ -865,22 +875,22 @@ static int i2o_cfg_passthru(unsigned long arg)
 				goto sg_list_cleanup;
 			}
 			sg_size = sg[i].flag_count & 0xffffff;
+			p = &(sg_list[sg_index]);
+			if (i2o_dma_alloc(&c->pdev->dev, p, sg_size)) {
 			/* Allocate memory for the transfer */
-			p = kmalloc(sg_size, GFP_KERNEL);
-			if (!p) {
 				printk(KERN_DEBUG
 				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 				       c->name, sg_size, i, sg_count);
 				rcode = -ENOMEM;
 				goto sg_list_cleanup;
 			}
-			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
+			sg_index++;
 			/* Copy in the user's SG buffer if necessary */
 			if (sg[i].
 			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
 				// TODO 64bit fix
 				if (copy_from_user
-				    (p, (void __user *)sg[i].addr_bus,
+				    (p->virt, (void __user *)sg[i].addr_bus,
 				     sg_size)) {
 					printk(KERN_DEBUG
 					       "%s: Could not copy SG buf %d FROM user\n",
@@ -889,17 +899,19 @@ static int i2o_cfg_passthru(unsigned long arg)
 					goto sg_list_cleanup;
 				}
 			}
-			//TODO 64bit fix
-			sg[i].addr_bus = virt_to_bus(p);
+			sg[i].addr_bus = p->phys;
 		}
 	}
 
-	rcode = i2o_msg_post_wait(c, m, 60);
-	if (rcode)
+	rcode = i2o_msg_post_wait(c, msg, 60);
+	msg = NULL;
+	if (rcode) {
+		reply[4] = ((u32) rcode) << 24;
 		goto sg_list_cleanup;
+	}
 
 	if (sg_offset) {
-		u32 msg[128];
+		u32 rmsg[I2O_OUTBOUND_MSG_FRAME_SIZE];
 		/* Copy back the Scatter Gather buffers back to user space */
 		u32 j;
 		// TODO 64bit fix
@@ -907,7 +919,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 		int sg_size;
 
 		// re-acquire the original message to handle correctly the sg copy operation
-		memset(&msg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
+		memset(&rmsg, 0, I2O_OUTBOUND_MSG_FRAME_SIZE * 4);
 		// get user msg size in u32s
 		if (get_user(size, &user_msg[0])) {
 			rcode = -EFAULT;
@@ -915,8 +927,13 @@ static int i2o_cfg_passthru(unsigned long arg)
 		}
 		size = size >> 16;
 		size *= 4;
+		if (size > sizeof(rmsg)) {
+			rcode = -EFAULT;
+			goto sg_list_cleanup;
+		}
+
 		/* Copy in the user's I2O command */
-		if (copy_from_user(msg, user_msg, size)) {
+		if (copy_from_user(rmsg, user_msg, size)) {
 			rcode = -EFAULT;
 			goto sg_list_cleanup;
 		}
@@ -924,7 +941,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
 
 		// TODO 64bit fix
-		sg = (struct sg_simple_element *)(msg + sg_offset);
+		sg = (struct sg_simple_element *)(rmsg + sg_offset);
 		for (j = 0; j < sg_count; j++) {
 			/* Copy out the SG list to user's buffer if necessary */
 			if (!
@@ -933,11 +950,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 				sg_size = sg[j].flag_count & 0xffffff;
 				// TODO 64bit fix
 				if (copy_to_user
-				    ((void __user *)sg[j].addr_bus, sg_list[j],
+				    ((void __user *)sg[j].addr_bus, sg_list[j].virt,
 				     sg_size)) {
 					printk(KERN_WARNING
 					       "%s: Could not copy %p TO user %x\n",
-					       c->name, sg_list[j],
+					       c->name, sg_list[j].virt,
 					       sg[j].addr_bus);
 					rcode = -EFAULT;
 					goto sg_list_cleanup;
@@ -946,6 +963,7 @@ static int i2o_cfg_passthru(unsigned long arg)
 		}
 	}
 
+sg_list_cleanup:
 	/* Copy back the reply to user space */
 	if (reply_size) {
 		// we wrote our own values for context - now restore the user supplied ones
@@ -962,12 +980,14 @@ static int i2o_cfg_passthru(unsigned long arg)
 		}
 	}
 
-      sg_list_cleanup:
 	for (i = 0; i < sg_index; i++)
-		kfree(sg_list[i]);
+		i2o_dma_free(&c->pdev->dev, &sg_list[i]);
 
-      cleanup:
+cleanup:
 	kfree(reply);
+out:
+	if (msg)
+		i2o_msg_nop(c, msg);
 	return rcode;
 }
 #endif
@@ -975,11 +995,11 @@ static int i2o_cfg_passthru(unsigned long arg)
 /*
  * IOCTL Handler
 */
-static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
-			 unsigned long arg)
+static long i2o_cfg_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
 
+	mutex_lock(&i2o_cfg_mutex);
 	switch (cmd) {
 	case I2OGETIOPS:
 		ret = i2o_cfg_getiops(arg);
@@ -1035,20 +1055,20 @@ static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
 		osm_debug("unknown ioctl called!\n");
 		ret = -EINVAL;
 	}
-
+	mutex_unlock(&i2o_cfg_mutex);
 	return ret;
 }
 
 static int cfg_open(struct inode *inode, struct file *file)
 {
-	struct i2o_cfg_info *tmp =
-	    (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+	struct i2o_cfg_info *tmp = kmalloc(sizeof(struct i2o_cfg_info),
 					   GFP_KERNEL);
 	unsigned long flags;
 
 	if (!tmp)
 		return -ENOMEM;
 
+	mutex_lock(&i2o_cfg_mutex);
 	file->private_data = (void *)(i2o_cfg_info_id++);
 	tmp->fp = file;
 	tmp->fasync = NULL;
@@ -1062,6 +1082,7 @@ static int cfg_open(struct inode *inode, struct file *file)
 	spin_lock_irqsave(&i2o_config_lock, flags);
 	open_files = tmp;
 	spin_unlock_irqrestore(&i2o_config_lock, flags);
+	mutex_unlock(&i2o_cfg_mutex);
 
 	return 0;
 }
@@ -1070,53 +1091,44 @@ static int cfg_fasync(int fd, struct file *fp, int on)
 {
 	ulong id = (ulong) fp->private_data;
 	struct i2o_cfg_info *p;
+	int ret = -EBADF;
 
+	mutex_lock(&i2o_cfg_mutex);
 	for (p = open_files; p; p = p->next)
 		if (p->q_id == id)
 			break;
 
-	if (!p)
-		return -EBADF;
-
-	return fasync_helper(fd, fp, on, &p->fasync);
+	if (p)
+		ret = fasync_helper(fd, fp, on, &p->fasync);
+	mutex_unlock(&i2o_cfg_mutex);
+	return ret;
 }
 
 static int cfg_release(struct inode *inode, struct file *file)
 {
 	ulong id = (ulong) file->private_data;
-	struct i2o_cfg_info *p1, *p2;
+	struct i2o_cfg_info *p, **q;
 	unsigned long flags;
 
-	lock_kernel();
-	p1 = p2 = NULL;
-
+	mutex_lock(&i2o_cfg_mutex);
 	spin_lock_irqsave(&i2o_config_lock, flags);
-	for (p1 = open_files; p1;) {
-		if (p1->q_id == id) {
-
-			if (p1->fasync)
-				cfg_fasync(-1, file, 0);
-			if (p2)
-				p2->next = p1->next;
-			else
-				open_files = p1->next;
-
-			kfree(p1);
+	for (q = &open_files; (p = *q) != NULL; q = &p->next) {
+		if (p->q_id == id) {
+			*q = p->next;
+			kfree(p);
 			break;
 		}
-		p2 = p1;
-		p1 = p1->next;
 	}
 	spin_unlock_irqrestore(&i2o_config_lock, flags);
-	unlock_kernel();
+	mutex_unlock(&i2o_cfg_mutex);
 
 	return 0;
 }
 
-static struct file_operations config_fops = {
+static const struct file_operations config_fops = {
 	.owner = THIS_MODULE,
 	.llseek = no_llseek,
-	.ioctl = i2o_cfg_ioctl,
+	.unlocked_ioctl = i2o_cfg_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = i2o_cfg_compat_ioctl,
 #endif
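The tail of the diff is the standard conversion from the old BKL-protected .ioctl entry point to .unlocked_ioctl with a driver-private mutex. For reference only, a minimal sketch of that pattern is shown below, using hypothetical example_* names rather than symbols from this driver:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);

/* unlocked_ioctl: no inode argument, returns long, no BKL held on entry. */
static long example_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	long ret;

	mutex_lock(&example_mutex);	/* driver-private lock replaces lock_kernel() */
	switch (cmd) {
	default:
		ret = -ENOTTY;
		break;
	}
	mutex_unlock(&example_mutex);

	return ret;
}

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= example_ioctl,
};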
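Similarly, the i2o_cfg_parms() hunk replaces an open-coded kmalloc() plus copy_from_user() with a size cap followed by memdup_user(). A stand-alone sketch of that pattern follows; the 64K bound mirrors the diff, while the helper name handle_user_oplist() is made up for illustration:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int handle_user_oplist(const void __user *ubuf, size_t len)
{
	u8 *ops;

	/* Refuse user-controlled allocations beyond a sane bound. */
	if (len > 65536)
		return -EMSGSIZE;

	/* memdup_user() bundles kmalloc(), copy_from_user() and error handling. */
	ops = memdup_user(ubuf, len);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* ... operate on ops ... */

	kfree(ops);
	return 0;
}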
